Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
S
Stable Diffusion Webui
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Locked Files
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Security & Compliance
Security & Compliance
Dependency List
License Compliance
Packages
Packages
List
Container Registry
Analytics
Analytics
CI / CD
Code Review
Insights
Issues
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
novelai-storage
Stable Diffusion Webui
Commits
b5d1af11
Commit
b5d1af11
authored
Sep 11, 2022
by
Abdullah Barhoum
Committed by
AUTOMATIC1111
Sep 11, 2022
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Modular device management
parent
065e310a
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
19 additions
and
13 deletions
+19
-13
modules/devices.py
modules/devices.py
+12
-0
modules/esrgan_model.py
modules/esrgan_model.py
+2
-1
modules/lowvram.py
modules/lowvram.py
+2
-6
modules/shared.py
modules/shared.py
+3
-6
No files found.
modules/devices.py
0 → 100644
View file @
b5d1af11
import torch

# `torch.has_mps` exists only in nightly PyTorch builds at the moment; probe
# for the attribute so stable releases quietly resolve to False instead of
# raising AttributeError at import time.
has_mps = torch.has_mps if hasattr(torch, 'has_mps') else False
def get_optimal_device():
    """Return the best available torch.device.

    Preference order: CUDA if a CUDA runtime is available, otherwise MPS
    (Apple Silicon, when the running PyTorch build exposes it), otherwise CPU.
    """
    if torch.cuda.is_available():
        backend = "cuda"
    elif has_mps:
        backend = "mps"
    else:
        backend = "cpu"
    return torch.device(backend)
modules/esrgan_model.py
View file @
b5d1af11
...
...
@@ -9,12 +9,13 @@ from PIL import Image
import
modules.esrgam_model_arch
as
arch
from
modules
import
shared
from
modules.shared
import
opts
from
modules.devices
import
has_mps
import
modules.images
def
load_model
(
filename
):
# this code is adapted from https://github.com/xinntao/ESRGAN
pretrained_net
=
torch
.
load
(
filename
,
map_location
=
'cpu'
if
torch
.
has_mps
else
None
)
pretrained_net
=
torch
.
load
(
filename
,
map_location
=
'cpu'
if
has_mps
else
None
)
crt_model
=
arch
.
RRDBNet
(
3
,
3
,
64
,
23
,
gc
=
32
)
if
'conv_first.weight'
in
pretrained_net
:
...
...
modules/lowvram.py
View file @
b5d1af11
import
torch
from
modules.devices
import
get_optimal_device
module_in_gpu
=
None
cpu
=
torch
.
device
(
"cpu"
)
if
torch
.
has_cuda
:
device
=
gpu
=
torch
.
device
(
"cuda"
)
elif
torch
.
has_mps
:
device
=
gpu
=
torch
.
device
(
"mps"
)
else
:
device
=
gpu
=
torch
.
device
(
"cpu"
)
device
=
gpu
=
get_optimal_device
()
def
setup_for_low_vram
(
sd_model
,
use_medvram
):
parents
=
{}
...
...
modules/shared.py
View file @
b5d1af11
...
...
@@ -9,6 +9,7 @@ import tqdm
import
modules.artists
from
modules.paths
import
script_path
,
sd_path
from
modules.devices
import
get_optimal_device
import
modules.styles
config_filename
=
"config.json"
...
...
@@ -43,12 +44,8 @@ parser.add_argument("--ui-config-file", type=str, help="filename to use for ui c
cmd_opts
=
parser
.
parse_args
()
if
torch
.
has_cuda
:
device
=
torch
.
device
(
"cuda"
)
elif
torch
.
has_mps
:
device
=
torch
.
device
(
"mps"
)
else
:
device
=
torch
.
device
(
"cpu"
)
device
=
get_optimal_device
()
batch_cond_uncond
=
cmd_opts
.
always_batch_cond_uncond
or
not
(
cmd_opts
.
lowvram
or
cmd_opts
.
medvram
)
parallel_processing_allowed
=
not
cmd_opts
.
lowvram
and
not
cmd_opts
.
medvram
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment