Stable Diffusion Webui, commit 296d0124
authored Sep 07, 2022 by AUTOMATIC1111, committed by GitHub on Sep 07, 2022
Merge pull request #108 from xeonvs/mps-support
Added support for launching on Apple Silicon M1/M2
parents: ee29bb77 ba1124b3
Showing 4 changed files with 18 additions and 9 deletions (+18 -9):
modules/esrgan_model.py  +5 -2
modules/lowvram.py       +6 -3
modules/sd_hijack.py     +1 -1
modules/shared.py        +6 -3
modules/esrgan_model.py

@@ -14,8 +14,11 @@ import modules.images

 def load_model(filename):
     # this code is adapted from https://github.com/xinntao/ESRGAN
-    pretrained_net = torch.load(filename)
+    if torch.has_mps:
+        map_l = 'cpu'
+    else:
+        map_l = None
+    pretrained_net = torch.load(filename, map_location=map_l)
     crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32)

     if 'conv_first.weight' in pretrained_net:
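The map_location change is the load-time half of MPS support: torch.load() by default restores tensors to the device they were saved from, typically CUDA, which raises an error on Apple Silicon machines that have no CUDA runtime. A minimal standalone sketch of the same pattern, not taken from the repo (the helper name is hypothetical):

import torch

def load_checkpoint_portably(filename):
    # Checkpoints are usually saved from a CUDA machine. On an MPS-only
    # box, deserializing CUDA tensors fails, so remap them to CPU first;
    # map_location=None keeps torch.load's default behavior elsewhere.
    map_l = 'cpu' if torch.has_mps else None  # torch.has_mps: PyTorch >= 1.12
    return torch.load(filename, map_location=map_l)

The weights then live on the CPU, and the constructed module can be moved to the MPS device afterwards with .to("mps").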
modules/lowvram.py

@@ -2,9 +2,12 @@ import torch

 module_in_gpu = None
 cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
+if torch.has_cuda:
+    device = gpu = torch.device("cuda")
+elif torch.has_mps:
+    device = gpu = torch.device("mps")
+else:
+    device = gpu = torch.device("cpu")

 def setup_for_low_vram(sd_model, use_medvram):
     parents = {}
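lowvram.py previously made a binary CUDA-or-CPU choice; the commit widens it to a three-way chain that prefers CUDA, then Apple's Metal (MPS) backend, then CPU. Factored into a helper, the chain looks like the sketch below (the function name is hypothetical, not part of this commit):

import torch

def get_optimal_device():
    # Preference order from the commit: CUDA, then MPS, then CPU.
    # Caveat: torch.has_cuda reports build-time support, so it can be
    # True on a machine with no usable GPU; torch.cuda.is_available()
    # is the stricter runtime check the old code used.
    if torch.has_cuda:
        return torch.device("cuda")
    if torch.has_mps:
        return torch.device("mps")
    return torch.device("cpu")

device = gpu = get_optimal_device()

The gpu name stays bound as an alias of device, matching the commit's device = gpu = ... assignments, since lowvram.py shuttles modules between cpu and gpu by those names.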
modules/sd_hijack.py

@@ -232,7 +232,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):

         z = outputs.last_hidden_state

         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
+        batch_multipliers = torch.asarray(batch_multipliers).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
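The one-line sd_hijack.py change looks cosmetic but is presumably part of the same MPS fix: routing batch_multipliers through np.array() promotes the Python floats to float64, a dtype the MPS backend does not support, so the subsequent .to(device) would fail on Apple Silicon. Passing the list straight to torch.asarray() keeps torch's default float32. An illustrative snippet, not from the repo:

import numpy as np
import torch

weights = [[1.0, 1.1, 0.9]]
via_numpy = torch.asarray(np.array(weights))  # numpy promotes to float64
direct = torch.asarray(weights)               # torch default dtype: float32
print(via_numpy.dtype, direct.dtype)          # torch.float64 torch.float32
# via_numpy.to(torch.device("mps")) would raise on Apple Silicon, since
# MPS cannot represent float64 tensors; direct moves over cleanly.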
modules/shared.py

@@ -36,9 +36,12 @@ parser.add_argument("--opt-split-attention", action='store_true', help="enable o

 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")

 cmd_opts = parser.parse_args()

-cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
+if torch.has_cuda:
+    device = torch.device("cuda")
+elif torch.has_mps:
+    device = torch.device("mps")
+else:
+    device = torch.device("cpu")

 batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
 parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
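shared.py gets the same three-way selection, without the gpu alias. One behavioral subtlety worth flagging: the old code used torch.cuda.is_available(), a runtime check, while torch.has_cuda is a build-time flag that can be True even when no usable GPU is present. A defensive variant that keeps the runtime checks, assuming PyTorch 1.12-era APIs and not part of this commit:

import torch

if torch.cuda.is_available():
    device = torch.device("cuda")
elif getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available():
    # torch.backends.mps appeared in PyTorch 1.12; the getattr guard
    # keeps this snippet import-safe on older builds.
    device = torch.device("mps")
else:
    device = torch.device("cpu")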