novelai-storage / Stable Diffusion Webui

Commit 2cfcb23c, authored Oct 06, 2022 by AUTOMATIC1111; committed by GitHub, Oct 06, 2022
Merge pull request #1283 from jn-jairo/fix-vram
Fix memory leak and reduce memory usage
Parents: 82eb8ea4, b66aa334

Showing 4 changed files with 20 additions and 10 deletions (+20 -10):

- modules/extras.py: +2 -0
- modules/processing.py: +15 -1
- modules/sd_hijack.py: +3 -1
- modules/sd_hijack_optimizations.py: +0 -8
modules/extras.py

@@ -100,6 +100,8 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
         outputs.append(image)
 
+    devices.torch_gc()
+
     return outputs, plaintext_to_html(info), ''
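devices.torch_gc() is the webui's garbage-collection helper, called here after the batch of processed images is assembled. Its body is not part of this diff; the following is a minimal sketch of what such a helper typically does, an assumption rather than the repository's exact code:

    import gc
    import torch

    def torch_gc():
        # Hypothetical sketch: reclaim memory after references are dropped.
        gc.collect()                      # collect unreachable Python objects
        if torch.cuda.is_available():
            torch.cuda.empty_cache()      # release cached blocks back to the driver
            torch.cuda.ipc_collect()      # reclaim memory held by dead CUDA IPC handles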
modules/processing.py

@@ -11,7 +11,7 @@ import cv2
 from skimage import exposure
 import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram
 from modules.sd_hijack import model_hijack
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared

@@ -382,6 +382,13 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim)
             x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
 
+            del samples_ddim
+
+            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+                lowvram.send_everything_to_cpu()
+
+            devices.torch_gc()
+
             if opts.filter_nsfw:
                 import modules.safety as safety
                 x_samples_ddim = modules.safety.censor_batch(x_samples_ddim)

@@ -426,6 +433,10 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
                 infotexts.append(infotext(n, i))
                 output_images.append(image)
 
+            del x_samples_ddim
+
+            devices.torch_gc()
+
             state.nextjob()
 
         p.color_corrections = None

@@ -663,4 +674,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         if self.mask is not None:
             samples = samples * self.nmask + self.init_latent * self.mask
 
        del x
        devices.torch_gc()

         return samples
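The ordering in these hunks is the point of the fix: a CUDA tensor's memory cannot be reclaimed while a live Python reference to it exists, so each `del` of a no-longer-needed latent (`samples_ddim`, `x_samples_ddim`, `x`) comes before the collection call, and under --lowvram/--medvram the model weights are moved back to the CPU first so their VRAM can be reclaimed too. A small illustration of the reference-then-cache ordering (hypothetical, not part of the diff; assumes a CUDA device):

    import torch

    x = torch.zeros(256, 1024, 1024, device="cuda")                 # ~1 GiB of fp32 data
    print(torch.cuda.memory_allocated() // 2**20, "MiB allocated")  # ~1024

    del x                     # drop the last reference: the allocator can now reuse the block...
    torch.cuda.empty_cache()  # ...and empty_cache() returns the cached block to the driver
    print(torch.cuda.memory_reserved() // 2**20, "MiB reserved")    # ~0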
modules/sd_hijack.py

@@ -5,6 +5,7 @@ import traceback
 import torch
 import numpy as np
 from torch import einsum
+from torch.nn.functional import silu
 
 import modules.textual_inversion.textual_inversion
 from modules import prompt_parser, devices, sd_hijack_optimizations, shared

@@ -19,11 +20,12 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
 def apply_optimizations():
+    ldm.modules.diffusionmodules.model.nonlinearity = silu
+
     if cmd_opts.opt_split_attention_v1:
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
     elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
-        ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
modules/sd_hijack_optimizations.py

@@ -92,14 +92,6 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
     return self.to_out(r2)
 
-def nonlinearity_hijack(x):
-    # swish
-    t = torch.sigmoid(x)
-    x *= t
-    del t
-
-    return x
-
 def cross_attention_attnblock_forward(self, x):
     h_ = x
     h_ = self.norm(h_)
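The removed nonlinearity_hijack was a hand-rolled swish that materialized an intermediate sigmoid tensor before multiplying it in. torch.nn.functional.silu, now assigned unconditionally at the top of apply_optimizations() in sd_hijack.py, computes the same function, silu(x) = x * sigmoid(x), without the extra bookkeeping. A quick equivalence check (illustrative only, not part of the commit):

    import torch
    from torch.nn.functional import silu

    x = torch.randn(4, 512)
    # silu(x) = x * sigmoid(x) -- the same "swish" the removed hijack computed
    assert torch.allclose(silu(x), x * torch.sigmoid(x))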