Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
S
Stable Diffusion Webui
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Locked Files
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Security & Compliance
Security & Compliance
Dependency List
License Compliance
Packages
Packages
List
Container Registry
Analytics
Analytics
CI / CD
Code Review
Insights
Issues
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
novelai-storage
Stable Diffusion Webui
Commits
f0e2098f
Commit
f0e2098f
authored
Jul 17, 2023
by
brkirch
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add support for `--upcast-sampling` with SD XL
parent
a99d5708
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
8 additions
and
2 deletions
+8
-2
modules/sd_hijack_unet.py
modules/sd_hijack_unet.py
+7
-1
modules/sd_models.py
modules/sd_models.py
+1
-1
No files found.
modules/sd_hijack_unet.py
View file @
f0e2098f
...
@@ -39,7 +39,10 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
...
@@ -39,7 +39,10 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
if
isinstance
(
cond
,
dict
):
if
isinstance
(
cond
,
dict
):
for
y
in
cond
.
keys
():
for
y
in
cond
.
keys
():
cond
[
y
]
=
[
x
.
to
(
devices
.
dtype_unet
)
if
isinstance
(
x
,
torch
.
Tensor
)
else
x
for
x
in
cond
[
y
]]
if
isinstance
(
cond
[
y
],
list
):
cond
[
y
]
=
[
x
.
to
(
devices
.
dtype_unet
)
if
isinstance
(
x
,
torch
.
Tensor
)
else
x
for
x
in
cond
[
y
]]
else
:
cond
[
y
]
=
cond
[
y
]
.
to
(
devices
.
dtype_unet
)
if
isinstance
(
cond
[
y
],
torch
.
Tensor
)
else
cond
[
y
]
with
devices
.
autocast
():
with
devices
.
autocast
():
return
orig_func
(
self
,
x_noisy
.
to
(
devices
.
dtype_unet
),
t
.
to
(
devices
.
dtype_unet
),
cond
,
**
kwargs
)
.
float
()
return
orig_func
(
self
,
x_noisy
.
to
(
devices
.
dtype_unet
),
t
.
to
(
devices
.
dtype_unet
),
cond
,
**
kwargs
)
.
float
()
...
@@ -77,3 +80,6 @@ first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devi
...
@@ -77,3 +80,6 @@ first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devi
# Patch the first-stage (VAE) entry points so their inputs are cast to the VAE
# dtype when upcasting is active (`first_stage_sub` / `first_stage_cond` are
# defined earlier in this module).
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)

# SD XL (sgm package) equivalents, added for --upcast-sampling support:
# route the UNet wrapper through apply_model, and cast timestep embeddings to
# the UNet dtype (int64 timesteps keep a float32 embedding).
CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast)
CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
modules/sd_models.py
View file @
f0e2098f
...
@@ -326,7 +326,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
...
@@ -326,7 +326,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
# --- fragment from inside load_model_weights (modules/sd_models.py) ---
# NOTE(review): reconstructed from a garbled diff scrape; the enclosing
# function definition is outside this view.
timer.record("apply half()")

# SD XL under half precision keeps the UNet in float16 even if the diffusion
# model reports a different dtype (this commit's change); otherwise fall back
# to whatever dtype the diffusion model actually holds.
devices.dtype_unet = torch.float16 if model.is_sdxl and not shared.cmd_opts.no_half else model.model.diffusion_model.dtype
# Upcast sampling only applies when both the global compute dtype and the
# UNet dtype are fp16.
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16

model.first_stage_model.to(devices.dtype_vae)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment