Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
S
Stable Diffusion Webui
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Locked Files
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Security & Compliance
Security & Compliance
Dependency List
License Compliance
Packages
Packages
List
Container Registry
Analytics
Analytics
CI / CD
Code Review
Insights
Issues
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
novelai-storage
Stable Diffusion Webui
Commits
26a5a543
Commit
26a5a543
authored
Oct 29, 2022
by
AUTOMATIC
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
running without autocast
parent
405c8171
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
34 additions
and
5 deletions
+34
-5
modules/processing.py
modules/processing.py
+3
-2
modules/prompt_parser.py
modules/prompt_parser.py
+2
-1
modules/sd_models.py
modules/sd_models.py
+22
-1
modules/sd_samplers.py
modules/sd_samplers.py
+7
-1
No files found.
modules/processing.py
View file @
26a5a543
...
...
@@ -372,8 +372,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if
p
.
n_iter
>
1
:
shared
.
state
.
job
=
f
"Batch {n+1} out of {p.n_iter}"
with
devices
.
autocast
():
samples_ddim
=
p
.
sample
(
conditioning
=
c
,
unconditional_conditioning
=
uc
,
seeds
=
seeds
,
subseeds
=
subseeds
,
subseed_strength
=
p
.
subseed_strength
)
#with devices.autocast():
samples_ddim
=
p
.
sample
(
conditioning
=
c
,
unconditional_conditioning
=
uc
,
seeds
=
seeds
,
subseeds
=
subseeds
,
subseed_strength
=
p
.
subseed_strength
)
if
state
.
interrupted
:
...
...
modules/prompt_parser.py
View file @
26a5a543
...
...
@@ -10,6 +10,7 @@ import lark
# [60, 'fantasy landscape with a lake and an oak in foreground in background masterful']
# [75, 'fantasy landscape with a lake and an oak in background masterful']
# [100, 'fantasy landscape with a lake and a christmas tree in background masterful']
from
modules
import
devices
schedule_parser
=
lark
.
Lark
(
r"""
!start: (prompt | /[][():]/+)*
...
...
@@ -130,7 +131,7 @@ def get_learned_conditioning(model, prompts, steps):
continue
texts
=
[
x
[
1
]
for
x
in
prompt_schedule
]
conds
=
model
.
get_learned_conditioning
(
texts
)
conds
=
model
.
get_learned_conditioning
(
texts
)
.
to
(
devices
.
dtype
)
cond_schedule
=
[]
for
i
,
(
end_at_step
,
text
)
in
enumerate
(
prompt_schedule
):
...
...
modules/sd_models.py
View file @
26a5a543
...
...
@@ -5,7 +5,9 @@ from collections import namedtuple
import
torch
from
omegaconf
import
OmegaConf
import
ldm.modules.diffusionmodules.model
import
ldm.modules.diffusionmodules.openaimodel
import
ldm.modules.diffusionmodules.util
from
ldm.util
import
instantiate_from_config
from
modules
import
shared
,
modelloader
,
devices
...
...
@@ -27,6 +29,23 @@ except Exception:
pass
def timestep_embedding(*args, **kwargs):
    """Dtype-casting wrapper around ldm's ``timestep_embedding``.

    Calls the saved original implementation unchanged, then converts the
    resulting tensor to ``devices.dtype`` so the embedding matches the
    model's working precision when running without autocast.
    """
    embedding = ldm_modules_diffusionmodules_util_timestep_embedding(*args, **kwargs)
    return embedding.to(devices.dtype)
# Keep a reference to the original implementation so the wrapper above can delegate to it.
# NOTE(review): the variable name says "util", but the function is taken from the
# openaimodel namespace (which re-imports it from util) — confirm this is intentional.
ldm_modules_diffusionmodules_util_timestep_embedding = ldm.modules.diffusionmodules.openaimodel.timestep_embedding
# Monkeypatch: route openaimodel's timestep_embedding through the dtype-casting wrapper.
ldm.modules.diffusionmodules.openaimodel.timestep_embedding = timestep_embedding
class GroupNorm32(torch.nn.GroupNorm):
    """GroupNorm whose output is cast back to the dtype of its input.

    Replacement for ldm's GroupNorm32: normalizes via the parent class,
    then restores the incoming tensor's dtype so mixed-precision inputs
    keep their precision when running without autocast.
    """

    def forward(self, x):
        normalized = super().forward(x)
        return normalized.type(x.dtype)
# Monkeypatch: make ldm's util module use the dtype-preserving GroupNorm32 defined above.
ldm.modules.diffusionmodules.util.GroupNorm32 = GroupNorm32
def
setup_model
():
if
not
os
.
path
.
exists
(
model_path
):
os
.
makedirs
(
model_path
)
...
...
@@ -133,6 +152,8 @@ def load_model_weights(model, checkpoint_file, sd_model_hash):
model
.
half
()
devices
.
dtype
=
torch
.
float32
if
shared
.
cmd_opts
.
no_half
else
torch
.
float16
model
.
model
.
diffusion_model
.
dtype
=
devices
.
dtype
torch
.
set_default_tensor_type
(
torch
.
FloatTensor
if
shared
.
cmd_opts
.
no_half
else
torch
.
HalfTensor
)
model
.
sd_model_hash
=
sd_model_hash
model
.
sd_model_checkpint
=
checkpoint_file
...
...
modules/sd_samplers.py
View file @
26a5a543
...
...
@@ -7,7 +7,7 @@ import inspect
import
k_diffusion.sampling
import
ldm.models.diffusion.ddim
import
ldm.models.diffusion.plms
from
modules
import
prompt_parser
from
modules
import
prompt_parser
,
devices
from
modules.shared
import
opts
,
cmd_opts
,
state
import
modules.shared
as
shared
...
...
@@ -339,9 +339,13 @@ class KDiffusionSampler:
if
p
.
sampler_noise_scheduler_override
:
sigmas
=
p
.
sampler_noise_scheduler_override
(
steps
)
elif
self
.
config
is
not
None
and
self
.
config
.
options
.
get
(
'scheduler'
,
None
)
==
'karras'
:
sigmas
=
k_diffusion
.
sampling
.
get_sigmas_karras
(
n
=
steps
,
sigma_min
=
0.1
,
sigma_max
=
10
,
device
=
shared
.
device
)
else
:
sigmas
=
self
.
model_wrap
.
get_sigmas
(
steps
)
sigmas
=
sigmas
.
to
(
devices
.
dtype
)
noise
=
noise
*
sigmas
[
steps
-
t_enc
-
1
]
xi
=
x
+
noise
...
...
@@ -363,6 +367,8 @@ class KDiffusionSampler:
else
:
sigmas
=
self
.
model_wrap
.
get_sigmas
(
steps
)
sigmas
=
sigmas
.
to
(
devices
.
dtype
)
x
=
x
*
sigmas
[
0
]
extra_params_kwargs
=
self
.
initialize
(
p
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment