novelai-storage / Stable Diffusion Webui

Commit f7c787eb
authored Oct 07, 2022 by AUTOMATIC

make it possible to use hypernetworks without opt split attention

parent 97bc0b95
Changes: 2 changed files, with 38 additions and 10 deletions

modules/hypernetwork.py    +34  -8
modules/sd_hijack.py        +4  -2
modules/hypernetwork.py  (view file @ f7c787eb)

@@ -4,7 +4,12 @@ import sys
 import traceback
 
 import torch
-from modules import devices
+
+from ldm.util import default
+from modules import devices, shared
+import torch
+from torch import einsum
+from einops import rearrange, repeat
 
 
 class HypernetworkModule(torch.nn.Module):
@@ -48,15 +53,36 @@ def load_hypernetworks(path):
     return res
 
 
-def apply(self, x, context=None, mask=None, original=None):
-    if CrossAttention.hypernetwork is not None and context.shape[2] in CrossAttention.hypernetwork:
-        if context.shape[1] == 77 and CrossAttention.noise_cond:
-            context = context + (torch.randn_like(context) * 0.1)
-        h_k, h_v = CrossAttention.hypernetwork[context.shape[2]]
-        k = self.to_k(h_k(context))
-        v = self.to_v(h_v(context))
+def attention_CrossAttention_forward(self, x, context=None, mask=None):
+    h = self.heads
+
+    q = self.to_q(x)
+    context = default(context, x)
+
+    hypernetwork = shared.selected_hypernetwork()
+    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+
+    if hypernetwork_layers is not None:
+        k = self.to_k(hypernetwork_layers[0](context))
+        v = self.to_v(hypernetwork_layers[1](context))
     else:
         k = self.to_k(context)
         v = self.to_v(context)
+
+    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+
+    sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+
+    if mask is not None:
+        mask = rearrange(mask, 'b ... -> b (...)')
+        max_neg_value = -torch.finfo(sim.dtype).max
+        mask = repeat(mask, 'b j -> (b h) () j', h=h)
+        sim.masked_fill_(~mask, max_neg_value)
+
+    # attention, what we cannot get enough of
+    attn = sim.softmax(dim=-1)
+
+    out = einsum('b i j, b j d -> b i d', attn, v)
+    out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+    return self.to_out(out)
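The new attention_CrossAttention_forward is essentially the standard cross-attention computation with one extra step: look up a (k, v) pair of hypernetwork modules keyed by the context's embedding width and run the context through them before the to_k/to_v projections. A minimal sketch of that lookup, assuming a toy residual MLP in place of the real HypernetworkModule and an illustrative 768-wide context:

import torch


class TinyHypernetLayer(torch.nn.Module):
    """Stand-in for HypernetworkModule: a small residual MLP over the context."""
    def __init__(self, dim):
        super().__init__()
        self.linear1 = torch.nn.Linear(dim, dim * 2)
        self.linear2 = torch.nn.Linear(dim * 2, dim)

    def forward(self, x):
        return x + self.linear2(self.linear1(x))


# one (k, v) pair of modules per embedding width, keyed by context.shape[2];
# index 0 transforms the input of to_k, index 1 the input of to_v
layers = {768: (TinyHypernetLayer(768), TinyHypernetLayer(768))}

context = torch.randn(2, 77, 768)            # (batch, tokens, width)
pair = layers.get(context.shape[2], None)    # same lookup the patched forward does

if pair is not None:
    k_input, v_input = pair[0](context), pair[1](context)
else:
    k_input = v_input = context

print(k_input.shape, v_input.shape)          # both remain (2, 77, 768)

The transformed context keeps its shape, which is what lets the hypernetwork be dropped in front of to_k/to_v without touching the rest of the attention code.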
modules/sd_hijack.py  (view file @ f7c787eb)

@@ -8,7 +8,7 @@ from torch import einsum
 from torch.nn.functional import silu
 
 import modules.textual_inversion.textual_inversion
-from modules import prompt_parser, devices, sd_hijack_optimizations, shared
+from modules import prompt_parser, devices, sd_hijack_optimizations, shared, hypernetwork
 from modules.shared import opts, device, cmd_opts
 
 import ldm.modules.attention
@@ -20,6 +20,8 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
 
 
 def apply_optimizations():
+    undo_optimizations()
+
     ldm.modules.diffusionmodules.model.nonlinearity = silu
 
     if cmd_opts.opt_split_attention_v1:
@@ -30,7 +32,7 @@ def apply_optimizations():
 
 
 def undo_optimizations():
-    ldm.modules.attention.CrossAttention.forward = attention_CrossAttention_forward
+    ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
     ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
     ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
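The sd_hijack side is plain monkey-patching: undo_optimizations() now installs hypernetwork.attention_CrossAttention_forward as CrossAttention.forward, and apply_optimizations() calls undo_optimizations() first, so a selected hypernetwork is applied even when no opt-split-attention variant replaces the forward afterwards. A minimal, self-contained sketch of that pattern (the stub class and function names here are illustrative, not the repo's):

class CrossAttentionStub:
    """Stand-in for ldm.modules.attention.CrossAttention."""
    def forward(self, x):
        return "plain forward"


def hypernetwork_aware_forward(self, x):
    """Stand-in for hypernetwork.attention_CrossAttention_forward."""
    return "hypernetwork-aware forward"


# saved once at import time, like the *_forward globals at the top of sd_hijack.py
original_forward = CrossAttentionStub.forward

# swap the method on the class so every existing instance picks it up
CrossAttentionStub.forward = hypernetwork_aware_forward

attn = CrossAttentionStub()
print(attn.forward(None))    # hypernetwork-aware forward

# restoring works the same way in reverse
CrossAttentionStub.forward = original_forward
print(attn.forward(None))    # plain forward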