novelai-storage / Stable Diffusion Webui

Commit 238adeaf
Authored Jul 17, 2023 by AUTOMATIC1111
Parent: 46466f09

    support specifying te and unet weights separately
    update lora code support full module

Showing 10 changed files with 151 additions and 78 deletions (+151 -78)
Changed files:

extensions-builtin/Lora/extra_networks_lora.py   +18  -4
extensions-builtin/Lora/lyco_helpers.py           +6  -0
extensions-builtin/Lora/network.py               +39  -1
extensions-builtin/Lora/network_full.py          +23  -0
extensions-builtin/Lora/network_hada.py           +1  -2
extensions-builtin/Lora/network_ia3.py            +1  -2
extensions-builtin/Lora/network_lokr.py           +1  -2
extensions-builtin/Lora/network_lora.py          +44 -28
extensions-builtin/Lora/network_lyco.py           +0 -35
extensions-builtin/Lora/networks.py              +18  -4
extensions-builtin/Lora/extra_networks_lora.py

@@ -14,14 +14,28 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
             params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
 
         names = []
-        multipliers = []
+        te_multipliers = []
+        unet_multipliers = []
+        dyn_dims = []
         for params in params_list:
             assert params.items
 
-            names.append(params.items[0])
-            multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
+            names.append(params.positional[0])
+
+            te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
+            te_multiplier = float(params.named.get("te", te_multiplier))
+
+            unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else 1.0
+            unet_multiplier = float(params.named.get("unet", unet_multiplier))
+
+            dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
+            dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim
+
+            te_multipliers.append(te_multiplier)
+            unet_multipliers.append(unet_multiplier)
+            dyn_dims.append(dyn_dim)
 
-        networks.load_networks(names, multipliers)
+        networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)
 
         if shared.opts.lora_add_hashes_to_infotext:
             network_hashes = []
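Note: with this change the <lora:...> prompt tag accepts separate text-encoder and unet strengths, positionally or by name. A sketch of the accepted forms, assuming the usual colon-separated extra-network tag syntax (the network name is hypothetical):

    <lora:mynet:0.8>                      te multiplier 0.8, unet multiplier defaults to 1.0
    <lora:mynet:0.8:0.5>                  te 0.8, unet 0.5
    <lora:mynet:0.8:0.5:16>               as above, plus dyn_dim 16
    <lora:mynet:te=0.8:unet=0.5:dyn=16>   the same via named arguments (params.named)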
extensions-builtin/Lora/lyco_helpers.py

@@ -13,3 +13,9 @@ def rebuild_conventional(up, down, shape, dyn_dim=None):
         up = up[:, :dyn_dim]
         down = down[:dyn_dim, :]
 
     return (up @ down).reshape(shape)
+
+
+def rebuild_cp_decomposition(up, down, mid):
+    up = up.reshape(up.size(0), -1)
+    down = down.reshape(down.size(0), -1)
+    return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
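Note: a quick shape check of the new rebuild_cp_decomposition helper, as a sketch with assumed CP-decomposition shapes (mid carries the conv kernel, up and down carry the rank):

    import torch

    up = torch.randn(128, 4)        # (out_channels, rank), hypothetical
    down = torch.randn(4, 64)       # (rank, in_channels)
    mid = torch.randn(4, 4, 3, 3)   # (rank, rank, kh, kw)

    # same einsum as rebuild_cp_decomposition: rebuilds the full conv weight
    w = torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
    print(w.shape)  # torch.Size([128, 64, 3, 3])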
extensions-builtin/Lora/network.py

@@ -68,7 +68,9 @@ class Network: # LoraModule
     def __init__(self, name, network_on_disk: NetworkOnDisk):
         self.name = name
         self.network_on_disk = network_on_disk
-        self.multiplier = 1.0
+        self.te_multiplier = 1.0
+        self.unet_multiplier = 1.0
+        self.dyn_dim = None
         self.modules = {}
         self.mtime = None

@@ -88,6 +90,42 @@ class NetworkModule:
         self.sd_key = weights.sd_key
         self.sd_module = weights.sd_module
 
+        if hasattr(self.sd_module, 'weight'):
+            self.shape = self.sd_module.weight.shape
+
+        self.dim = None
+        self.bias = weights.w.get("bias")
+        self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
+        self.scale = weights.w["scale"].item() if "scale" in weights.w else None
+
+    def multiplier(self):
+        if 'transformer' in self.sd_key[:20]:
+            return self.network.te_multiplier
+        else:
+            return self.network.unet_multiplier
+
+    def calc_scale(self):
+        if self.scale is not None:
+            return self.scale
+        if self.dim is not None and self.alpha is not None:
+            return self.alpha / self.dim
+
+        return 1.0
+
+    def finalize_updown(self, updown, orig_weight, output_shape):
+        if self.bias is not None:
+            updown = updown.reshape(self.bias.shape)
+            updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
+            updown = updown.reshape(output_shape)
+
+        if len(output_shape) == 4:
+            updown = updown.reshape(output_shape)
+
+        if orig_weight.size().numel() == updown.size().numel():
+            updown = updown.reshape(orig_weight.shape)
+
+        return updown * self.calc_scale() * self.multiplier()
+
     def calc_updown(self, target):
         raise NotImplementedError()
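Note: finalize_updown now folds in calc_scale() and the new per-module multiplier(), which routes text-encoder modules (keys starting with 'transformer') to te_multiplier and everything else to unet_multiplier. A minimal sketch of that routing, with hypothetical keys:

    def pick_multiplier(sd_key, te_multiplier, unet_multiplier):
        # mirrors NetworkModule.multiplier(): a prefix check on the sd key
        return te_multiplier if 'transformer' in sd_key[:20] else unet_multiplier

    print(pick_multiplier("transformer_text_model_encoder_layers_0_mlp_fc1", 0.8, 0.5))  # 0.8
    print(pick_multiplier("diffusion_model_input_blocks_1_1_proj_in", 0.8, 0.5))         # 0.5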
extensions-builtin/Lora/network_full.py (new file, 0 → 100644)

+import lyco_helpers
+import network
+
+
+class ModuleTypeFull(network.ModuleType):
+    def create_module(self, net: network.Network, weights: network.NetworkWeights):
+        if all(x in weights.w for x in ["diff"]):
+            return NetworkModuleFull(net, weights)
+
+        return None
+
+
+class NetworkModuleFull(network.NetworkModule):
+    def __init__(self, net: network.Network, weights: network.NetworkWeights):
+        super().__init__(net, weights)
+
+        self.weight = weights.w.get("diff")
+
+    def calc_updown(self, orig_weight):
+        output_shape = self.weight.shape
+        updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+
+        return self.finalize_updown(updown, orig_weight, output_shape)
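Note: the new "full" module type stores the complete weight delta under the "diff" key instead of a low-rank factorization, so calc_updown has nothing to reconstruct. A sketch of what applying it amounts to (shapes hypothetical):

    import torch

    orig_weight = torch.randn(128, 64)    # a layer's weight
    diff = torch.randn_like(orig_weight)  # what weights.w["diff"] would hold

    # finalize_updown applies scale and the te/unet multiplier before this add
    patched = orig_weight + diff
    assert patched.shape == orig_weight.shape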
extensions-builtin/Lora/network_hada.py

 import lyco_helpers
 import network
-import network_lyco
 
 
 class ModuleTypeHada(network.ModuleType):

@@ -11,7 +10,7 @@ class ModuleTypeHada(network.ModuleType):
         return None
 
 
-class NetworkModuleHada(network_lyco.NetworkModuleLyco):
+class NetworkModuleHada(network.NetworkModule):
     def __init__(self, net: network.Network, weights: network.NetworkWeights):
         super().__init__(net, weights)
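Note: the diff only swaps the base class here, but for orientation: NetworkModuleHada implements the LyCORIS Hadamard-product (LoHa) scheme, which builds the weight delta as an elementwise product of two low-rank products. A sketch assuming the usual hada_w1_a/w1_b/w2_a/w2_b factor names from LyCORIS files:

    import torch

    w1a, w1b = torch.randn(128, 4), torch.randn(4, 64)   # hypothetical rank-4 factors
    w2a, w2b = torch.randn(128, 4), torch.randn(4, 64)

    updown = (w1a @ w1b) * (w2a @ w2b)   # Hadamard product of two low-rank matrices
    print(updown.shape)  # torch.Size([128, 64])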
extensions-builtin/Lora/network_ia3.py

 import network
-import network_lyco
 
 
 class ModuleTypeIa3(network.ModuleType):

@@ -10,7 +9,7 @@ class ModuleTypeIa3(network.ModuleType):
         return None
 
 
-class NetworkModuleIa3(network_lyco.NetworkModuleLyco):
+class NetworkModuleIa3(network.NetworkModule):
     def __init__(self, net: network.Network, weights: network.NetworkWeights):
         super().__init__(net, weights)
extensions-builtin/Lora/network_lokr.py

@@ -2,7 +2,6 @@ import torch
 
 import lyco_helpers
 import network
-import network_lyco
 
 
 class ModuleTypeLokr(network.ModuleType):

@@ -22,7 +21,7 @@ def make_kron(orig_shape, w1, w2):
     return torch.kron(w1, w2).reshape(orig_shape)
 
 
-class NetworkModuleLokr(network_lyco.NetworkModuleLyco):
+class NetworkModuleLokr(network.NetworkModule):
     def __init__(self, net: network.Network, weights: network.NetworkWeights):
         super().__init__(net, weights)
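Note: make_kron in the context above rebuilds a full weight from two much smaller Kronecker factors. A shape sanity check as a sketch (factor shapes hypothetical):

    import torch

    w1 = torch.randn(8, 4)
    w2 = torch.randn(16, 16)

    # torch.kron yields an (8*16) x (4*16) = 128 x 64 matrix; reshape matches the layer
    full = torch.kron(w1, w2).reshape(128, 64)
    print(full.shape)  # torch.Size([128, 64])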
extensions-builtin/Lora/network_lora.py

 import torch
 
+import lyco_helpers
 import network
 from modules import devices

@@ -16,29 +17,42 @@ class NetworkModuleLora(network.NetworkModule):
     def __init__(self, net: network.Network, weights: network.NetworkWeights):
         super().__init__(net, weights)
 
-        self.up = self.create_module(weights.w["lora_up.weight"])
-        self.down = self.create_module(weights.w["lora_down.weight"])
-        self.alpha = weights.w["alpha"] if "alpha" in weights.w else None
+        self.up_model = self.create_module(weights.w, "lora_up.weight")
+        self.down_model = self.create_module(weights.w, "lora_down.weight")
+        self.mid_model = self.create_module(weights.w, "lora_mid.weight", none_ok=True)
+
+        self.dim = weights.w["lora_down.weight"].shape[0]
 
-    def create_module(self, weight, none_ok=False):
+    def create_module(self, weights, key, none_ok=False):
+        weight = weights.get(key)
+
         if weight is None and none_ok:
             return None
 
-        if type(self.sd_module) == torch.nn.Linear:
-            module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
-        elif type(self.sd_module) == torch.nn.modules.linear.NonDynamicallyQuantizableLinear:
-            module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
-        elif type(self.sd_module) == torch.nn.MultiheadAttention:
-            module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
-        elif type(self.sd_module) == torch.nn.Conv2d and weight.shape[2:] == (1, 1):
-            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
+        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention]
+        is_conv = type(self.sd_module) in [torch.nn.Conv2d]
+
+        if is_linear:
+            weight = weight.reshape(weight.shape[0], -1)
+            module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
+        elif is_conv and key == "lora_down.weight" or key == "dyn_up":
+            if len(weight.shape) == 2:
+                weight = weight.reshape(weight.shape[0], -1, 1, 1)
+
+            if weight.shape[2] != 1 or weight.shape[3] != 1:
+                module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False)
+            else:
+                module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
+        elif is_conv and key == "lora_mid.weight":
+            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False)
+        elif is_conv and key == "lora_up.weight" or key == "dyn_down":
+            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
-        elif type(self.sd_module) == torch.nn.Conv2d and weight.shape[2:] == (3, 3):
-            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (3, 3), bias=False)
         else:
-            print(f'Network layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}')
-            return None
+            raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}')
 
         with torch.no_grad():
             if weight.shape != module.weight.shape:
                 weight = weight.reshape(module.weight.shape)
             module.weight.copy_(weight)
 
         module.to(device=devices.cpu, dtype=devices.dtype)

@@ -46,25 +60,27 @@ class NetworkModuleLora(network.NetworkModule):
         return module
 
-    def calc_updown(self, target):
-        up = self.up.weight.to(target.device, dtype=target.dtype)
-        down = self.down.weight.to(target.device, dtype=target.dtype)
+    def calc_updown(self, orig_weight):
+        up = self.up_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+        down = self.down_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
 
-        if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1):
-            updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3)
-        elif up.shape[2:] == (3, 3) or down.shape[2:] == (3, 3):
-            updown = torch.nn.functional.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)
+        output_shape = [up.size(0), down.size(1)]
+        if self.mid_model is not None:
+            # cp-decomposition
+            mid = self.mid_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+            updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid)
+            output_shape += mid.shape[2:]
         else:
-            updown = up @ down
-
-        updown = updown * self.network.multiplier * (self.alpha / self.up.weight.shape[1] if self.alpha else 1.0)
-
-        return updown
+            if len(down.shape) == 4:
+                output_shape += down.shape[2:]
+            updown = lyco_helpers.rebuild_conventional(up, down, output_shape, self.network.dyn_dim)
+
+        return self.finalize_updown(updown, orig_weight, output_shape)
 
     def forward(self, x, y):
-        self.up.to(device=devices.device)
-        self.down.to(device=devices.device)
+        self.up_model.to(device=devices.device)
+        self.down_model.to(device=devices.device)
 
-        return y + self.up(self.down(x)) * self.network.multiplier * (self.alpha / self.up.weight.shape[1] if self.alpha else 1.0)
+        return y + self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale()
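Note: the conventional branch delegates to lyco_helpers.rebuild_conventional, i.e. the classic LoRA update where the delta is up @ down scaled by alpha / dim. A sketch with assumed shapes:

    import torch

    up = torch.randn(320, 8)     # lora_up.weight, rank 8, hypothetical
    down = torch.randn(8, 320)   # lora_down.weight; dim = down.shape[0]
    alpha, dim = 4.0, 8

    # what finalize_updown applies, before the te/unet multiplier
    updown = (up @ down) * (alpha / dim)
    print(updown.shape)  # torch.Size([320, 320])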
extensions-builtin/Lora/network_lyco.py (deleted, 100644 → 0; view file @ 46466f09)

-import network
-
-
-class NetworkModuleLyco(network.NetworkModule):
-    def __init__(self, net: network.Network, weights: network.NetworkWeights):
-        super().__init__(net, weights)
-
-        if hasattr(self.sd_module, 'weight'):
-            self.shape = self.sd_module.weight.shape
-
-        self.dim = None
-        self.bias = weights.w.get("bias")
-        self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
-        self.scale = weights.w["scale"].item() if "scale" in weights.w else None
-
-    def finalize_updown(self, updown, orig_weight, output_shape):
-        if self.bias is not None:
-            updown = updown.reshape(self.bias.shape)
-            updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
-            updown = updown.reshape(output_shape)
-
-        if len(output_shape) == 4:
-            updown = updown.reshape(output_shape)
-
-        if orig_weight.size().numel() == updown.size().numel():
-            updown = updown.reshape(orig_weight.shape)
-
-        scale = self.scale if self.scale is not None else self.alpha / self.dim if self.dim is not None and self.alpha is not None else 1.0
-
-        return updown * scale * self.network.multiplier
extensions-builtin/Lora/networks.py

@@ -6,6 +6,7 @@ import network_lora
 import network_hada
 import network_ia3
 import network_lokr
+import network_full
 
 import torch
 from typing import Union

@@ -17,6 +18,7 @@ module_types = [
     network_hada.ModuleTypeHada(),
     network_ia3.ModuleTypeIa3(),
     network_lokr.ModuleTypeLokr(),
+    network_full.ModuleTypeFull(),
 ]
@@ -52,6 +54,15 @@ def convert_diffusers_name_to_compvis(key, is_sd2):
     m = []
 
+    if match(m, r"lora_unet_conv_in(.*)"):
+        return f'diffusion_model_input_blocks_0_0{m[0]}'
+
+    if match(m, r"lora_unet_conv_out(.*)"):
+        return f'diffusion_model_out_2{m[0]}'
+
+    if match(m, r"lora_unet_time_embedding_linear_(\d+)(.*)"):
+        return f"diffusion_model_time_embed_{m[0] * 2 - 2}{m[1]}"
+
     if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
         suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
         return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
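Note: the new patterns map diffusers-style keys for the unet's conv_in, conv_out and time-embedding layers onto compvis names. A sketch of the mapping, with hypothetical input keys (assuming match fills m with captured groups, numeric ones as ints):

    # "lora_unet_conv_in_weight"                 -> "diffusion_model_input_blocks_0_0_weight"
    # "lora_unet_conv_out_weight"                -> "diffusion_model_out_2_weight"
    # "lora_unet_time_embedding_linear_1_weight" -> "diffusion_model_time_embed_0_weight"  (1 * 2 - 2)
    # "lora_unet_time_embedding_linear_2_weight" -> "diffusion_model_time_embed_2_weight"  (2 * 2 - 2)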
@@ -179,7 +190,7 @@ def load_network(name, network_on_disk):
     return net
 
 
-def load_networks(names, multipliers=None):
+def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
     already_loaded = {}
 
     for net in loaded_networks:
@@ -218,7 +229,9 @@ def load_networks(names, multipliers=None):
             print(f"Couldn't find network with name {name}")
             continue
 
-        net.multiplier = multipliers[i] if multipliers else 1.0
+        net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
+        net.unet_multiplier = unet_multipliers[i] if unet_multipliers else 1.0
+        net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0
         loaded_networks.append(net)
 
     if failed_to_load_networks:
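Note: callers now pass parallel per-network lists. A usage sketch (the network names are hypothetical):

    import networks

    networks.load_networks(
        names=["style_lora", "character_lora"],
        te_multipliers=[0.8, 1.0],    # text-encoder strength per network
        unet_multipliers=[0.5, 1.0],  # unet strength per network
        dyn_dims=[16, None],          # dynamic rank cap; None keeps the full rank
    )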
@@ -250,7 +263,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
         return
 
     current_names = getattr(self, "network_current_names", ())
-    wanted_names = tuple((x.name, x.multiplier) for x in loaded_networks)
+    wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)
 
     weights_backup = getattr(self, "network_weights_backup", None)
     if weights_backup is None:
@@ -288,9 +301,10 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
             updown_k = module_k.calc_updown(self.in_proj_weight)
             updown_v = module_v.calc_updown(self.in_proj_weight)
             updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
+            updown_out = module_out.calc_updown(self.out_proj.weight)
 
             self.in_proj_weight += updown_qkv
-            self.out_proj.weight += module_out.calc_updown(self.out_proj.weight)
+            self.out_proj.weight += updown_out
             continue
 
         if module is None:
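Note: for torch.nn.MultiheadAttention the separate q/k/v deltas are stacked back into the fused in_proj_weight, which is (3 * embed_dim, embed_dim). A shape sketch (embed_dim assumed):

    import torch

    embed_dim = 64
    updown_q = torch.randn(embed_dim, embed_dim)
    updown_k = torch.randn(embed_dim, embed_dim)
    updown_v = torch.randn(embed_dim, embed_dim)

    updown_qkv = torch.vstack([updown_q, updown_k, updown_v])  # rows: q, then k, then v
    print(updown_qkv.shape)  # torch.Size([192, 64])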