novelai-storage / Stable Diffusion Webui

Commit ec718f76
authored Oct 17, 2023 by v0xie

wip incorrect OFT implementation

parent 861cbd56
Showing 2 changed files with 87 additions and 0 deletions

extensions-builtin/Lora/network_oft.py   +82  -0
extensions-builtin/Lora/networks.py      +5   -0
extensions-builtin/Lora/network_oft.py  (new file, 0 → 100644)
import torch
import network


class ModuleTypeOFT(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["oft_blocks"]):
            return NetworkModuleOFT(net, weights)

        return None


# adapted from https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py
class NetworkModuleOFT(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.oft_blocks = weights.w["oft_blocks"]
        self.alpha = weights.w["alpha"]

        self.dim = self.oft_blocks.shape[0]
        self.num_blocks = self.dim

        #if type(self.alpha) == torch.Tensor:
        #    self.alpha = self.alpha.detach().numpy()

        if "Linear" in self.sd_module.__class__.__name__:
            self.out_dim = self.sd_module.out_features
        elif "Conv" in self.sd_module.__class__.__name__:
            self.out_dim = self.sd_module.out_channels

        self.constraint = self.alpha * self.out_dim
        self.block_size = self.out_dim // self.num_blocks

        self.oft_multiplier = self.multiplier()

        # replace forward method of original linear rather than replacing the module
        # self.org_forward = self.sd_module.forward
        # self.sd_module.forward = self.forward

    def get_weight(self):
        block_Q = self.oft_blocks - self.oft_blocks.transpose(1, 2)
        norm_Q = torch.norm(block_Q.flatten())
        new_norm_Q = torch.clamp(norm_Q, max=self.constraint)
        block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
        I = torch.eye(self.block_size, device=self.oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1)
        block_R = torch.matmul(I + block_Q, (I - block_Q).inverse())

        block_R_weighted = self.oft_multiplier * block_R + (1 - self.oft_multiplier) * I
        R = torch.block_diag(*block_R_weighted)

        return R

    def calc_updown(self, orig_weight):
        oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype)

        block_Q = oft_blocks - oft_blocks.transpose(1, 2)
        norm_Q = torch.norm(block_Q.flatten())
        new_norm_Q = torch.clamp(norm_Q, max=self.constraint)
        block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
        I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1)
        block_R = torch.matmul(I + block_Q, (I - block_Q).inverse())
        block_R_weighted = self.oft_multiplier * block_R + (1 - self.oft_multiplier) * I
        R = torch.block_diag(*block_R_weighted)
        #R = self.get_weight().to(orig_weight.device, dtype=orig_weight.dtype)

        # W = R*W_0
        updown = orig_weight + R
        output_shape = [R.size(0), orig_weight.size(1)]
        return self.finalize_updown(updown, orig_weight, output_shape)

    # def forward(self, x, y=None):
    #     x = self.org_forward(x)
    #     if self.oft_multiplier == 0.0:
    #         return x
    #     R = self.get_weight().to(x.device, dtype=x.dtype)
    #     if x.dim() == 4:
    #         x = x.permute(0, 2, 3, 1)
    #         x = torch.matmul(x, R)
    #         x = x.permute(0, 3, 1, 2)
    #     else:
    #         x = torch.matmul(x, R)
    #     return x
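The commit message flags this as a work-in-progress, incorrect implementation. The in-code comment states the intended orthogonal finetuning update as W = R*W_0, but calc_updown returns orig_weight + R, an additive rather than multiplicative update (and for non-square weights the two tensors would not even broadcast). A minimal standalone sketch of the difference, assuming a hypothetical square weight W0 and an identity rotation R; this is illustration only, not part of the commit:

import torch

dim = 4
W0 = torch.randn(dim, dim)   # stand-in for the original layer weight W_0
R = torch.eye(dim)           # identity rotation: should leave the weight unchanged

W_mul = R @ W0               # multiplicative form from the comment, W = R @ W_0
W_add = W0 + R               # additive form actually returned by calc_updown

print(torch.allclose(W_mul, W0))   # True: identity rotation is a no-op
print(torch.allclose(W_add, W0))   # False: the diagonal is shifted by 1

With an identity rotation the multiplicative form leaves the weight untouched while the additive form perturbs it, which is consistent with the commit being labelled incorrect.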
extensions-builtin/Lora/networks.py
@@ -11,6 +11,7 @@ import network_ia3
 import network_lokr
 import network_full
 import network_norm
+import network_oft

 import torch
 from typing import Union
@@ -28,6 +29,7 @@ module_types = [
     network_full.ModuleTypeFull(),
     network_norm.ModuleTypeNorm(),
     network_glora.ModuleTypeGLora(),
+    network_oft.ModuleTypeOFT(),
 ]
@@ -183,6 +185,9 @@ def load_network(name, network_on_disk):
         elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
             key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
             sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+        elif sd_module is None and "oft_unet" in key_network_without_network_parts:
+            key = key_network_without_network_parts.replace("oft_unet", "diffusion_model")
+            sd_module = shared.sd_model.network_layer_mapping.get(key, None)

         # some SD1 Loras also have correct compvis keys
         if sd_module is None:
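For context on the new branch in load_network: OFT weights (presumably produced by the kohya-ss trainer referenced in network_oft.py) keep their UNet keys under an "oft_unet" prefix, while the webui's network_layer_mapping is keyed by the compvis-style "diffusion_model" prefix, so the added lines simply rewrite the prefix before the lookup. A small illustration, using a hypothetical key name:

# hypothetical key name, for illustration only
key_network_without_network_parts = "oft_unet_down_blocks_0_attentions_0_proj_in"
key = key_network_without_network_parts.replace("oft_unet", "diffusion_model")
print(key)  # diffusion_model_down_blocks_0_attentions_0_proj_in
# the rewritten key is then looked up in shared.sd_model.network_layer_mapping,
# just like the lora_te1_text_model branch above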