Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
S
Stable Diffusion Webui
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Locked Files
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Security & Compliance
Security & Compliance
Dependency List
License Compliance
Packages
Packages
List
Container Registry
Analytics
Analytics
CI / CD
Code Review
Insights
Issues
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
novelai-storage
Stable Diffusion Webui
Commits
8111b556
Commit
8111b556
authored
Jan 03, 2023
by
brkirch
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add support for PyTorch nightly and local builds
parent
3bd73776
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
29 additions
and
6 deletions
+29
-6
modules/devices.py
modules/devices.py
+23
-5
webui.py
webui.py
+6
-1
No files found.
modules/devices.py
View file @
8111b556
...
...
@@ -133,8 +133,26 @@ def numpy_fix(self, *args, **kwargs):
return
orig_tensor_numpy
(
self
,
*
args
,
**
kwargs
)
# NOTE(review): this hunk appears to be the REMOVED side of the diff — the same
# three monkeypatches reappear below inside the new `if has_mps():` block with
# an added version upper bound. Reconstructed from a token-per-line scrape.
# PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
if has_mps() and version.parse(torch.__version__) < version.parse("1.13"):
    # Monkeypatch torch globals with the MPS workarounds defined earlier in
    # this module (tensor_to_fix / layer_norm_fix / numpy_fix are siblings
    # not visible in this hunk).
    torch.Tensor.to = tensor_to_fix
    torch.nn.functional.layer_norm = layer_norm_fix
    torch.Tensor.numpy = numpy_fix
# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
# Keep references to the unpatched implementations so the fixed wrappers
# installed below can delegate to the real cumsum.
orig_cumsum = torch.cumsum
orig_Tensor_cumsum = torch.Tensor.cumsum
def cumsum_fix(input, cumsum_func, *args, **kwargs):
    """Work around incorrect MPS cumsum results for some integer dtypes.

    Delegates to ``cumsum_func`` (the original torch implementation); when the
    tensor lives on an MPS device and the result dtype is one of the dtypes
    affected by pytorch/pytorch#89784, the computation is routed through the
    CPU and the result moved back to the original device.
    """
    if input.device.type == 'mps':
        # The output dtype defaults to the input's dtype unless overridden.
        result_dtype = kwargs.get('dtype', input.dtype)
        broken_dtypes = (torch.bool, torch.int8, torch.int16, torch.int64)
        if result_dtype in broken_dtypes:
            # Compute on CPU, then move the result back to the MPS device.
            return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
    return cumsum_func(input, *args, **kwargs)
# Apply MPS workarounds depending on the installed PyTorch version.
# NOTE(review): nesting reconstructed from a token-per-line scrape; the
# `orig_narrow` lines are placed at the elif level (applied for all >1.13.1
# builds, not only when the cumsum probe fails) — confirm against upstream.
if has_mps():
    if version.parse(torch.__version__) < version.parse("1.13"):
        # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
        torch.Tensor.to = tensor_to_fix
        torch.nn.functional.layer_norm = layer_norm_fix
        torch.Tensor.numpy = numpy_fix
    elif version.parse(torch.__version__) > version.parse("1.13.1"):
        # Probe for the cumsum bug: cumsum([1, 1]) must equal [1, 2]; if this
        # build computes it wrong for int16, install the CPU-roundtrip fix.
        if not torch.Tensor([1, 2]).to(torch.device("mps")).equal(torch.Tensor([1, 1]).to(torch.device("mps")).cumsum(0, dtype=torch.int16)):
            # Wrap both the functional and the method form with cumsum_fix,
            # delegating to the originals saved above.
            torch.cumsum = lambda input, *args, **kwargs: (cumsum_fix(input, orig_cumsum, *args, **kwargs))
            torch.Tensor.cumsum = lambda self, *args, **kwargs: (cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs))
        # narrow() on MPS: return a cloned copy instead of a view — presumably
        # to avoid broken views on newer MPS builds; TODO confirm rationale.
        orig_narrow = torch.narrow
        torch.narrow = lambda *args, **kwargs: (orig_narrow(*args, **kwargs).clone())
webui.py
View file @
8111b556
...
...
@@ -4,7 +4,7 @@ import threading
import
time
import
importlib
import
signal
import
threading
import
re
from
fastapi
import
FastAPI
from
fastapi.middleware.cors
import
CORSMiddleware
from
fastapi.middleware.gzip
import
GZipMiddleware
...
...
@@ -13,6 +13,11 @@ from modules import import_hook, errors
from
modules.call_queue
import
wrap_queued_call
,
queue_lock
,
wrap_gradio_gpu_call
from
modules.paths
import
script_path
import
torch
# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
if ".dev" in torch.__version__ or "+git" in torch.__version__:
    # The original pattern r'[\d.]+' also captured the dot separating the
    # release number from the "dev"/"git" suffix (e.g. "1.14.0.dev20221231"
    # matched as "1.14.0."), leaving a malformed version behind. Anchor the
    # match so it must start and end on a digit, yielding a clean "1.14.0".
    torch.__version__ = re.search(r'\d+(?:\.\d+)*', torch.__version__).group(0)
from
modules
import
shared
,
devices
,
sd_samplers
,
upscaler
,
extensions
,
localization
,
ui_tempdir
import
modules.codeformer_model
as
codeformer
import
modules.extras
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment