novelai-storage / Stable Diffusion Webui · Commits

Commit 14978420
authored Oct 30, 2022 by AUTOMATIC

rework #3722 to not introduce duplicate code
parent 060ee5d3

Showing 3 changed files with 35 additions and 49 deletions (+35 / -49):
modules/api/api.py  +13 -30
modules/shared.py   +19 -3
webui.py            +3 -16
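
In short: #3722 had copied webui.py's per-job setup and teardown into the API as `before_gpu_call`/`after_gpu_call`. This commit moves that logic into two new methods on the shared `State` object, `begin()` and `end()`, which the API handlers and `wrap_gradio_gpu_call` now both call, so any future change to the reset logic lands in one place instead of two. A minimal self-contained sketch of the resulting shape (illustrative stand-ins, not the webui code):

```python
from threading import Lock


class State:
    """Illustrative stand-in for modules.shared.State after this commit."""

    def __init__(self):
        self.job = ""
        self.job_count = 0

    def begin(self):
        # One canonical reset, formerly duplicated at every call site.
        self.job_count = -1

    def end(self):
        # One canonical teardown.
        self.job = ""
        self.job_count = 0


state = State()
queue_lock = Lock()


def run_with_state(func):
    """The call shape the API handlers and wrap_gradio_gpu_call now share."""
    state.begin()
    with queue_lock:
        result = func()
    state.end()
    return result


print(run_with_state(lambda: "done"))  # -> done
```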

modules/api/api.py
@@ -9,31 +9,6 @@ from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusion
 from modules.sd_samplers import all_samplers
 from modules.extras import run_extras, run_pnginfo
 
-# copy from wrap_gradio_gpu_call of webui.py
-# because queue lock will be acquired in api handlers
-# and time start needs to be set
-# the function has been modified into two parts
-
-def before_gpu_call():
-    devices.torch_gc()
-
-    shared.state.sampling_step = 0
-    shared.state.job_count = -1
-    shared.state.job_no = 0
-    shared.state.job_timestamp = shared.state.get_job_timestamp()
-    shared.state.current_latent = None
-    shared.state.current_image = None
-    shared.state.current_image_sampling_step = 0
-    shared.state.skipped = False
-    shared.state.interrupted = False
-    shared.state.textinfo = None
-    shared.state.time_start = time.time()
-
-def after_gpu_call():
-    shared.state.job = ""
-    shared.state.job_count = 0
-
-    devices.torch_gc()
-
+
 def upscaler_to_index(name: str):
     try:
@@ -41,8 +16,10 @@ def upscaler_to_index(name: str):
     except:
         raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be on of these: {' , '.join([x.name for x in sd_upscalers])}")
 
+
 sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None)
 
+
 def setUpscalers(req: dict):
     reqDict = vars(req)
     reqDict['extras_upscaler_1'] = upscaler_to_index(req.upscaler_1)
@@ -51,6 +28,7 @@ def setUpscalers(req: dict):
     reqDict.pop('upscaler_2')
     return reqDict
 
+
 class Api:
     def __init__(self, app, queue_lock):
         self.router = APIRouter()
@@ -78,10 +56,13 @@ class Api:
         )
         p = StableDiffusionProcessingTxt2Img(**vars(populate))
         # Override object param
-        before_gpu_call()
+
+        shared.state.begin()
+
         with self.queue_lock:
             processed = process_images(p)
-        after_gpu_call()
+
+        shared.state.end()
 
         b64images = list(map(encode_pil_to_base64, processed.images))
@@ -119,11 +100,13 @@ class Api:
         imgs = [img] * p.batch_size
 
         p.init_images = imgs
         # Override object param
-        before_gpu_call()
+
+        shared.state.begin()
+
         with self.queue_lock:
             processed = process_images(p)
-        after_gpu_call()
+
+        shared.state.end()
 
         b64images = list(map(encode_pil_to_base64, processed.images))
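
One detail worth pausing on in the second hunk above: the `sampler_to_index` lambda does a case-insensitive lookup over `enumerate(all_samplers)` and returns `None` on a miss. A standalone demonstration of that behavior (the sampler list is a stand-in; `SimpleNamespace` mimics objects exposing a `.name` attribute the way real samplers do):

```python
# Stand-in demo of the sampler_to_index lookup from the hunk above;
# the two-entry sampler list is hypothetical, not the webui's real list.
from types import SimpleNamespace

all_samplers = [SimpleNamespace(name="Euler"), SimpleNamespace(name="DDIM")]

sampler_to_index = lambda name: next(
    filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)),
    None,
)

print(sampler_to_index("ddim"))  # -> (1, namespace(name='DDIM'))
print(sampler_to_index("plms"))  # -> None; callers must handle the miss
```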

modules/shared.py
@@ -144,9 +144,6 @@ class State:
         self.sampling_step = 0
         self.current_image_sampling_step = 0
 
-    def get_job_timestamp(self):
-        return datetime.datetime.now().strftime("%Y%m%d%H%M%S")  # shouldn't this return job_timestamp?
-
     def dict(self):
         obj = {
             "skipped": self.skipped,
@@ -160,6 +157,25 @@ class State:
         return obj
 
+    def begin(self):
+        self.sampling_step = 0
+        self.job_count = -1
+        self.job_no = 0
+        self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+        self.current_latent = None
+        self.current_image = None
+        self.current_image_sampling_step = 0
+        self.skipped = False
+        self.interrupted = False
+        self.textinfo = None
+
+        devices.torch_gc()
+
+    def end(self):
+        self.job = ""
+        self.job_count = 0
+
+        devices.torch_gc()
+
 state = State()
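
`State.begin()` reproduces the reset block of the removed `before_gpu_call`, inlining the deleted `get_job_timestamp` helper (whose trailing comment questioned its own behavior); note that `time_start`, which `before_gpu_call` set, has no counterpart in `begin()` as shown. The inlined strftime pattern yields a compact 14-digit stamp, which a quick standard-library check confirms:

```python
# Quick check of the "%Y%m%d%H%M%S" job_timestamp format used above.
import datetime

stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
print(stamp)  # e.g. 20221030143759 (year, month, day, hour, minute, second)
assert len(stamp) == 14 and stamp.isdigit()
```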

webui.py
@@ -46,26 +46,13 @@ def wrap_queued_call(func):
 def wrap_gradio_gpu_call(func, extra_outputs=None):
     def f(*args, **kwargs):
-        devices.torch_gc()
-
-        shared.state.sampling_step = 0
-        shared.state.job_count = -1
-        shared.state.job_no = 0
-        shared.state.job_timestamp = shared.state.get_job_timestamp()
-        shared.state.current_latent = None
-        shared.state.current_image = None
-        shared.state.current_image_sampling_step = 0
-        shared.state.skipped = False
-        shared.state.interrupted = False
-        shared.state.textinfo = None
+        shared.state.begin()
 
         with queue_lock:
             res = func(*args, **kwargs)
 
-        shared.state.job = ""
-        shared.state.job_count = 0
-
-        devices.torch_gc()
+        shared.state.end()
 
         return res
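
Put together, the wrapper after this commit collapses to the shape sketched below. This is a self-contained approximation: `shared` and `queue_lock` are stand-ins for the webui globals, and the closing `return f` is assumed from the unshown remainder of the function:

```python
# Self-contained sketch of wrap_gradio_gpu_call's post-commit shape.
from threading import Lock
from types import SimpleNamespace

queue_lock = Lock()  # stand-in for webui.py's module-level lock
shared = SimpleNamespace(state=SimpleNamespace(
    begin=lambda: None,  # stand-in for State.begin()
    end=lambda: None,    # stand-in for State.end()
))


def wrap_gradio_gpu_call(func, extra_outputs=None):
    def f(*args, **kwargs):
        shared.state.begin()   # single shared reset path
        with queue_lock:       # GPU work stays serialized
            res = func(*args, **kwargs)
        shared.state.end()     # single shared teardown path
        return res
    return f                   # assumed from the unshown function tail


doubled = wrap_gradio_gpu_call(lambda x: x * 2)
print(doubled(21))  # -> 42
```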