Commit f300d0f2 authored by yfszzx

Merge branch 'Inspiron' of https://github.com/yfszzx/stable-diffusion-webui-plus into Inspiron

parents 9ba439b5 5bfa2b23
......@@ -62,8 +62,8 @@ titles = {
"Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",
"Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.",
"Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.",
"Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
"Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
"Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
"Loopback": "Process an image, use it as an input, repeat.",
......
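The new [datetime&lt;Format&gt;] and [datetime&lt;Format&gt;&lt;Time Zone&gt;] tags are resolved with the same strftime/pytz combination this commit adds to FilenameGenerator further down. A minimal sketch of that resolution, with a hypothetical pattern and output:

import datetime
import pytz

# hypothetical resolution of [datetime<%Y-%m-%d %H.%M.%S><Asia/Tokyo>]
time_format = "%Y-%m-%d %H.%M.%S"
time_zone = pytz.timezone("Asia/Tokyo")
stamp = datetime.datetime.now().astimezone(time_zone).strftime(time_format)
print(stamp)  # e.g. 2022-10-25 18.30.00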
......@@ -108,6 +108,9 @@ function processNode(node){
function dumpTranslations(){
    dumped = {}
    if (localization.rtl) {
        dumped.rtl = true
    }
    Object.keys(original_lines).forEach(function(text){
        if(dumped[text] !== undefined) return
......@@ -129,6 +132,24 @@ onUiUpdate(function(m){
document.addEventListener("DOMContentLoaded", function() {
    processNode(gradioApp())
    if (localization.rtl) {  // if the language reads right to left,
        (new MutationObserver((mutations, observer) => {  // wait for the style to load
            mutations.forEach(mutation => {
                mutation.addedNodes.forEach(node => {
                    if (node.tagName === 'STYLE') {
                        observer.disconnect();
                        for (const x of node.sheet.rules) {  // find all RTL media rules
                            if (Array.from(x.media || []).includes('rtl')) {
                                x.media.appendMedium('all');  // enable them
                            }
                        }
                    }
                })
            });
        })).observe(gradioApp(), { childList: true });
    }
})
function download_localization() {
......
This source diff could not be displayed because it is too large.
......@@ -45,7 +45,7 @@ def enable_tf32():
errors.run(enable_tf32, "Enabling TF32")
device = device_interrogate = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = None
device = device_interrogate = device_gfpgan = device_swinir = device_esrgan = device_scunet = device_codeformer = None
dtype = torch.float16
dtype_vae = torch.float16
......@@ -81,3 +81,7 @@ def autocast(disable=False):
        return contextlib.nullcontext()

    return torch.autocast("cuda")
# MPS workaround for https://github.com/pytorch/pytorch/issues/79383
def mps_contiguous(input_tensor, device):
    return input_tensor.contiguous() if device.type == 'mps' else input_tensor

def mps_contiguous_to(input_tensor, device):
    return mps_contiguous(input_tensor, device).to(device)
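These helpers exist because MPS can produce incorrect results for some operations on non-contiguous tensors (the linked pytorch issue); on every other backend mps_contiguous is a pass-through, so the only effect is the .to(device) move. A minimal usage sketch, using the devices module this diff modifies:

import torch
from modules import devices

x = torch.rand(1, 3, 64, 64).permute(0, 1, 3, 2)         # a non-contiguous view
x = devices.mps_contiguous_to(x, devices.device_esrgan)  # .contiguous() only on MPS, then moved to the device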
......@@ -190,7 +190,7 @@ def upscale_without_tiling(model, img):
    img = img[:, :, ::-1]
    img = np.ascontiguousarray(np.transpose(img, (2, 0, 1))) / 255
    img = torch.from_numpy(img).float()
    img = img.unsqueeze(0).to(devices.device_esrgan)
    img = devices.mps_contiguous_to(img.unsqueeze(0), devices.device_esrgan)
    with torch.no_grad():
        output = model(img)
    output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
......
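The surrounding lines are the usual HWC-to-NCHW preparation: the [:, :, ::-1] slice flips the channel order (the ESRGAN path feeds BGR), and because that slice has a negative stride, np.ascontiguousarray is required before torch.from_numpy, which rejects negatively-strided arrays. A toy rendition of the same steps:

import numpy as np
import torch

rgb = np.zeros((64, 64, 3), dtype=np.uint8)                     # stand-in for the input image
bgr = rgb[:, :, ::-1]                                           # channel flip; negative stride, non-contiguous
chw = np.ascontiguousarray(np.transpose(bgr, (2, 0, 1))) / 255  # HWC -> CHW, scaled to [0, 1]
tensor = torch.from_numpy(chw).float().unsqueeze(0)             # add the batch dimension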
......@@ -16,7 +16,7 @@ from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
from modules import sd_samplers, shared
from modules import sd_samplers, shared, script_callbacks
from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
......@@ -344,7 +344,10 @@ class FilenameGenerator:
time_datetime = datetime.datetime.now()
time_format = args[0] if len(args) > 0 else self.default_time_format
time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
try:
    time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
except pytz.exceptions.UnknownTimeZoneError as _:
    time_zone = None
time_zone_time = time_datetime.astimezone(time_zone)
try:
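Wrapping pytz.timezone in a try/except means a bad zone name in a filename pattern degrades to local time instead of breaking image saving. A minimal sketch of the same guard:

import datetime
import pytz

def safe_zone(name):
    # mirrors the guard above: unknown zone names fall back to None (local time)
    try:
        return pytz.timezone(name)
    except pytz.exceptions.UnknownTimeZoneError:
        return None

print(datetime.datetime.now().astimezone(safe_zone("Not/AZone")))  # local time, no exception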
......@@ -474,8 +477,10 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if forced_filename is None:
    if short_filename or seed is None:
        file_decoration = ""
    else:
    elif opts.save_to_dirs:
        file_decoration = opts.samples_filename_pattern or "[seed]"
    else:
        file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
    add_number = opts.save_images_add_number or file_decoration == ''
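The new elif changes only the default pattern: with save-to-dirs enabled the prompt typically already appears in the subdirectory name (the default directory pattern is prompt-based), so the default filename drops [prompt_spaces]. A toy rendering of the two defaults, with invented values:

seed, prompt_spaces = 12345, "a cat in a hat"
with_dirs = "[seed]".replace("[seed]", str(seed))
flat = "[seed]-[prompt_spaces]".replace("[seed]", str(seed)).replace("[prompt_spaces]", prompt_spaces)
print(with_dirs)  # 12345
print(flat)       # 12345-a cat in a hat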
......@@ -536,6 +541,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
    else:
        txt_fullfn = None

    script_callbacks.image_saved_callback(image, p, fullfn, txt_fullfn)

    return fullfn, txt_fullfn
......
import sys
import traceback
from collections import namedtuple
import inspect
def report_exception(c, job):
    print(f"Error executing callback {job} for {c.script}", file=sys.stderr)
    print(traceback.format_exc(), file=sys.stderr)
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
callbacks_model_loaded = []
callbacks_ui_tabs = []
callbacks_ui_settings = []
callbacks_image_saved = []
def clear_callbacks():
    callbacks_model_loaded.clear()
    callbacks_ui_tabs.clear()
    callbacks_ui_settings.clear()
    callbacks_image_saved.clear()
def model_loaded_callback(sd_model):
    for callback in callbacks_model_loaded:
        callback(sd_model)
    for c in callbacks_model_loaded:
        try:
            c.callback(sd_model)
        except Exception:
            report_exception(c, 'model_loaded_callback')
def ui_tabs_callback():
    res = []
    for callback in callbacks_ui_tabs:
        res += callback() or []
    for c in callbacks_ui_tabs:
        try:
            res += c.callback() or []
        except Exception:
            report_exception(c, 'ui_tabs_callback')

    return res
def ui_settings_callback():
    for callback in callbacks_ui_settings:
        callback()
    for c in callbacks_ui_settings:
        try:
            c.callback()
        except Exception:
            report_exception(c, 'ui_settings_callback')
def add_callback(callbacks, fun):
    stack = [x for x in inspect.stack() if x.filename != __file__]
    filename = stack[0].filename if len(stack) > 0 else 'unknown file'

    callbacks.append(ScriptCallback(filename, fun))
def image_saved_callback(image, p, fullfn, txt_fullfn):
    for callback in callbacks_image_saved:
        callback(image, p, fullfn, txt_fullfn)
def on_model_loaded(callback):
    """register a function to be called when the stable diffusion model is created; the model is
    passed as an argument"""
    callbacks_model_loaded.append(callback)
    add_callback(callbacks_model_loaded, callback)
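With the ScriptCallback conversion, each hook is attributed to the file that registered it and an exception in one callback no longer aborts the rest. Registration from an extension is unchanged; a minimal sketch:

from modules import script_callbacks

def announce_model(sd_model):
    # runs whenever the Stable Diffusion model is created or reloaded
    print("model loaded:", type(sd_model).__name__)

script_callbacks.on_model_loaded(announce_model)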
def on_ui_tabs(callback):
......@@ -44,10 +76,15 @@ def on_ui_tabs(callback):
    title is tab text displayed to user in the UI
    elem_id is HTML id for the tab
    """
    callbacks_ui_tabs.append(callback)
    add_callback(callbacks_ui_tabs, callback)
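A minimal sketch of an extension tab, assuming the (gradio component, title, elem_id) tuple shape the docstring above describes:

import gradio as gr
from modules import script_callbacks

def add_demo_tab():
    with gr.Blocks() as demo_tab:
        gr.Markdown("Hello from an extension tab")
    return [(demo_tab, "Demo", "demo_tab")]

script_callbacks.on_ui_tabs(add_demo_tab)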
def on_ui_settings(callback):
    """register a function to be called before UI settings are populated; add your settings
    by using shared.opts.add_option(shared.OptionInfo(...)) """
    callbacks_ui_settings.append(callback)


def on_save_imaged(callback):
    """register a function to be called after modules.images.save_image is called; the callback
    receives the saved image, the processing object p, and the fullfn/txt_fullfn values that save_image returns"""
    callbacks_image_saved.append(callback)
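A minimal sketch of a consumer for the new image-saved hook; the callback signature (image, p, fullfn, txt_fullfn) matches the invocation added to save_image earlier in this diff:

from modules import script_callbacks

def log_saved(image, p, fullfn, txt_fullfn):
    # fullfn is the saved image path; txt_fullfn is the accompanying .txt, or None
    print("saved", fullfn, "info file:", txt_fullfn)

script_callbacks.on_save_imaged(log_saved)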
......@@ -54,9 +54,8 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
        img = img[:, :, ::-1]
        img = np.moveaxis(img, 2, 0) / 255
        img = torch.from_numpy(img).float()
        img = img.unsqueeze(0).to(device)
        img = devices.mps_contiguous_to(img.unsqueeze(0), device)
        img = img.to(device)
        with torch.no_grad():
            output = model(img)
        output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
......
......@@ -228,7 +228,7 @@ class VanillaStableDiffusionSampler:
        unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}

        samples = self.launch_sampling(steps, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
        samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))

        return samples
......@@ -429,7 +429,7 @@ class KDiffusionSampler:
        self.model_wrap_cfg.init_latent = x
        self.last_latent = x

        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
        samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
            'cond': conditioning,
            'image_cond': image_conditioning,
            'uncond': unconditional_conditioning,
......
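Both samplers previously passed the full steps count to launch_sampling, but img2img only runs the last t_enc (+1) steps of the schedule, so progress tracking overshot. A sketch of the relation, assuming the default img2img step setup used elsewhere in the webui (the formula here is an assumption, not part of this diff):

steps = 20
denoising_strength = 0.6
t_enc = int(min(denoising_strength, 0.999) * steps)  # assumed formula from the img2img step setup
print(t_enc + 1)  # 13 -> the count now passed to launch_sampling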
......@@ -58,7 +58,7 @@ parser.add_argument("--opt-split-attention", action='store_true', help="force-en
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--use-cpu", nargs='+',choices=['all', 'sd', 'interrogate', 'gfpgan', 'bsrgan', 'esrgan', 'scunet', 'codeformer'], help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--use-cpu", nargs='+',choices=['all', 'sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'], help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
......@@ -97,8 +97,8 @@ restricted_opts = [
"outdir_save",
]
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
    (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'bsrgan', 'esrgan', 'scunet', 'codeformer'])
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
    (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
device = devices.device
weight_load_location = None if cmd_opts.lowram else "cpu"
......
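The parallel assignment above unpacks a generator expression positionally against the variable list, so adding swinir means editing both the variable tuple and the name list in lockstep. A dict-based sketch of the same selection logic:

use_cpu = ['swinir']  # e.g. from --use-cpu swinir
names = ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer']
device_for = {
    name: 'cpu' if ('all' in use_cpu or name in use_cpu) else 'cuda'  # get_optimal_device() in the real code
    for name in names
}
print(device_for['swinir'])  # cpu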
......@@ -7,8 +7,8 @@ from PIL import Image
from basicsr.utils.download_util import load_file_from_url
from tqdm import tqdm
from modules import modelloader
from modules.shared import cmd_opts, opts, device
from modules import modelloader, devices
from modules.shared import cmd_opts, opts
from modules.swinir_model_arch import SwinIR as net
from modules.swinir_model_arch_v2 import Swin2SR as net2
from modules.upscaler import Upscaler, UpscalerData
......@@ -42,7 +42,7 @@ class UpscalerSwinIR(Upscaler):
        model = self.load_model(model_file)
        if model is None:
            return img

        model = model.to(device)
        model = model.to(devices.device_swinir)
        img = upscale(img, model)
        try:
            torch.cuda.empty_cache()
......@@ -111,7 +111,7 @@ def upscale(
    img = img[:, :, ::-1]
    img = np.moveaxis(img, 2, 0) / 255
    img = torch.from_numpy(img).float()
    img = img.unsqueeze(0).to(device)
    img = devices.mps_contiguous_to(img.unsqueeze(0), devices.device_swinir)
    with torch.no_grad(), precision_scope("cuda"):
        _, _, h_old, w_old = img.size()
        h_pad = (h_old // window_size + 1) * window_size - h_old
......@@ -139,8 +139,8 @@ def inference(img, model, tile, tile_overlap, window_size, scale):
    stride = tile - tile_overlap
    h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
    w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
    E = torch.zeros(b, c, h * sf, w * sf, dtype=torch.half, device=device).type_as(img)
    W = torch.zeros_like(E, dtype=torch.half, device=device)
    E = torch.zeros(b, c, h * sf, w * sf, dtype=torch.half, device=devices.device_swinir).type_as(img)
    W = torch.zeros_like(E, dtype=torch.half, device=devices.device_swinir)
    with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
        for h_idx in h_idx_list:
......
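inference() accumulates each upscaled tile into E and a matching weight mask into W, so overlapping regions are averaged rather than seamed when E is divided by W after the loop (that part of the function is outside this hunk). A toy, self-contained version of the accumulation:

import torch

h = w = 96; tile = 64; stride = 48  # stride = tile - tile_overlap
img = torch.rand(1, 3, h, w)
E = torch.zeros_like(img)
W = torch.zeros_like(img)
for hi in list(range(0, h - tile, stride)) + [h - tile]:
    for wi in list(range(0, w - tile, stride)) + [w - tile]:
        patch = img[..., hi:hi + tile, wi:wi + tile]  # stand-in for model(tile)
        E[..., hi:hi + tile, wi:wi + tile] += patch
        W[..., hi:hi + tile, wi:wi + tile] += 1
result = E / W  # overlaps are averaged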
......@@ -515,3 +515,77 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
max-height: 480px !important;
min-height: 480px !important;
}
/* The following handles localization for right-to-left (RTL) languages like Arabic.
The rtl media type will only be activated by the logic in javascript/localization.js.
If you change anything above, make sure it is RTL compliant by running
your changes through converters like https://cssjanus.github.io/ or https://rtlcss.com/.
Then add the RTL counterpart to the rtl section below only if it is needed.*/
@media rtl {
/* this part was manually added */
:host {
direction: rtl;
}
.output-html:has(.performance), .gr-text-input {
direction: ltr;
}
.gr-radio, .gr-checkbox{
margin-left: 0.25em;
}
/* this part was automatically generated with few manual modifications */
.performance .time {
margin-right: unset;
margin-left: 0;
}
.justify-center.overflow-x-scroll {
justify-content: right;
}
.justify-center.overflow-x-scroll button:first-of-type {
margin-left: unset;
margin-right: auto;
}
.justify-center.overflow-x-scroll button:last-of-type {
margin-right: unset;
margin-left: auto;
}
#settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{
margin-right: unset;
margin-left: 8em;
}
#txt2img_progressbar, #img2img_progressbar, #ti_progressbar{
right: unset;
left: 0;
}
.progressDiv .progress{
padding: 0 0 0 8px;
text-align: left;
}
#lightboxModal{
left: unset;
right: 0;
}
.modalPrev, .modalNext{
border-radius: 3px 0 0 3px;
}
.modalNext {
right: unset;
left: 0;
border-radius: 0 3px 3px 0;
}
#imageARPreview{
left:unset;
right:0px;
}
#txt2img_skip, #img2img_skip{
right: unset;
left: 0px;
}
#context-menu{
box-shadow:-1px 1px 2px #CE6400;
}
.gr-box > div > div > input.gr-text-input{
right: unset;
left: 0.5em;
}
}