description: Which commit are you running? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Instead, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
// In the course of research, it was found that the img tag is very harmful when zooming and creates white canvases. This hack lets you almost never think about this problem; it has no effect on the webui.
parser.add_argument("--skip-torch-cuda-test",action='store_true',help="launch.py argument: do not check if CUDA is able to work properly")
parser.add_argument("--reinstall-xformers",action='store_true',help="launch.py argument: install the appropriate version of xformers even if you have some version already installed")
parser.add_argument("--reinstall-torch",action='store_true',help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
parser.add_argument("--update-check",action='store_true',help="launch.py argument: chck for updates at startup")
parser.add_argument("--update-check",action='store_true',help="launch.py argument: check for updates at startup")
parser.add_argument("--test-server",action='store_true',help="launch.py argument: configure server for testing")
parser.add_argument("--skip-prepare-environment",action='store_true',help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install",action='store_true',help="launch.py argument: skip installation of packages")
"comma_padding_backtrack":OptionInfo(20,"Prompt word wrap length limit",gr.Slider,{"minimum":0,"maximum":74,"step":1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
"CLIP_stop_at_last_layers":OptionInfo(1,"Clip skip",gr.Slider,{"minimum":1,"maximum":12,"step":1}).link("wiki","https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP nrtwork; 1 ignores none, 2 ignores one layer"),
"upcast_attn":OptionInfo(False,"Upcast cross attention layer to float32"),
"randn_source":OptionInfo("GPU","Random number generator source.",gr.Radio,{"choices":["GPU","CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different vidocard vendors"),
"randn_source":OptionInfo("GPU","Random number generator source.",gr.Radio,{"choices":["GPU","CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
"s_min_uncond":OptionInfo(0,"Negative Guidance minimum sigma",gr.Slider,{"minimum":0.0,"maximum":4.0,"step":0.01}).link("PR","https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
"s_min_uncond":OptionInfo(0.0,"Negative Guidance minimum sigma",gr.Slider,{"minimum":0.0,"maximum":4.0,"step":0.01}).link("PR","https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
"token_merging_ratio_img2img":OptionInfo(0.0,"Token merging ratio for img2img",gr.Slider,{"minimum":0.0,"maximum":0.9,"step":0.1}).info("only applies if non-zero and overrides above"),
"token_merging_ratio_hr":OptionInfo(0.0,"Token merging ratio for high-res pass",gr.Slider,{"minimum":0.0,"maximum":0.9,"step":0.1}).info("only applies if non-zero and overrides above"),
hr_prompt=gr.Textbox(label="Prompt",elem_id="hires_prompt",show_label=False,lines=3,placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.",elem_classes=["prompt"])
hr_prompt=gr.Textbox(label="Hires prompt",elem_id="hires_prompt",show_label=False,lines=3,placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.",elem_classes=["prompt"])
withgr.Column(scale=80):
withgr.Row():
hr_negative_prompt=gr.Textbox(label="Negative prompt",elem_id="hires_neg_prompt",show_label=False,lines=3,placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.",elem_classes=["prompt"])
hr_negative_prompt=gr.Textbox(label="Hires negative prompt",elem_id="hires_neg_prompt",show_label=False,lines=3,placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.",elem_classes=["prompt"])
elifcategory=="batch":
ifnotopts.dimensions_and_batch_together:
...
...
@@ -1753,8 +1752,7 @@ def create_ui():
try:
results=modules.extras.run_modelmerger(*args)
exceptExceptionase:
print("Error loading/saving model file:",file=sys.stderr)
print(traceback.format_exc(),file=sys.stderr)
print_error("Error loading/saving model file",exc_info=True)
modules.sd_models.list_models()# to remove the potentially missing models from the list