@@ -53,16 +53,16 @@ parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers",action='store_true',help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--xformers-flash-attention",action='store_true',help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
parser.add_argument("--deepdanbooru",action='store_true',help="does not do anything")
parser.add_argument("--opt-split-attention",action='store_true',help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-split-attention",action='store_true',help="prefer Doggettx's cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--opt-sub-quad-attention",action='store_true',help="prefer memory efficient sub-quadratic cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--sub-quad-q-chunk-size",type=int,help="query chunk size for the sub-quadratic cross-attention layer optimization to use",default=1024)
parser.add_argument("--sub-quad-kv-chunk-size",type=int,help="kv chunk size for the sub-quadratic cross-attention layer optimization to use",default=None)
parser.add_argument("--sub-quad-chunk-threshold",type=int,help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking",default=None)
parser.add_argument("--opt-split-attention-invokeai",action='store_true',help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1",action='store_true',help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--opt-split-attention-invokeai",action='store_true',help="prefer InvokeAI's cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--opt-split-attention-v1",action='store_true',help="prefer older version of split attention optimization for automatic choice of optimization")
parser.add_argument("--opt-sdp-attention",action='store_true',help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
parser.add_argument("--opt-sdp-no-mem-attention",action='store_true',help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
parser.add_argument("--disable-opt-split-attention",action='store_true',help="does not do anything")
parser.add_argument("--disable-nan-check",action='store_true',help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu",nargs='+',help="use CPU as torch device for specified modules",default=[],type=str.lower)
parser.add_argument("--listen",action='store_true',help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)  # not everyone has torch 2.x to use sdp
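
can_use_sdp gates the two --opt-sdp-* options: torch.nn.functional.scaled_dot_product_attention only exists in PyTorch 2.x. A minimal usage sketch with illustrative shapes (not taken from the webui source):

import torch
import torch.nn.functional as F

q = torch.randn(2, 8, 77, 64)  # (batch, heads, tokens, head_dim)
k = torch.randn(2, 8, 77, 64)
v = torch.randn(2, 8, 77, 64)

if hasattr(F, "scaled_dot_product_attention") and callable(F.scaled_dot_product_attention):
    out = F.scaled_dot_product_attention(q, k, v)  # fused attention, PyTorch 2.x
else:
    # Pre-2.x fallback: softmax(q @ k^T / sqrt(d)) @ v computed explicitly.
    attn = (q @ k.transpose(-2, -1)) * (q.shape[-1] ** -0.5)
    out = attn.softmax(dim=-1) @ v

PyTorch picks a backend automatically; the deterministic --opt-sdp-no-mem-attention variant corresponds to disabling the memory-efficient backend, which PyTorch 2.0 allows via the torch.backends.cuda.sdp_kernel(enable_mem_efficient=False) context manager.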
"s_min_uncond":OptionInfo(0,"Negative Guidance minimum sigma",gr.Slider,{"minimum":0.0,"maximum":4.0,"step":0.01}).link("PR","https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
"token_merging_ratio_img2img":OptionInfo(0.0,"Token merging ratio for img2img",gr.Slider,{"minimum":0.0,"maximum":0.9,"step":0.1}).info("only applies if non-zero and overrides above"),