Commit d213d6ca authored by AUTOMATIC

removed the option to use 2x more memory when generating previews

added an option to always only show one image in previews
removed duplicate code
parent 4fdb53c1
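Note: the removed fast path decoded the whole latent batch with one decode_first_stage call, which is what cost the extra VRAM during previews; the surviving path decodes one latent at a time. A minimal runnable sketch of that per-sample pattern, with a stub standing in for processing.decode_first_stage (the stub and the random batch are assumptions for illustration, not project code):

    import numpy as np
    import torch
    from PIL import Image

    def fake_decode_first_stage(batch):
        # Stub: the real processing.decode_first_stage runs the VAE decoder.
        return batch

    def single_sample_to_image(sample):
        # Decoding one (1, C, H, W) latent at a time keeps peak memory flat
        # regardless of batch size, at a small speed cost.
        x = fake_decode_first_stage(sample.unsqueeze(0))[0]
        x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)
        x = 255. * np.moveaxis(x.cpu().numpy(), 0, 2)
        return Image.fromarray(x.astype(np.uint8))

    samples = torch.rand(4, 3, 64, 64) * 2 - 1   # stand-in latent batch
    previews = [single_sample_to_image(s) for s in samples]
    print(len(previews), previews[0].size)        # 4 (64, 64)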
modules/sd_samplers.py
@@ -71,6 +71,7 @@ sampler_extra_params = {
     'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
 }
 
+
 def setup_img2img_steps(p, steps=None):
     if opts.img2img_fix_steps or steps is not None:
         steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
@@ -82,37 +83,21 @@ def setup_img2img_steps(p, steps=None):
     return steps, t_enc
 
 
-def sample_to_image(samples):
-    x_sample = processing.decode_first_stage(shared.sd_model, samples[0:1])[0]
+def single_sample_to_image(sample):
+    x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
     x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
     x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
     x_sample = x_sample.astype(np.uint8)
     return Image.fromarray(x_sample)
 
 
+def sample_to_image(samples):
+    return single_sample_to_image(samples[0])
+
+
 def samples_to_image_grid(samples):
-    progress_images = []
-    for i in range(len(samples)):
-        # Decode the samples individually to reduce VRAM usage at the cost of a bit of speed.
-        x_sample = processing.decode_first_stage(shared.sd_model, samples[i:i+1])[0]
-        x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
-        x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
-        x_sample = x_sample.astype(np.uint8)
-        progress_images.append(Image.fromarray(x_sample))
-
-    return images.image_grid(progress_images)
-
-
-def samples_to_image_grid_combined(samples):
-    progress_images = []
-    # Decode all samples at once to increase speed at the cost of VRAM usage.
-    x_samples = processing.decode_first_stage(shared.sd_model, samples)
-    x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
-
-    for x_sample in x_samples:
-        x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
-        x_sample = x_sample.astype(np.uint8)
-        progress_images.append(Image.fromarray(x_sample))
-
-    return images.image_grid(progress_images)
+    return images.image_grid([single_sample_to_image(sample) for sample in samples])
 
 
 def store_latent(decoded):
     state.current_latent = decoded
...
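The grid preview above relies on images.image_grid from modules/images.py. A minimal stand-in showing what that call produces (this sketch is an assumption; the project's helper also chooses the row/column layout automatically):

    from PIL import Image

    def image_grid_sketch(imgs, cols=2):
        # Paste equally sized tiles left-to-right, top-to-bottom.
        rows = (len(imgs) + cols - 1) // cols
        w, h = imgs[0].size
        grid = Image.new('RGB', (cols * w, rows * h))
        for i, img in enumerate(imgs):
            grid.paste(img, ((i % cols) * w, (i // cols) * h))
        return grid

    tiles = [Image.new('RGB', (64, 64), c) for c in ('red', 'green', 'blue', 'white')]
    print(image_grid_sketch(tiles).size)  # (128, 128)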
modules/shared.py
@@ -294,7 +294,7 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
 options_templates.update(options_section(('ui', "User interface"), {
     "show_progressbar": OptionInfo(True, "Show progressbar"),
     "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
-    "progress_decode_combined": OptionInfo(False, "Decode all progress images at once. (Slighty speeds up progress generation but consumes significantly more VRAM with large batches.)"),
+    "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
     "return_grid": OptionInfo(True, "Show grid in results for web"),
     "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
     "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
...
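For readers unfamiliar with the options machinery: each OptionInfo pairs a default value with its Settings label, and the value is later read as an attribute of opts, as in the ui.py hunk below. A simplified sketch of that flow (the real Options class in modules/shared.py does much more; every name ending in Sketch here is hypothetical):

    class OptionInfoSketch:
        def __init__(self, default, label, component=None, component_args=None):
            self.default = default
            self.label = label

    class OptsSketch:
        def __init__(self, templates):
            # Seed each option with its template default.
            self.data = {name: info.default for name, info in templates.items()}
        def __getattr__(self, name):
            return self.data[name]

    templates = {
        "show_progress_grid": OptionInfoSketch(
            True, "Show previews of all images generated in a batch as a grid"),
    }
    opts = OptsSketch(templates)
    print(opts.show_progress_grid)  # True until the user changes it in Settings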
modules/ui.py
@@ -318,10 +318,10 @@ def check_progress_call(id_part):
     if shared.parallel_processing_allowed:
 
         if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
-            if opts.progress_decode_combined:
-                shared.state.current_image = modules.sd_samplers.samples_to_image_grid_combined(shared.state.current_latent)
-            else:
+            if opts.show_progress_grid:
                 shared.state.current_image = modules.sd_samplers.samples_to_image_grid(shared.state.current_latent)
+            else:
+                shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
             shared.state.current_image_sampling_step = shared.state.sampling_step
 
     image = shared.state.current_image
...
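The effect of the new setting, isolated from the UI plumbing as a toy example (the helper and lambdas below are illustrative stand-ins, not project code):

    def pick_preview(latents, show_progress_grid, to_grid, to_single):
        # Mirrors the branch above: full-batch grid vs. first image only.
        return to_grid(latents) if show_progress_grid else to_single(latents)

    to_grid = lambda xs: f"grid of {len(xs)} images"
    to_single = lambda xs: "first image only"
    print(pick_preview([0, 1, 2], True, to_grid, to_single))   # grid of 3 images
    print(pick_preview([0, 1, 2], False, to_grid, to_single))  # first image only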