Commit 6785331e authored by AUTOMATIC

keep textual inversion dataset latents in CPU memory to save a bit of VRAM

parent c7543d49
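The pattern behind this commit: encode each training image to a latent once, park the latent in CPU RAM instead of VRAM, and copy it back to the GPU only for the single training step that needs it, freeing the GPU copy immediately afterwards. The sketch below is a minimal illustration of that idea, not the webui's actual code: `encode` and `model` are hypothetical callables standing in for `model.get_first_stage_encoding`/`shared.sd_model`, and plain `torch.device` objects replace the `modules.devices` helpers used in the diff. The trade-off is one small host-to-device copy per step in exchange for keeping the whole latent dataset out of VRAM.

```python
import torch

# Assumed devices; the webui resolves these via modules.devices instead.
cpu = torch.device("cpu")
gpu = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def precompute_latents(images, encode):
    """Encode every image once on the GPU, then store the latents in CPU RAM."""
    dataset = []
    for img in images:
        with torch.no_grad():
            latent = encode(img.to(gpu))   # encode on the GPU
        dataset.append(latent.to(cpu))     # ...but keep the result on the CPU
    return dataset


def training_step(model, latent_cpu, cond):
    """Move one cached latent back to the GPU just for the forward pass."""
    x = latent_cpu.to(gpu)                 # per-step host-to-device copy
    loss = model(x.unsqueeze(0), cond)[0]  # forward pass on the GPU
    del x                                  # drop the GPU copy right away
    return loss
```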
@@ -8,6 +8,7 @@ from torchvision import transforms
 import random
 import tqdm
+from modules import devices
 class PersonalizedBase(Dataset):
@@ -47,6 +48,7 @@ class PersonalizedBase(Dataset):
             torchdata = torch.moveaxis(torchdata, 2, 0)
             init_latent = model.get_first_stage_encoding(model.encode_first_stage(torchdata.unsqueeze(dim=0))).squeeze()
+            init_latent = init_latent.to(devices.cpu)
             self.dataset.append((init_latent, filename_tokens))
...
@@ -212,7 +212,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
         with torch.autocast("cuda"):
             c = cond_model([text])
+            x = x.to(devices.device)
             loss = shared.sd_model(x.unsqueeze(0), c)[0]
+            del x
             losses[embedding.step % losses.shape[0]] = loss.item()
...
@@ -1002,8 +1002,8 @@ def create_ui(wrap_gradio_gpu_call):
         log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
         template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
         steps = gr.Number(label='Max steps', value=100000, precision=0)
-        create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=1000, precision=0)
-        save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=1000, precision=0)
+        create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
+        save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
         with gr.Row():
             with gr.Column(scale=2):
...