Commit 1503edda authored by novelailab's avatar novelailab

Add support for longer clip contexts

parent fea743e5
......@@ -20,7 +20,7 @@ RUN pip3 install omegaconf transformers einops
#RUN pip3 install -e stable-diffusion/.
RUN pip3 install pytorch_lightning
RUN pip3 install -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
RUN wget -O stable-diffusion-private-hypernets.zip https://www.dropbox.com/s/cu1c0g9fdr8vii4/stable-diffusion-private-17cfb56e7460d78f633ed1ea725a5a24c66ee42c.zip?dl=1
RUN wget -O stable-diffusion-private-hypernets.zip https://www.dropbox.com/s/lbxm6oijjawmeew/stable-diffusion-private-buckets.zip?dl=1
RUN unzip stable-diffusion-private-hypernets.zip
RUN pip3 install -e stable-diffusion-private-17cfb56e7460d78f633ed1ea725a5a24c66ee42c/.
RUN pip3 install https://github.com/crowsonkb/k-diffusion/archive/481677d114f6ea445aa009cf5bd7a9cdee909e47.zip
......@@ -48,4 +48,4 @@ WORKDIR /usr/src/app
COPY . .
CMD [ "gunicorn", "main:app", "--workers 1", "--worker-class uvicorn.workers.UvicornWorker", "--bind 0.0.0.0:80", "--timeout 0", "--keep-alive 60", "--log-level=debug" ]
#CMD [ "python3", "main.py" ]
\ No newline at end of file
#CMD [ "python3", "main.py" ]
......@@ -148,6 +148,13 @@ def init_config_model():
config.prior_path = os.getenv('PRIOR_PATH', None)
config.default_config = os.getenv('DEFAULT_CONFIG', None)
config.quality_hack = os.getenv('QUALITY_HACK', "0")
# Number of CLIP context windows to chain together (each window is 75 tokens;
# see the `max_clip_extend = 75 * clip_contexts` consumer). Read from the
# CLIP_CONTEXTS env var, clamped to [1, 10]; any malformed or out-of-range
# value falls back to the single-context default of 1.
config.clip_contexts = os.getenv('CLIP_CONTEXTS', "1")
try:
    config.clip_contexts = int(config.clip_contexts)
except (TypeError, ValueError):
    # Non-numeric env value — keep the safe default rather than crash startup.
    config.clip_contexts = 1
else:
    if not 1 <= config.clip_contexts <= 10:
        config.clip_contexts = 1
# Misc settings
config.model_alias = os.getenv('MODEL_ALIAS')
......@@ -195,4 +202,4 @@ def init_config_model():
time_load = time.time() - load_time
logger.info(f"Models loaded in {time_load:.2f}s")
return model, config, model_hash
\ No newline at end of file
return model, config, model_hash
......@@ -207,6 +207,10 @@ class StableDiffusionModel(nn.Module):
model.cond_stage_model.return_layer = -2
model.cond_stage_model.do_final_ln = True
config.logger.info(f"CLIP: Using penultimate layer")
if self.config.clip_contexts > 1:
model.cond_stage_model.clip_extend = True
model.cond_stage_model.max_clip_extend = 75 * self.config.clip_contexts
model.cond_stage_model.inference_mode = True
self.k_model = K.external.CompVisDenoiser(model)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.