Commit ee7c2797 authored by nanahira

add full build and support all vram modes

parent ff28793e
Pipeline #17251 failed with stages in 17 minutes and 27 seconds
@@ -26,5 +26,6 @@ ENV ENABLE_EMA="1"
 ENV VAE_PATH="models/animevae.pt"
 ENV PENULTIMATE="1"
 ENV PYTHONDONTWRITEBYTECODE=1
+ENV VRAM_MODE="medvram"
 CMD ["python3", "-m", "uvicorn", "--host", "0.0.0.0", "--port=6969", "main:app"]
 ARG BASE=git-registry.mycard.moe/novelai-storage/naifu:latest
 FROM node:lts-bullseye as frontend
-RUN apt update && apt -y install git && npm install -g pnpm && \
+RUN apt update && apt -y install wget tar git && npm install -g pnpm && \
     git clone https://code.mycard.moe/novelai-storage/naifu-frontend.git /frontend && \
-    cd /frontend && pnpm install && pnpm run build
+    cd /frontend && pnpm install && pnpm run build && \
+    mkdir /models && cd /models && \
+    wget -O - https://cdn02.moecube.com:444/nanahira/naifu/models.tar.gz | tar -zxf -
 FROM $BASE
 COPY --from=frontend /frontend/build ./static
+COPY --from=frontend /models ./models
 ENV WITH_STATIC=1
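
The frontend build stage now also pulls the model archive during the image build: wget streams models.tar.gz from the CDN and pipes it straight into tar, and the resulting /models directory is copied into the final image alongside the static frontend, which is what makes this the "full" build. A rough Python equivalent of that download-and-extract step, for illustration only (fetch_models is a hypothetical helper, not part of the repo; the URL is the one from the Dockerfile):

import tarfile
import urllib.request

MODELS_URL = "https://cdn02.moecube.com:444/nanahira/naifu/models.tar.gz"

def fetch_models(dest: str = "/models") -> None:
    # Mirrors `wget -O - ... | tar -zxf -`: stream the gzipped tarball and
    # unpack it into `dest` without writing the archive to disk first.
    with urllib.request.urlopen(MODELS_URL) as resp:
        with tarfile.open(fileobj=resp, mode="r|gz") as archive:
            archive.extractall(path=dest)

Baking the weights into the image trades a larger image for a container that can start without any download at runtime.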
@@ -10,6 +10,7 @@ services:
       - ./hydra_node:/app/hydra_node:ro
       #- ./static:/app/static:ro
     environment:
+      VRAM_MODE: lowvram
       # TOKEN_SERVER: https://api.moecube.com/accounts/authUser
-      TOKEN: mycard
-      #WITH_STATIC: 1
+      # TOKEN: mycard
+      # WITH_STATIC: 1
@@ -178,7 +178,11 @@ def init_config_model():
     #attach it to the model
     model.premodules = modules
-    lowvram.setup_for_low_vram(model.model, True)
+    vramMode = os.getenv("VRAM_MODE", "medvram")
+    if vramMode != "highvram":
+        lowvram.setup_for_low_vram(model.model, vramMode == "medvram")
+    else:
+        model.model.to(torch.device("cuda"))
     config.model = model
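
The init change replaces the unconditional lowvram.setup_for_low_vram(model.model, True) call with a dispatch on VRAM_MODE: "highvram" keeps the whole model on the GPU, "medvram" calls the offloading helper with its lighter setting, and any other value (including the "lowvram" used in docker-compose) falls through to the most aggressive offloading. A minimal standalone sketch of that dispatch, assuming the same lowvram.setup_for_low_vram helper; apply_vram_mode is a hypothetical name, not a function in the repo:

import os
import torch

def apply_vram_mode(model, lowvram) -> str:
    # Same branching as the diff: only "highvram" keeps the full model resident
    # on the GPU; everything else goes through the offloading helper, and the
    # helper's second argument is True only for "medvram".
    mode = os.getenv("VRAM_MODE", "medvram")
    if mode != "highvram":
        lowvram.setup_for_low_vram(model.model, mode == "medvram")
    else:
        model.model.to(torch.device("cuda"))
    return mode

Note that unrecognized values are not rejected; they silently behave like lowvram, which matches the diff.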