From d2c1e551d70bdde109040af991816e77f21418f6 Mon Sep 17 00:00:00 2001
From: Simon Oelerich <54176035+gmasil@users.noreply.github.com>
Date: Tue, 4 Apr 2023 18:55:14 +0200
Subject: [PATCH 1/4] Enable ControlNet mounts for AUTOMATIC1111 (#385)

The ControlNet addon [sd-webui-controlnet](https://github.com/Mikubill/sd-webui-controlnet)
requires the `data/ControlNet` folder to be mounted into `models/ControlNet`.
This PR enables said mount and adds the ControlNet folder to the `.gitignore`
file.

---------

Co-authored-by: AbdBarho
---
 data/.gitignore                      | 1 +
 docker-compose.yml                   | 2 +-
 services/AUTOMATIC1111/entrypoint.sh | 1 +
 services/download/download.sh        | 2 +-
 4 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/data/.gitignore b/data/.gitignore
index cb0a526..0c13125 100644
--- a/data/.gitignore
+++ b/data/.gitignore
@@ -20,3 +20,4 @@
 /VAE
 /embeddings
 /Lora
+/ControlNet
diff --git a/docker-compose.yml b/docker-compose.yml
index b6c1058..d9043f4 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -28,7 +28,7 @@ services:
     <<: *base_service
     profiles: ["auto"]
     build: ./services/AUTOMATIC1111
-    image: sd-auto:49
+    image: sd-auto:50
     environment:
       - CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
diff --git a/services/AUTOMATIC1111/entrypoint.sh b/services/AUTOMATIC1111/entrypoint.sh
index 9463baf..a6e7e04 100755
--- a/services/AUTOMATIC1111/entrypoint.sh
+++ b/services/AUTOMATIC1111/entrypoint.sh
@@ -35,6 +35,7 @@ MOUNTS["${ROOT}/models/torch_deepdanbooru"]="/data/Deepdanbooru"
 MOUNTS["${ROOT}/models/BLIP"]="/data/BLIP"
 MOUNTS["${ROOT}/models/midas"]="/data/MiDaS"
 MOUNTS["${ROOT}/models/Lora"]="/data/Lora"
+MOUNTS["${ROOT}/models/ControlNet"]="/data/ControlNet"

 MOUNTS["${ROOT}/embeddings"]="/data/embeddings"
 MOUNTS["${ROOT}/config.json"]="/data/config/auto/config.json"
diff --git a/services/download/download.sh b/services/download/download.sh
index 6f02f9b..15a29ba 100755
--- a/services/download/download.sh
+++ b/services/download/download.sh
@@ -3,7 +3,7 @@ set -Eeuo pipefail

 # TODO: maybe just use the .gitignore file to create all of these
-mkdir -vp /data/.cache /data/StableDiffusion /data/Codeformer /data/GFPGAN /data/ESRGAN /data/BSRGAN /data/RealESRGAN /data/SwinIR /data/LDSR /data/ScuNET /data/embeddings /data/VAE /data/Deepdanbooru /data/MiDaS /data/Lora
+mkdir -vp /data/.cache /data/StableDiffusion /data/Codeformer /data/GFPGAN /data/ESRGAN /data/BSRGAN /data/RealESRGAN /data/SwinIR /data/LDSR /data/ScuNET /data/embeddings /data/VAE /data/Deepdanbooru /data/MiDaS /data/Lora /data/ControlNet

 echo "Downloading, this might take a while..."

From 5d379bf7bc19e5e672651818587c6b99ab75404b Mon Sep 17 00:00:00 2001
From: Simon Oelerich <54176035+gmasil@users.noreply.github.com>
Date: Wed, 5 Apr 2023 19:09:07 +0200
Subject: [PATCH 2/4] Add mounts for `openpose` (#387)

Upon enabling the ControlNet addon from
https://github.com/AbdBarho/stable-diffusion-webui-docker/pull/385 one might
want to use the `openpose` preprocessors. Those are downloaded by the addon
the first time they are used. Without proper mounts, those networks would be
re-downloaded after each container start. This PR enables those mounts to
reduce data traffic.
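As with the ControlNet folder in #385, the new directory only has to be
registered in the `MOUNTS` table of `entrypoint.sh`; the existing link loop
takes care of wiring it up. For readers unfamiliar with that mechanism, here
is a minimal sketch of the pattern, not the actual entrypoint code: the real
loop is more defensive, and the `ROOT` default below is only an assumption.

```bash
#!/usr/bin/env bash
set -Eeuo pipefail

# ROOT points at the webui checkout inside the image (assumed default here).
ROOT="${ROOT:-/stable-diffusion-webui}"

declare -A MOUNTS
MOUNTS["${ROOT}/models/ControlNet"]="/data/ControlNet"
MOUNTS["${ROOT}/models/openpose"]="/data/openpose"

for to_path in "${!MOUNTS[@]}"; do
  from_path="${MOUNTS[${to_path}]}"
  # Ensure the persistent directory exists on the /data volume ...
  mkdir -p "${from_path}" "$(dirname "${to_path}")"
  # ... and replace whatever the image ships with a link into /data,
  # so networks downloaded by the extension survive container restarts.
  rm -rf "${to_path}"
  ln -sT "${from_path}" "${to_path}"
  echo "Mounted $(basename "${from_path}")"
done
```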
---
 data/.gitignore                      | 1 +
 docker-compose.yml                   | 2 +-
 services/AUTOMATIC1111/entrypoint.sh | 1 +
 services/download/download.sh        | 2 +-
 4 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/data/.gitignore b/data/.gitignore
index 0c13125..5194313 100644
--- a/data/.gitignore
+++ b/data/.gitignore
@@ -21,3 +21,4 @@
 /embeddings
 /Lora
 /ControlNet
+/openpose
diff --git a/docker-compose.yml b/docker-compose.yml
index d9043f4..e005e77 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -28,7 +28,7 @@ services:
     <<: *base_service
     profiles: ["auto"]
     build: ./services/AUTOMATIC1111
-    image: sd-auto:50
+    image: sd-auto:51
     environment:
       - CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
diff --git a/services/AUTOMATIC1111/entrypoint.sh b/services/AUTOMATIC1111/entrypoint.sh
index a6e7e04..c6df995 100755
--- a/services/AUTOMATIC1111/entrypoint.sh
+++ b/services/AUTOMATIC1111/entrypoint.sh
@@ -36,6 +36,7 @@ MOUNTS["${ROOT}/models/BLIP"]="/data/BLIP"
 MOUNTS["${ROOT}/models/midas"]="/data/MiDaS"
 MOUNTS["${ROOT}/models/Lora"]="/data/Lora"
 MOUNTS["${ROOT}/models/ControlNet"]="/data/ControlNet"
+MOUNTS["${ROOT}/models/openpose"]="/data/openpose"

 MOUNTS["${ROOT}/embeddings"]="/data/embeddings"
 MOUNTS["${ROOT}/config.json"]="/data/config/auto/config.json"
diff --git a/services/download/download.sh b/services/download/download.sh
index 15a29ba..54176d0 100755
--- a/services/download/download.sh
+++ b/services/download/download.sh
@@ -3,7 +3,7 @@ set -Eeuo pipefail

 # TODO: maybe just use the .gitignore file to create all of these
-mkdir -vp /data/.cache /data/StableDiffusion /data/Codeformer /data/GFPGAN /data/ESRGAN /data/BSRGAN /data/RealESRGAN /data/SwinIR /data/LDSR /data/ScuNET /data/embeddings /data/VAE /data/Deepdanbooru /data/MiDaS /data/Lora /data/ControlNet
+mkdir -vp /data/.cache /data/StableDiffusion /data/Codeformer /data/GFPGAN /data/ESRGAN /data/BSRGAN /data/RealESRGAN /data/SwinIR /data/LDSR /data/ScuNET /data/embeddings /data/VAE /data/Deepdanbooru /data/MiDaS /data/Lora /data/ControlNet /data/openpose

 echo "Downloading, this might take a while..."

From 555c26b7ce435001bc86684c7e6f314e88c6ed01 Mon Sep 17 00:00:00 2001
From: AJ Walter
Date: Sun, 16 Apr 2023 03:32:03 -0500
Subject: [PATCH 3/4] Make Dockerfiles OCI compliant (#408)

## Justification

Closes issue #352

This update makes the Dockerfiles OCI compliant, making it easier to use
Buildah or other image building techniques that require it.

## Implementation

This changes a few things, listed below:

* auto: The download container is switched to alpine. The `git` container
  specified the `/git` directory as a volume; as such, all files under `/git`
  would be lost after each script invocation. Alpine is used later in the
  build process anyway, so switching to it shouldn't add any extra cost.
* auto: A "new" clone.sh script is copied into the container, which is
  basically just the previous clone script that was embedded in the
  Dockerfile (see the sketch below).
* all: `<
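For reference, the copied script is expected to look roughly like the
one-liner it replaces; the exact contents of `services/AUTOMATIC1111/clone.sh`
in this PR may differ slightly (comments, error handling), so treat this as a
sketch only.

```sh
#!/bin/sh
# Usage (sourced from the Dockerfile): . /clone.sh <name> <git-url> <commit-sha>
# Shallow-fetches the pinned commit into repositories/<name> and removes .git
# afterwards to keep the image layer small.
mkdir -p repositories/"$1" && cd repositories/"$1" && \
  git init && git remote add origin "$2" && \
  git fetch origin "$3" --depth=1 && git reset --hard "$3" && \
  rm -rf .git
```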
---
 services/AUTOMATIC1111/Dockerfile | 52 +++++++++++++------------------
 services/AUTOMATIC1111/clone.sh   | 11 +++++++
 services/invoke/Dockerfile        | 47 +++++++++++++---------------
 services/sygil/Dockerfile         | 42 +++++++++++--------------
 4 files changed, 71 insertions(+), 81 deletions(-)
 create mode 100644 services/AUTOMATIC1111/clone.sh

diff --git a/services/AUTOMATIC1111/Dockerfile b/services/AUTOMATIC1111/Dockerfile
index b2d44d8..a610d0c 100644
--- a/services/AUTOMATIC1111/Dockerfile
+++ b/services/AUTOMATIC1111/Dockerfile
@@ -1,14 +1,6 @@
-# syntax=docker/dockerfile:1
-
 FROM alpine/git:2.36.2 as download

-SHELL ["/bin/sh", "-ceuxo", "pipefail"]
-
-RUN <<EOF
-cat <<'EOE' > /clone.sh
-mkdir -p repositories/"$1" && cd repositories/"$1" && git init && git remote add origin "$2" && git fetch origin "$3" --depth=1 && git reset --hard "$3" && rm -rf .git
-EOE
-EOF
+COPY clone.sh /clone.sh

 RUN . /clone.sh taming-transformers https://github.com/CompVis/taming-transformers.git 24268930bf1dce879235a7fddd0b2355b84d7ea6 \
   && rm -rf data assets **/*.ipynb
@@ -30,21 +22,19 @@ RUN aria2c -x 5 --dir / --out wheel.whl 'https://github.com/AbdBarho/stable-diff

 FROM python:3.10.9-slim

-SHELL ["/bin/bash", "-ceuxo", "pipefail"]
-
 ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1

-RUN PIP_NO_CACHE_DIR=1 pip install torch==1.13.1+cu117 torchvision --extra-index-url https://download.pytorch.org/whl/cu117
+RUN --mount=type=cache,target=/root/.cache/pip \
+  pip install torch==1.13.1+cu117 torchvision --extra-index-url https://download.pytorch.org/whl/cu117

 RUN apt-get update && apt install fonts-dejavu-core rsync git jq moreutils -y && apt-get clean

 RUN --mount=type=cache,target=/root/.cache/pip < req.txt
 pip install -r req.txt
 rm req.txt
 EOF
-RUN <=4.24'
 # add info
 COPY . /docker/
 RUN <

Date: Sun, 16 Apr 2023 10:56:27 +0200
Subject: [PATCH 4/4] Refactor invoke (#405)

Fixes a problem with the cross attention class missing from diffusers.
Models are now taken from the HuggingFace cache.

https://github.com/invoke-ai/InvokeAI/commit/50eb02f68be912276a9c106d5e8038a5671a0386
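In practice this means the static `models.yaml` shipped with the image is
gone: when `PRELOAD=true`, the entrypoint runs `invokeai-configure` and copies
the generated file to `${CONFIG_DIR}/models.yaml` on the data volume, which
the `CMD` then passes via `--config`. A rough way to exercise and inspect this
(assuming the default compose setup where `./data` is bound to `/data`):

```bash
# First start with the invoke profile; PRELOAD=true triggers invokeai-configure
# and persists the generated model list on the data volume.
docker compose --profile invoke up --build

# On the host, the generated config survives container restarts and can be
# inspected or tweaked between runs.
cat data/config/invoke/models.yaml
```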
---
 docker-compose.yml            |  4 ++--
 services/invoke/Dockerfile    | 43 ++++++++++++++++-------------------
 services/invoke/entrypoint.sh | 17 +++++++-------
 services/invoke/models.yaml   | 23 -------------------
 4 files changed, 30 insertions(+), 57 deletions(-)
 delete mode 100644 services/invoke/models.yaml

diff --git a/docker-compose.yml b/docker-compose.yml
index e005e77..2d17b6c 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -43,10 +43,10 @@ services:
     <<: *base_service
     profiles: ["invoke"]
     build: ./services/invoke/
-    image: sd-invoke:26
+    image: sd-invoke:27
     environment:
       - PRELOAD=true
-      - CLI_ARGS=
+      - CLI_ARGS=--no-nsfw_checker --no-safety_checker --xformers

   sygil: &sygil
diff --git a/services/invoke/Dockerfile b/services/invoke/Dockerfile
index b6045ab..514d525 100644
--- a/services/invoke/Dockerfile
+++ b/services/invoke/Dockerfile
@@ -11,53 +11,48 @@ ENV DEBIAN_FRONTEND=noninteractive PIP_EXISTS_ACTION=w PIP_PREFER_BINARY=1

 RUN --mount=type=cache,target=/root/.cache/pip pip install torch==1.13.1+cu117 torchvision --extra-index-url https://download.pytorch.org/whl/cu117

-RUN apt-get update && apt-get install git -y && apt-get clean
-
-RUN git clone https://github.com/invoke-ai/InvokeAI.git /stable-diffusion
-
-WORKDIR /stable-diffusion
-
-RUN --mount=type=cache,target=/root/.cache/pip \
-  git reset --hard f232068ab89bd80e4f5f3133dcdb62ea78f1d0f7 && \
-  git config --global http.postBuffer 1048576000 && \
-  egrep -v '^-e .' environments-and-requirements/requirements-lin-cuda.txt > req.txt && \
-  pip install -r req.txt && \
-  rm req.txt
-
 # patch match:
 # https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALL_PATCHMATCH.md
-RUN \
+RUN --mount=type=cache,target=/var/cache/apt \
   apt-get update && \
-  # apt-get install build-essential python3-opencv libopencv-dev -y && \
-  apt-get install make g++ libopencv-dev -y && \
+  apt-get install make g++ git libopencv-dev -y && \
   apt-get clean && \
   cd /usr/lib/x86_64-linux-gnu/pkgconfig/ && \
   ln -sf opencv4.pc opencv.pc

-ARG BRANCH=main SHA=6e0c6d9cc9f6bdbdefc4b9e94bc1ccde1b04aa42
+
+ENV ROOT=/InvokeAI
+RUN git clone https://github.com/invoke-ai/InvokeAI.git ${ROOT}
+WORKDIR ${ROOT}
+
+RUN --mount=type=cache,target=/root/.cache/pip \
+  git reset --hard 4463124bddd221c333d4c70e73aa2949ad35453d && \
+  pip install .
+
+
+ARG BRANCH=main SHA=50eb02f68be912276a9c106d5e8038a5671a0386
 RUN --mount=type=cache,target=/root/.cache/pip \
   git fetch && \
   git reset --hard && \
   git checkout ${BRANCH} && \
   git reset --hard ${SHA} && \
-  pip install .
-
+  pip install -U .

 RUN --mount=type=cache,target=/root/.cache/pip \
-  --mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.15-cp310-cp310-linux_x86_64.whl \
-  pip install -U opencv-python-headless huggingface_hub triton /xformers-0.0.15-cp310-cp310-linux_x86_64.whl && \
+  --mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.17-cp310-cp310-linux_x86_64.whl \
+  pip install -U opencv-python-headless triton /xformers-0.0.17-cp310-cp310-linux_x86_64.whl && \
   python3 -c "from patchmatch import patch_match"

-RUN touch invokeai.init

 COPY . /docker/

 ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
 ENV NVIDIA_VISIBLE_DEVICES=all
-ENV PYTHONUNBUFFERED=1 ROOT=/stable-diffusion PYTHONPATH="${PYTHONPATH}:${ROOT}" PRELOAD=false CLI_ARGS="" HF_HOME=/root/.cache/huggingface
+ENV PYTHONUNBUFFERED=1 PRELOAD=false HF_HOME=/root/.cache/huggingface CONFIG_DIR=/data/config/invoke CLI_ARGS=""

 EXPOSE 7860

 ENTRYPOINT ["/docker/entrypoint.sh"]
-CMD invokeai --web --host 0.0.0.0 --port 7860 --config /docker/models.yaml --root_dir ${ROOT} --outdir /output/invoke ${CLI_ARGS}
+CMD invokeai --web --host 0.0.0.0 --port 7860 --root_dir ${ROOT} --config ${CONFIG_DIR}/models.yaml --outdir /output/invoke ${CLI_ARGS}
+# TODO: make sure the config is persisted between sessions
diff --git a/services/invoke/entrypoint.sh b/services/invoke/entrypoint.sh
index a0d0410..3594c85 100755
--- a/services/invoke/entrypoint.sh
+++ b/services/invoke/entrypoint.sh
@@ -4,25 +4,25 @@ set -Eeuo pipefail

 declare -A MOUNTS

+mkdir -p ${CONFIG_DIR}
+
 # cache
 MOUNTS["/root/.cache"]=/data/.cache/

+# this is really just a hack to avoid migrations
+rm -rf ${HF_HOME}/diffusers
+
 # ui specific
 MOUNTS["${ROOT}/models/codeformer"]=/data/Codeformer/
-
 MOUNTS["${ROOT}/models/gfpgan/GFPGANv1.4.pth"]=/data/GFPGAN/GFPGANv1.4.pth
-MOUNTS["${ROOT}/models/gfpgan/weights"]=/data/.cache/
-
+MOUNTS["${ROOT}/models/gfpgan/weights"]=/data/GFPGAN/
 MOUNTS["${ROOT}/models/realesrgan"]=/data/RealESRGAN/

-MOUNTS["${ROOT}/models/bert-base-uncased"]=/data/.cache/huggingface/transformers/
-MOUNTS["${ROOT}/models/openai/clip-vit-large-patch14"]=/data/.cache/huggingface/transformers/
-MOUNTS["${ROOT}/models/CompVis/stable-diffusion-safety-checker"]=/data/.cache/huggingface/transformers/
+MOUNTS["${ROOT}/models/ldm"]=/data/.cache/invoke/ldm/

 MOUNTS["${ROOT}/embeddings"]=/data/embeddings/

 # hacks
-MOUNTS["${ROOT}/models/clipseg"]=/data/.cache/invoke/clipseg/

 for to_path in "${!MOUNTS[@]}"; do
   set -Eeuo pipefail
@@ -40,7 +40,8 @@ done

 if "${PRELOAD}" == "true"; then
   set -Eeuo pipefail
-  invokeai-configure --skip-sd-weights --root ${ROOT} --yes
+  invokeai-configure --root ${ROOT} --yes
+  cp ${ROOT}/configs/models.yaml ${CONFIG_DIR}/models.yaml
 fi

 exec "$@"
diff --git a/services/invoke/models.yaml b/services/invoke/models.yaml
deleted file mode 100644
index 0efe34e..0000000
--- a/services/invoke/models.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file describes the alternative machine learning models
-# available to InvokeAI script.
-#
-# To add a new model, follow the examples below. Each
-# model requires a model config file, a weights file,
-# and the width and height of the images it
-# was trained on.
-stable-diffusion-1.5:
-  description: Stable Diffusion version 1.5
-  weights: /data/StableDiffusion/v1-5-pruned-emaonly.ckpt
-  vae: /data/VAE/vae-ft-mse-840000-ema-pruned.ckpt
-  config: ./invokeai/configs/stable-diffusion/v1-inference.yaml
-  width: 512
-  height: 512
-  default: true
-inpainting-1.5:
-  description: RunwayML SD 1.5 model optimized for inpainting
-  weights: /data/StableDiffusion/sd-v1-5-inpainting.ckpt
-  vae: /data/VAE/vae-ft-mse-840000-ema-pruned.ckpt
-  config: ./invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml
-  width: 512
-  height: 512
-  default: false
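After this refactor, all invoke state ends up on the bind-mounted volumes, so
the change is easy to verify from the host. A quick sanity check, assuming the
default compose layout where `./data` is bound to `/data` and `./output` to
`/output`:

```bash
# After the first preloaded run:
ls data/.cache/huggingface/   # diffusers models pulled from the Hugging Face hub
ls data/config/invoke/        # generated models.yaml passed to invokeai via --config
ls output/invoke/             # images written by --outdir /output/invoke
```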