Mirror of https://github.com/AbdBarho/stable-diffusion-webui-docker.git (synced 2026-02-03 22:24:19 +01:00)

Commit 821e0bc977 (parent fcf52e301e): Are Tests
@@ -1,14 +1,10 @@
 version: '3.9'
 
 x-base_service: &base_service
-  ports:
-    - "7860:7860"
-    - "7861:7861"
   volumes:
     - &v1 ./data:/data
-    - &v2 ./output:/output
-    - &v3 ./output0:/output0
-    - &v4 ./output1:/output1
+    - &v2 ./output0:/output0
+    - &v3 ./output1:/output1
   deploy:
     resources:
       reservations:
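The &v1/&v2/&v3 aliases on the volumes and the x-base_service block are plain YAML anchors that every service below pulls in through a <<: *base_service merge key. To check how they expand, Compose can render the fully merged file; a minimal sketch, assuming Docker Compose v2 and the profile names defined further down:

    # Print the resolved config; anchors and merge keys are expanded in the
    # output, so each service's effective ports and volumes can be inspected.
    docker compose --profile auto0 --profile auto1 config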
@@ -17,7 +13,7 @@ x-base_service: &base_service
           device_ids: ['0,1']
           capabilities: [gpu]
 
-name: webui-docker
+name: webui-docker-multi-gpu
 
 services:
   download:
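Both GPUs stay reserved for every container (device_ids: ['0,1'] with the gpu capability), while the per-service CLI_ARGS below pin each webui to a single device. A hedged way to confirm what a running container actually sees, assuming torch is importable inside the image (the service Dockerfiles install it):

    docker compose --profile auto0 up -d
    # Count the CUDA devices visible to the container; with the reservation
    # above this should report both GPUs even though the app only uses one.
    docker compose --profile auto0 exec auto0 python3 -c "import torch; print(torch.cuda.device_count())"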
@@ -29,47 +25,19 @@ services:
   auto0: &automatic0
     <<: *base_service
     profiles: ["auto0"]
-    build: ./services/AUTOMATIC1111_0
-    image: sd-auto0:33
+    build: ./services/AUTOMATIC_0
+    image: sd-auto-0:33
+    ports:
+      - "7860:7860"
     environment:
-      - CLI_ARGS=--allow-code --xformers --enable-insecure-extension-access --api
+      - CLI_ARGS=--allow-code --xformers --enable-insecure-extension-access --api --device-id 0 --port 7860
 
   auto1: &automatic1
     <<: *base_service
     profiles: ["auto1"]
-    build: ./services/AUTOMATIC1111_1
-    image: sd-auto1:33
+    build: ./services/AUTOMATIC_1
+    image: sd-auto-1:33
+    ports:
+      - "7861:7861"
     environment:
-      - CLI_ARGS=--allow-code --xformers --enable-insecure-extension-access --api
-
-  auto-cpu:
-    <<: *automatic
-    profiles: ["auto-cpu"]
-    deploy: {}
-    environment:
-      - CLI_ARGS=--no-half --precision full
-
-  invoke:
-    <<: *base_service
-    profiles: ["invoke"]
-    build: ./services/invoke/
-    image: sd-invoke:17
-    environment:
-      - PRELOAD=true
-      - CLI_ARGS=
-
-
-  sygil: &sygil
-    <<: *base_service
-    profiles: ["sygil"]
-    build: ./services/sygil/
-    image: sd-sygil:16
-    environment:
-      - CLI_ARGS=--optimized-turbo
-      - USE_STREAMLIT=0
-
-  sygil-sl:
-    <<: *sygil
-    profiles: ["sygil-sl"]
-    environment:
-      - USE_STREAMLIT=1
+      - CLI_ARGS=--allow-code --xformers --enable-insecure-extension-access --api --device-id 1 --port 7861
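With the per-service ports and the auto0/auto1 profiles above, each GPU gets its own webui instance. A minimal launch sketch using the profile and service names from this compose file:

    # Build and start one instance per GPU, reachable on 7860 and 7861.
    docker compose --profile auto0 up --build -d
    docker compose --profile auto1 up --build -d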
output/.gitignore (2 lines changed, vendored)

@@ -1,2 +0,0 @@
-/*
-!/.gitignore
@@ -90,4 +90,4 @@ WORKDIR ${ROOT}
 ENV CLI_ARGS=""
 EXPOSE 7861
 ENTRYPOINT ["/docker/entrypoint.sh"]
-CMD python3 -u webui.py --listen --port 7860 ${CLI_ARGS}
+CMD python3 -u webui.py --listen --port 7861 ${CLI_ARGS}
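Since this image now exposes and listens on 7861 (EXPOSE plus the updated CMD), a quick reachability check for both instances might look like the following; the ports come from the compose file and both services are assumed to be up:

    # Each instance should answer on its own port once startup has finished.
    curl -sI http://localhost:7860/ | head -n 1
    curl -sI http://localhost:7861/ | head -n 1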
@@ -1,58 +0,0 @@
-# syntax=docker/dockerfile:1
-
-FROM python:3.10-slim
-SHELL ["/bin/bash", "-ceuxo", "pipefail"]
-
-ENV DEBIAN_FRONTEND=noninteractive PIP_EXISTS_ACTION=w PIP_PREFER_BINARY=1
-
-
-RUN --mount=type=cache,target=/root/.cache/pip \
-  pip install torch==1.12.0+cu116 --extra-index-url https://download.pytorch.org/whl/cu116
-
-RUN apt-get update && apt-get install git -y && apt-get clean
-
-RUN git clone https://github.com/invoke-ai/InvokeAI.git /stable-diffusion
-
-WORKDIR /stable-diffusion
-
-RUN --mount=type=cache,target=/root/.cache/pip <<EOF
-git reset --hard 5c31feb3a1096d437c94b6e1c3224eb7a7224a85
-git config --global http.postBuffer 1048576000
-pip install -r binary_installer/py3.10-linux-x86_64-cuda-reqs.txt
-EOF
-
-
-# patch match:
-# https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALL_PATCHMATCH.md
-RUN <<EOF
-apt-get update
-# apt-get install build-essential python3-opencv libopencv-dev -y
-apt-get install make g++ libopencv-dev -y
-apt-get clean
-cd /usr/lib/x86_64-linux-gnu/pkgconfig/
-ln -sf opencv4.pc opencv.pc
-EOF
-
-ARG BRANCH=main SHA=26e413ae9cf8dc04c617ca451a91a1624bfdf0c0
-RUN --mount=type=cache,target=/root/.cache/pip <<EOF
-git fetch
-git reset --hard
-git checkout ${BRANCH}
-git reset --hard ${SHA}
-pip install -r binary_installer/py3.10-linux-x86_64-cuda-reqs.txt
-EOF
-
-RUN --mount=type=cache,target=/root/.cache/pip \
-  pip install -U --force-reinstall opencv-python-headless huggingface_hub && \
-  python3 -c "from patchmatch import patch_match"
-
-
-RUN touch invokeai.init
-COPY . /docker/
-
-
-ENV ROOT=/stable-diffusion PYTHONPATH="${PYTHONPATH}:${ROOT}" PRELOAD=false CLI_ARGS=""
-EXPOSE 7860
-
-ENTRYPOINT ["/docker/entrypoint.sh"]
-CMD python3 -u scripts/invoke.py --web --host 0.0.0.0 --port 7860 --config /docker/models.yaml --root_dir ${ROOT} --outdir /output/invoke ${CLI_ARGS}
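Although this commit drops the invoke service from the compose file, the deleted Dockerfile above could still be built on its own. The build context and tag below are taken from the old compose entry (build: ./services/invoke/, image: sd-invoke:17); BuildKit is required for the cache mounts and heredoc RUN blocks:

    DOCKER_BUILDKIT=1 docker build -t sd-invoke:17 ./services/invoke/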
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-set -Eeuo pipefail
-
-declare -A MOUNTS
-
-# cache
-MOUNTS["/root/.cache"]=/data/.cache/
-
-# ui specific
-MOUNTS["${ROOT}/models/codeformer"]=/data/Codeformer/
-
-MOUNTS["${ROOT}/models/gfpgan/GFPGANv1.4.pth"]=/data/GFPGAN/GFPGANv1.4.pth
-MOUNTS["${ROOT}/models/gfpgan/weights"]=/data/.cache/
-
-MOUNTS["${ROOT}/models/realesrgan"]=/data/RealESRGAN/
-
-MOUNTS["${ROOT}/models/bert-base-uncased"]=/data/.cache/huggingface/transformers/
-MOUNTS["${ROOT}/models/openai/clip-vit-large-patch14"]=/data/.cache/huggingface/transformers/
-MOUNTS["${ROOT}/models/CompVis/stable-diffusion-safety-checker"]=/data/.cache/huggingface/transformers/
-
-MOUNTS["${ROOT}/embeddings"]=/data/embeddings/
-
-# hacks
-MOUNTS["${ROOT}/models/clipseg"]=/data/.cache/invoke/clipseg/
-
-for to_path in "${!MOUNTS[@]}"; do
-  set -Eeuo pipefail
-  from_path="${MOUNTS[${to_path}]}"
-  rm -rf "${to_path}"
-  mkdir -p "$(dirname "${to_path}")"
-  # ends with slash, make it!
-  if [[ "$from_path" == */ ]]; then
-    mkdir -vp "$from_path"
-  fi
-
-  ln -sT "${from_path}" "${to_path}"
-  echo Mounted $(basename "${from_path}")
-done
-
-if "${PRELOAD}" == "true"; then
-  set -Eeuo pipefail
-  python3 -u scripts/preload_models.py --skip-sd-weights --root ${ROOT} --config_file /docker/models.yaml
-fi
-
-exec "$@"
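The loop in the deleted entrypoint implements a simple redirection pattern: every model and cache path inside the application tree is replaced by a symlink into the shared /data volume, so downloads survive container rebuilds. A stripped-down sketch of the same idea, with a hypothetical path pair for illustration:

    #!/bin/bash
    set -Eeuo pipefail

    declare -A MOUNTS
    MOUNTS["/app/models"]=/data/models/   # hypothetical example entry

    for to_path in "${!MOUNTS[@]}"; do
      from_path="${MOUNTS[${to_path}]}"
      rm -rf "${to_path}"                  # drop whatever the image shipped
      mkdir -p "$(dirname "${to_path}")"   # make sure the parent directory exists
      # a trailing slash means the target is a directory under /data: create it
      if [[ "${from_path}" == */ ]]; then
        mkdir -vp "${from_path}"
      fi
      ln -sT "${from_path}" "${to_path}"   # the app path now points into /data
      echo "Mounted $(basename "${from_path}")"
    done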
@@ -1,23 +0,0 @@
-# This file describes the alternative machine learning models
-# available to InvokeAI script.
-#
-# To add a new model, follow the examples below. Each
-# model requires a model config file, a weights file,
-# and the width and height of the images it
-# was trained on.
-stable-diffusion-1.5:
-  description: Stable Diffusion version 1.5
-  weights: /data/StableDiffusion/v1-5-pruned-emaonly.ckpt
-  vae: /data/VAE/vae-ft-mse-840000-ema-pruned.ckpt
-  config: ./configs/stable-diffusion/v1-inference.yaml
-  width: 512
-  height: 512
-  default: true
-inpainting-1.5:
-  description: RunwayML SD 1.5 model optimized for inpainting
-  weights: /data/StableDiffusion/sd-v1-5-inpainting.ckpt
-  vae: /data/VAE/vae-ft-mse-840000-ema-pruned.ckpt
-  config: ./configs/stable-diffusion/v1-inpainting-inference.yaml
-  width: 512
-  height: 512
-  default: false
@@ -1,46 +0,0 @@
-# syntax=docker/dockerfile:1
-
-FROM python:3.8-slim
-
-SHELL ["/bin/bash", "-ceuxo", "pipefail"]
-
-ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
-
-RUN --mount=type=cache,target=/root/.cache/pip pip install torch==1.13.0 torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117
-
-RUN apt-get update && apt install gcc libsndfile1 ffmpeg build-essential zip unzip git -y && apt-get clean
-
-RUN --mount=type=cache,target=/root/.cache/pip <<EOF
-git config --global http.postBuffer 1048576000
-git clone https://github.com/Sygil-Dev/sygil-webui.git stable-diffusion
-cd stable-diffusion
-git reset --hard 5291437085bddd16d752f811b6552419a2044d12
-pip install -r requirements.txt
-EOF
-
-
-ARG BRANCH=master SHA=571fb897edd58b714bb385dfaa1ad59aecef8bc7
-RUN --mount=type=cache,target=/root/.cache/pip <<EOF
-cd stable-diffusion
-git fetch
-git checkout ${BRANCH}
-git reset --hard ${SHA}
-pip install -r requirements.txt
-EOF
-
-RUN --mount=type=cache,target=/root/.cache/pip pip install transformers==4.24.0
-
-# add info
-COPY . /docker/
-RUN <<EOF
-python /docker/info.py /stable-diffusion/frontend/frontend.py
-chmod +x /docker/mount.sh /docker/run.sh
-# streamlit
-sed -i -- 's/8501/7860/g' /stable-diffusion/.streamlit/config.toml
-EOF
-
-WORKDIR /stable-diffusion
-ENV PYTHONPATH="${PYTHONPATH}:${PWD}" STREAMLIT_SERVER_HEADLESS=true USE_STREAMLIT=0 CLI_ARGS=""
-EXPOSE 7860
-
-CMD /docker/mount.sh && /docker/run.sh
@@ -1,13 +0,0 @@
-import sys
-from pathlib import Path
-
-file = Path(sys.argv[1])
-file.write_text(
-    file.read_text()\
-    .replace('<p>For help and advanced usage guides,', """
-<p>
-Created using <a href="https://github.com/AbdBarho/stable-diffusion-webui-docker">stable-diffusion-webui-docker</a>.
-</p>
-<p>For help and advanced usage guides,
-""", 1)
-)
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-set -Eeuo pipefail
-
-declare -A MOUNTS
-
-ROOT=/stable-diffusion/src
-
-# cache
-MOUNTS["/root/.cache"]=/data/.cache
-# ui specific
-MOUNTS["${PWD}/models/realesrgan"]=/data/RealESRGAN
-MOUNTS["${PWD}/models/ldsr"]=/data/LDSR
-MOUNTS["${PWD}/models/custom"]=/data/StableDiffusion
-
-# hack
-MOUNTS["${PWD}/models/gfpgan/GFPGANv1.3.pth"]=/data/GFPGAN/GFPGANv1.4.pth
-MOUNTS["${PWD}/models/gfpgan/GFPGANv1.4.pth"]=/data/GFPGAN/GFPGANv1.4.pth
-MOUNTS["${PWD}/gfpgan/weights"]=/data/.cache
-
-
-for to_path in "${!MOUNTS[@]}"; do
-  set -Eeuo pipefail
-  from_path="${MOUNTS[${to_path}]}"
-  rm -rf "${to_path}"
-  mkdir -p "$(dirname "${to_path}")"
-  ln -sT "${from_path}" "${to_path}"
-  echo Mounted $(basename "${from_path}")
-done
-
-# streamlit config
-ln -sf /docker/userconfig_streamlit.yaml /stable-diffusion/configs/webui/userconfig_streamlit.yaml
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -Eeuo pipefail
-
-echo "USE_STREAMLIT = ${USE_STREAMLIT}"
-if [ "${USE_STREAMLIT}" == "1" ]; then
-  python -u -m streamlit run scripts/webui_streamlit.py
-else
-  python3 -u scripts/webui.py --outdir /output --ckpt /data/StableDiffusion/v1-5-pruned-emaonly.ckpt ${CLI_ARGS}
-fi
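The deleted run script switches between the Streamlit UI and the default webui.py frontend via USE_STREAMLIT, which the old compose file set per profile (sygil vs. sygil-sl). For a one-off run the variable could also be overridden directly; a hedged sketch using the old service name:

    # Start the sygil service once with the Streamlit UI instead of the default.
    docker compose --profile sygil run --rm --service-ports -e USE_STREAMLIT=1 sygil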
@@ -1,11 +0,0 @@
-# https://github.com/Sygil-Dev/sygil-webui/blob/master/configs/webui/webui_streamlit.yaml
-general:
-  version: 1.24.6
-  outdir: /output
-  default_model: "Stable Diffusion v1.5"
-  default_model_path: /data/StableDiffusion/v1-5-pruned-emaonly.ckpt
-  outdir_txt2img: /output/txt2img
-  outdir_img2img: /output/img2img
-  outdir_img2txt: /output/img2txt
-  optimized: True
-  optimized_turbo: True