From 0fedf81d5690a348d3e2fa81fcb2a17a0bef60a6 Mon Sep 17 00:00:00 2001
From: Matthew Meyer
Date: Sat, 11 Mar 2023 17:21:42 -0600
Subject: [PATCH] Added services for invoke and sygil AMD

---
 docker-compose.nvidia.yml                    | 76 -----------------
 docker-compose.yml                           | 86 +++++++++++++++-----
 services/AUTOMATIC1111-AMD/Dockerfile        |  5 +-
 services/invoke-AMD/Dockerfile               | 71 ++++++++++++++++
 services/invoke-AMD/entrypoint.sh            | 46 +++++++++++
 services/invoke-AMD/models.yaml              | 23 ++++++
 services/sygil-AMD/Dockerfile                | 49 +++++++++++
 services/sygil-AMD/info.py                   | 13 +++
 services/sygil-AMD/mount.sh                  | 32 ++++++++
 services/sygil-AMD/run.sh                    | 10 +++
 services/sygil-AMD/userconfig_streamlit.yaml | 11 +++
 11 files changed, 325 insertions(+), 97 deletions(-)
 delete mode 100644 docker-compose.nvidia.yml
 create mode 100644 services/invoke-AMD/Dockerfile
 create mode 100755 services/invoke-AMD/entrypoint.sh
 create mode 100644 services/invoke-AMD/models.yaml
 create mode 100644 services/sygil-AMD/Dockerfile
 create mode 100644 services/sygil-AMD/info.py
 create mode 100755 services/sygil-AMD/mount.sh
 create mode 100755 services/sygil-AMD/run.sh
 create mode 100644 services/sygil-AMD/userconfig_streamlit.yaml

diff --git a/docker-compose.nvidia.yml b/docker-compose.nvidia.yml
deleted file mode 100644
index 9e57403..0000000
--- a/docker-compose.nvidia.yml
+++ /dev/null
@@ -1,76 +0,0 @@
-version: '3.9'
-
-x-base_service:
-  &base_service
-  ports:
-    - "7860:7860"
-  volumes:
-    - &v1 ./data:/data
-    - &v2 ./output:/output
-  stop_signal: SIGINT
-  deploy:
-    resources:
-      reservations:
-        devices:
-          - driver: nvidia
-            device_ids: [ '0' ]
-            capabilities: [ gpu ]
-
-name: webui-docker
-
-services:
-  download:
-    build: ./services/download/
-    profiles: [ "download" ]
-    volumes:
-      - *v1
-
-  auto:
-    &automatic
-    <<: *base_service
-    profiles: [ "auto" ]
-    build: ./services/AUTOMATIC1111
-    image: sd-auto:47
-    environment:
-      - CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
-
-  auto-amd:
-    &automatic
-    <<: *base_service
-    profiles: [ "auto-amd" ]
-    build: ./services/AUTOMATIC1111-AMD
-    image: sd-auto:47
-    environment:
-      - CLI_ARGS=--allow-code --medvram --enable-insecure-extension-access --api --no-half --precision full --opt-sub-quad-attention
-
-  auto-cpu:
-    <<: *automatic
-    profiles: [ "auto-cpu" ]
-    deploy: {}
-    environment:
-      - CLI_ARGS=--no-half --precision full --allow-code --enable-insecure-extension-access --api
-
-  invoke:
-    <<: *base_service
-    profiles: [ "invoke" ]
-    build: ./services/invoke/
-    image: sd-invoke:26
-    environment:
-      - PRELOAD=true
-      - CLI_ARGS=
-
-  sygil:
-    &sygil
-    <<: *base_service
-    profiles: [ "sygil" ]
-    build: ./services/sygil/
-    image: sd-sygil:16
-    environment:
-      - CLI_ARGS=--optimized-turbo
-      - USE_STREAMLIT=0
-
-  sygil-sl:
-    <<: *sygil
-    profiles: [ "sygil-sl" ]
-    environment:
-      - USE_STREAMLIT=1
diff --git a/docker-compose.yml b/docker-compose.yml
index 683dc09..f1b5164 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,63 +1,109 @@
 version: '3.9'
 
-x-base_service: &base_service
-  ports:
-    - "7860:7860"
-  volumes:
-    - &v1 ./data:/data
-    - &v2 ./output:/output
-  stop_signal: SIGINT
-  group_add:
-    - video
-  devices:
-    - "/dev/dri"
-    - "/dev/kfd"
+x-base_service:
+  &base_service
+  ports:
+    - "7860:7860"
+  volumes:
+    - &v1 ./data:/data
+    - &v2 ./output:/output
+  stop_signal: SIGINT
+  deploy:
+    resources:
+      reservations:
+        devices:
+          - driver: nvidia
+            device_ids: [ '0' ]
+            capabilities: [ gpu ]
+
+x-base_service_amd:
+  &base_service_amd
+  ports:
+    - "7860:7860"
+  volumes:
+    - &v1 ./data:/data
+    - &v2 ./output:/output
+  stop_signal: SIGINT
+  group_add:
+    - video
+  devices:
+    - "/dev/dri"
+    - "/dev/kfd"
 
 name: webui-docker
 
 services:
   download:
     build: ./services/download/
-    profiles: ["download"]
+    profiles: [ "download" ]
     volumes:
       - *v1
 
-  auto: &automatic
+  auto:
+    &automatic
     <<: *base_service
-    profiles: ["auto"]
+    profiles: [ "auto" ]
     build: ./services/AUTOMATIC1111
     image: sd-auto:48
     environment:
       - CLI_ARGS=--allow-code --medvram --enable-insecure-extension-access --api
 
+  auto-amd:
+    &automatic_amd
+    <<: *base_service_amd
+    profiles: [ "auto-amd" ]
+    build: ./services/AUTOMATIC1111-AMD
+    image: sd-auto:48
+    environment:
+      - CLI_ARGS=--allow-code --medvram --no-half --precision full --enable-insecure-extension-access --api
+
   auto-cpu:
     <<: *automatic
-    profiles: ["auto-cpu"]
+    profiles: [ "auto-cpu" ]
     deploy: {}
     environment:
       - CLI_ARGS=--no-half --precision full --allow-code --enable-insecure-extension-access --api
 
   invoke:
     <<: *base_service
-    profiles: ["invoke"]
+    profiles: [ "invoke" ]
     build: ./services/invoke/
     image: sd-invoke:26
     environment:
       - PRELOAD=true
       - CLI_ARGS=
 
+  invoke-amd:
+    <<: *base_service_amd
+    profiles: [ "invoke-amd" ]
+    build: ./services/invoke-AMD/
+    image: sd-invoke:26
+    environment:
+      - PRELOAD=true
+      - CLI_ARGS=
+
-  sygil: &sygil
+  sygil:
+    &sygil
     <<: *base_service
-    profiles: ["sygil"]
+    profiles: [ "sygil" ]
     build: ./services/sygil/
     image: sd-sygil:16
     environment:
       - CLI_ARGS=--optimized-turbo
       - USE_STREAMLIT=0
 
+  sygil-amd:
+    &sygil_amd
+    <<: *base_service_amd
+    profiles: [ "sygil-amd" ]
+    build: ./services/sygil-AMD/
+    image: sd-sygil:16
+    environment:
+      - CLI_ARGS=--optimized-turbo
+      - USE_STREAMLIT=0
+
   sygil-sl:
     <<: *sygil
-    profiles: ["sygil-sl"]
+    profiles: [ "sygil-sl" ]
     environment:
       - USE_STREAMLIT=1
diff --git a/services/AUTOMATIC1111-AMD/Dockerfile b/services/AUTOMATIC1111-AMD/Dockerfile
index 716ec80..5903c44 100644
--- a/services/AUTOMATIC1111-AMD/Dockerfile
+++ b/services/AUTOMATIC1111-AMD/Dockerfile
@@ -30,7 +30,7 @@ SHELL ["/bin/bash", "-ceuxo", "pipefail"]
 
 ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
 
-RUN PIP_NO_CACHE_DIR=1 pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2 
+RUN PIP_NO_CACHE_DIR=1 pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
 
 RUN apt-get update && apt install fonts-dejavu-core rsync git jq moreutils bash -y && apt-get clean
 
@@ -86,5 +86,8 @@ WORKDIR ${ROOT}
 ENV CLI_ARGS=""
 EXPOSE 7860
 ENTRYPOINT ["/docker/entrypoint.sh"]
+
+# Depending on your actual GPU you may want to comment this out.
+# Without this you may get the error "hipErrorNoBinaryForGpu: Unable to find code object for all current devices!"
 ENV HSA_OVERRIDE_GFX_VERSION=10.3.0
 CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}
diff --git a/services/invoke-AMD/Dockerfile b/services/invoke-AMD/Dockerfile
new file mode 100644
index 0000000..073582f
--- /dev/null
+++ b/services/invoke-AMD/Dockerfile
@@ -0,0 +1,71 @@
+# syntax=docker/dockerfile:1
+
+FROM alpine:3.17 as xformers
+RUN apk add --no-cache aria2
+RUN aria2c -x 5 --dir / --out wheel.whl 'https://github.com/AbdBarho/stable-diffusion-webui-docker/releases/download/5.0.0/xformers-0.0.17.dev449-cp310-cp310-manylinux2014_x86_64.whl'
+
+
+
+FROM python:3.10-slim
+SHELL ["/bin/bash", "-ceuxo", "pipefail"]
+
+ENV DEBIAN_FRONTEND=noninteractive PIP_EXISTS_ACTION=w PIP_PREFER_BINARY=1
+
+
+RUN --mount=type=cache,target=/root/.cache/pip pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
+
+RUN apt-get update && apt-get install git -y && apt-get clean
+
+RUN git clone https://github.com/invoke-ai/InvokeAI.git /stable-diffusion
+
+WORKDIR /stable-diffusion
+
+RUN --mount=type=cache,target=/root/.cache/pip < req.txt
+pip install -r req.txt
+rm req.txt
+EOF
+
+
+# patch match:
+# https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALL_PATCHMATCH.md
+RUN <=4.24'
+
+# add info
+COPY . /docker/
+RUN <
diff --git a/services/sygil-AMD/info.py b/services/sygil-AMD/info.py
new file mode 100644
--- /dev/null
+++ b/services/sygil-AMD/info.py
@@ -0,0 +1,13 @@
+    .replace('>For help and advanced usage guides,', """
+ Created using stable-diffusion-webui-docker.
+
+For help and advanced usage guides,
+""", 1)
+)
diff --git a/services/sygil-AMD/mount.sh b/services/sygil-AMD/mount.sh
new file mode 100755
index 0000000..cc0dc9d
--- /dev/null
+++ b/services/sygil-AMD/mount.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -Eeuo pipefail
+
+declare -A MOUNTS
+
+ROOT=/stable-diffusion/src
+
+# cache
+MOUNTS["/root/.cache"]=/data/.cache
+# ui specific
+MOUNTS["${PWD}/models/realesrgan"]=/data/RealESRGAN
+MOUNTS["${PWD}/models/ldsr"]=/data/LDSR
+MOUNTS["${PWD}/models/custom"]=/data/StableDiffusion
+
+# hack
+MOUNTS["${PWD}/models/gfpgan/GFPGANv1.3.pth"]=/data/GFPGAN/GFPGANv1.4.pth
+MOUNTS["${PWD}/models/gfpgan/GFPGANv1.4.pth"]=/data/GFPGAN/GFPGANv1.4.pth
+MOUNTS["${PWD}/gfpgan/weights"]=/data/.cache
+
+
+for to_path in "${!MOUNTS[@]}"; do
+  set -Eeuo pipefail
+  from_path="${MOUNTS[${to_path}]}"
+  rm -rf "${to_path}"
+  mkdir -p "$(dirname "${to_path}")"
+  ln -sT "${from_path}" "${to_path}"
+  echo Mounted $(basename "${from_path}")
+done
+
+# streamlit config
+ln -sf /docker/userconfig_streamlit.yaml /stable-diffusion/configs/webui/userconfig_streamlit.yaml
diff --git a/services/sygil-AMD/run.sh b/services/sygil-AMD/run.sh
new file mode 100755
index 0000000..89f7959
--- /dev/null
+++ b/services/sygil-AMD/run.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -Eeuo pipefail
+
+echo "USE_STREAMLIT = ${USE_STREAMLIT}"
+if [ "${USE_STREAMLIT}" == "1" ]; then
+  python -u -m streamlit run scripts/webui_streamlit.py
+else
+  python3 -u scripts/webui.py --outdir /output --ckpt /data/StableDiffusion/v1-5-pruned-emaonly.ckpt ${CLI_ARGS}
+fi
diff --git a/services/sygil-AMD/userconfig_streamlit.yaml b/services/sygil-AMD/userconfig_streamlit.yaml
new file mode 100644
index 0000000..07a20af
--- /dev/null
+++ b/services/sygil-AMD/userconfig_streamlit.yaml
@@ -0,0 +1,11 @@
+# https://github.com/Sygil-Dev/sygil-webui/blob/master/configs/webui/webui_streamlit.yaml
+general:
+  version: 1.24.6
+  outdir: /output
+  default_model: "Stable Diffusion v1.5"
+  default_model_path: /data/StableDiffusion/v1-5-pruned-emaonly.ckpt
+  outdir_txt2img: /output/txt2img
+  outdir_img2img: /output/img2img
+  outdir_img2txt: /output/img2txt
+  optimized: True
+  optimized_turbo: True
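
With the services above in place, the new profiles would presumably be started the same way as the existing ones, on a ROCm-capable host that exposes /dev/kfd and /dev/dri to Docker (profile names taken from the compose file in this patch):

  docker compose --profile auto-amd up --build
  docker compose --profile invoke-amd up --build
  docker compose --profile sygil-amd up --build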