From 473672f9bec27a614c0cd7c7ef1ab1615ae1e76a Mon Sep 17 00:00:00 2001
From: lslowmotion
Date: Thu, 2 May 2024 11:05:13 +0700
Subject: [PATCH] initial modification to support Podman

---
 docker-compose.yml                | 97 ++++++++++++++++---------------
 podman-compose.yml                | 20 +++++++
 selinux-cache.sh                  | 24 ++++++++
 services/AUTOMATIC1111/Dockerfile | 33 +++++++++----
 services/comfy/Dockerfile         |  8 ++-
 services/invoke/Dockerfile        | 13 +++--
 6 files changed, 128 insertions(+), 67 deletions(-)
 create mode 100644 podman-compose.yml
 create mode 100755 selinux-cache.sh

diff --git a/docker-compose.yml b/docker-compose.yml
index a1cc6c2..9e4d30c 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -3,51 +3,56 @@ version: '3.9'
 x-base_service: &base_service
   ports:
     - "${WEBUI_PORT:-7860}:7860"
-  volumes:
-    - &v1 ./data:/data
-    - &v2 ./output:/output
-  stop_signal: SIGKILL
-  tty: true
-  deploy:
-    resources:
-      reservations:
-        devices:
-          - driver: nvidia
-            device_ids: ['0']
-            capabilities: [compute, utility]
+  # volumes:
+  #   - &v1 ./data:/data
+  #   - &v2 ./output:/output
+  # stop_signal: SIGKILL
+  # tty: true
+  # security_opt:
+  #   - label=type:nvidia_container_t
+  # deploy:
+  #   resources:
+  #     reservations:
+  #       devices:
+  #         - /dev/dri:/dev/kfd
+  #         - driver: nvidia
+  #           device_ids: ['0']
+  #           capabilities: [compute, utility]
 
 name: webui-docker
 
 services:
   download:
     build: ./services/download/
-    profiles: ["download"]
-    volumes:
-      - *v1
+    # profiles: ["download"]
+    # volumes:
+    #   - *v1
 
   auto: &automatic
     <<: *base_service
-    profiles: ["auto"]
+    # profiles: ["auto"]
     build: ./services/AUTOMATIC1111
     image: sd-auto:72
     environment:
-      - CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
+      # - CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
+      - CLI_ARGS=--allow-code --enable-insecure-extension-access --api
 
 
-  auto-cpu:
-    <<: *automatic
-    profiles: ["auto-cpu"]
-    deploy: {}
-    environment:
-      - CLI_ARGS=--no-half --precision full --allow-code --enable-insecure-extension-access --api
-  invoke: &invoke
-    <<: *base_service
-    profiles: ["invoke"]
-    build: ./services/invoke/
-    image: sd-invoke:30
-    environment:
-      - PRELOAD=true
-      - CLI_ARGS=--xformers
+  # auto-cpu:
+  #   <<: *automatic
+  #   profiles: ["auto-cpu"]
+  #   deploy: {}
+  #   environment:
+  #     - CLI_ARGS=--no-half --precision full --allow-code --enable-insecure-extension-access --api
+  #
+  # invoke: &invoke
+  #   <<: *base_service
+  #   profiles: ["invoke"]
+  #   build: ./services/invoke/
+  #   image: sd-invoke:30
+  #   environment:
+  #     - PRELOAD=true
+  #     - CLI_ARGS=--xformers
 
   # invoke-cpu:
   #   <<: *invoke
@@ -56,18 +61,18 @@ services:
   #     - PRELOAD=true
   #     - CLI_ARGS=--always_use_cpu
 
-  comfy: &comfy
-    <<: *base_service
-    profiles: ["comfy"]
-    build: ./services/comfy/
-    image: sd-comfy:6
-    environment:
-      - CLI_ARGS=
-
-
-  comfy-cpu:
-    <<: *comfy
-    profiles: ["comfy-cpu"]
-    deploy: {}
-    environment:
-      - CLI_ARGS=--cpu
+  # comfy: &comfy
+  #   <<: *base_service
+  #   profiles: ["comfy"]
+  #   build: ./services/comfy/
+  #   image: sd-comfy:6
+  #   environment:
+  #     - CLI_ARGS=
+  #
+  #
+  # comfy-cpu:
+  #   <<: *comfy
+  #   profiles: ["comfy-cpu"]
+  #   deploy: {}
+  #   environment:
+  #     - CLI_ARGS=--cpu
diff --git a/podman-compose.yml b/podman-compose.yml
new file mode 100644
index 0000000..6c035bb
--- /dev/null
+++ b/podman-compose.yml
@@ -0,0 +1,20 @@
+x-base_service: &base_service
+  volumes:
+    - &v1 ./data:/data
+    - &v2 ./output:/output
+  # runtime: nvidia
+  # security_opt:
+  #   - label=type:nvidia_container_t
+
+services:
+  download:
+    volumes:
+      - *v1
+
+  auto: &automatic
+    <<: *base_service
+    devices:
+      - /dev/dri
+      - /dev/kfd
+    ipc:
+      - host
diff --git a/selinux-cache.sh b/selinux-cache.sh
new file mode 100755
index 0000000..6099f64
--- /dev/null
+++ b/selinux-cache.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# This script works around SELinux blocking access to the pip and apt mount
+# caches on container builds after the first one. It uses "Z" instead of "z"
+# because the shared label does not always get applied to every file. "Z"
+# forces every file to be relabeled so it is accessible to the current build
+# step, fixing the "file not found" issue.
+
+disabled_pip="/root/.cache/pip "
+enabled_pip="/root/.cache/pip,Z "
+disabled_apt="/var/cache/apt "
+enabled_apt="/var/cache/apt,Z "
+
+for file in ./services/*/Dockerfile; do
+  if [[ "$1" == "--disable" ]]; then
+    sed -i "s|$enabled_pip|$disabled_pip|g" "$file"
+    sed -i "s|$enabled_apt|$disabled_apt|g" "$file"
+    echo "Disabled selinux relabeling for cache in $file."
+  else
+    sed -i "s|$disabled_pip|$enabled_pip|g" "$file"
+    sed -i "s|$disabled_apt|$enabled_apt|g" "$file"
+    echo "Enabled selinux relabeling for cache in $file."
+  fi
+done
diff --git a/services/AUTOMATIC1111/Dockerfile b/services/AUTOMATIC1111/Dockerfile
index 4c478b3..eab3a41 100644
--- a/services/AUTOMATIC1111/Dockerfile
+++ b/services/AUTOMATIC1111/Dockerfile
@@ -15,35 +15,41 @@ RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interr
 
 RUN . /clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f
 
-FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime
+# FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime
+FROM rocm/pytorch:rocm6.1_ubuntu22.04_py3.10_pytorch_2.1.2
 
 ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
 
-RUN --mount=type=cache,target=/var/cache/apt \
+RUN --mount=type=cache,target=/var/cache/apt,Z \
   apt-get update && \
   # we need those
-  apt-get install -y fonts-dejavu-core rsync git jq moreutils aria2 \
+  apt-get install -y fonts-dejavu-core rsync git jq moreutils aria2 cargo \
   # extensions needs those
   ffmpeg libglfw3-dev libgles2-mesa-dev pkg-config libcairo2 libcairo2-dev build-essential
 
 WORKDIR /
-RUN --mount=type=cache,target=/root/.cache/pip \
+RUN --mount=type=cache,target=/root/.cache/pip,Z \
   git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git && \
   cd stable-diffusion-webui && \
-  git reset --hard cf2772fab0af5573da775e7437e6acdca424f26e && \
-  pip install -r requirements_versions.txt
-
+#  git reset --hard cf2772fab0af5573da775e7437e6acdca424f26e && \
+  git checkout tags/v1.9.3 && \
+  pip install --upgrade pip && \
+  pip install -r requirements.txt && \
+  pip install pytorch_lightning==1.7.7 torchmetrics==0.11.4 pydantic==1.10.11 && \
+  mkdir repositories && cd repositories && \
+  git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets.git
 
 ENV ROOT=/stable-diffusion-webui
 
 COPY --from=download /repositories/ ${ROOT}/repositories/
 RUN mkdir ${ROOT}/interrogate && cp ${ROOT}/repositories/clip-interrogator/clip_interrogator/data/* ${ROOT}/interrogate
 
-RUN --mount=type=cache,target=/root/.cache/pip \
+RUN --mount=type=cache,target=/root/.cache/pip,Z \
   pip install -r ${ROOT}/repositories/CodeFormer/requirements.txt
 
-RUN --mount=type=cache,target=/root/.cache/pip \
-  pip install pyngrok xformers==0.0.23.post1 \
+RUN --mount=type=cache,target=/root/.cache/pip,Z \
+  # pip install pyngrok xformers==0.0.23.post1 \
+  pip install pyngrok \
   git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379 \
   git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1 \
   git+https://github.com/mlfoundations/open_clip.git@v2.20.0
@@ -58,11 +64,14 @@ COPY . /docker
 RUN \
   # mv ${ROOT}/style.css ${ROOT}/user.css && \
   # one of the ugliest hacks I ever wrote \
-  sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/lib/python3.10/site-packages/gradio/routes.py && \
+#  ls /opt/conda/lib/python3.12/site-packages && \
+#  which python3 && \
+#  pip show gradio && \
+  sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/envs/py_3.10/lib/python3.10/site-packages/gradio/routes.py && \
   git config --global --add safe.directory '*'
 
 WORKDIR ${ROOT}
-ENV NVIDIA_VISIBLE_DEVICES=all
+# ENV NVIDIA_VISIBLE_DEVICES=all
 ENV CLI_ARGS=""
 EXPOSE 7860
 ENTRYPOINT ["/docker/entrypoint.sh"]
diff --git a/services/comfy/Dockerfile b/services/comfy/Dockerfile
index f813c68..363df0b 100644
--- a/services/comfy/Dockerfile
+++ b/services/comfy/Dockerfile
@@ -1,11 +1,12 @@
-FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime
+# FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime
+FROM rocm/pytorch:rocm6.1_ubuntu22.04_py3.10_pytorch_2.1.2
 
 ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
 
 RUN apt-get update && apt-get install -y git && apt-get clean
 
 ENV ROOT=/stable-diffusion
-RUN --mount=type=cache,target=/root/.cache/pip \
+RUN --mount=type=cache,target=/root/.cache/pip,Z \
   git clone https://github.com/comfyanonymous/ComfyUI.git ${ROOT} && \
   cd ${ROOT} && \
   git checkout master && \
@@ -16,7 +17,8 @@ WORKDIR ${ROOT}
 
 COPY . /docker/
 RUN chmod u+x /docker/entrypoint.sh && cp /docker/extra_model_paths.yaml ${ROOT}
-ENV NVIDIA_VISIBLE_DEVICES=all PYTHONPATH="${PYTHONPATH}:${PWD}" CLI_ARGS=""
+# ENV NVIDIA_VISIBLE_DEVICES=all PYTHONPATH="${PYTHONPATH}:${PWD}" CLI_ARGS=""
+ENV PYTHONPATH="${PYTHONPATH}:${PWD}" CLI_ARGS=""
 EXPOSE 7860
 ENTRYPOINT ["/docker/entrypoint.sh"]
 CMD python -u main.py --listen --port 7860 ${CLI_ARGS}
diff --git a/services/invoke/Dockerfile b/services/invoke/Dockerfile
index 209c368..16d8db1 100644
--- a/services/invoke/Dockerfile
+++ b/services/invoke/Dockerfile
@@ -3,13 +3,14 @@ RUN apk add --no-cache aria2
 RUN aria2c -x 5 --dir / --out wheel.whl 'https://github.com/AbdBarho/stable-diffusion-webui-docker/releases/download/6.0.0/xformers-0.0.21.dev544-cp310-cp310-manylinux2014_x86_64-pytorch201.whl'
 
 
-FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
+# FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
+FROM rocm/pytorch:rocm6.1_ubuntu22.04_py3.10_pytorch_2.1.2
 
 ENV DEBIAN_FRONTEND=noninteractive PIP_EXISTS_ACTION=w PIP_PREFER_BINARY=1
 
 # patch match:
 # https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALL_PATCHMATCH.md
-RUN --mount=type=cache,target=/var/cache/apt \
+RUN --mount=type=cache,target=/var/cache/apt,Z \
   apt-get update && \
   apt-get install make g++ git libopencv-dev -y && \
   apt-get clean && \
@@ -21,20 +22,20 @@ ENV ROOT=/InvokeAI
 RUN git clone https://github.com/invoke-ai/InvokeAI.git ${ROOT}
 WORKDIR ${ROOT}
 
-RUN --mount=type=cache,target=/root/.cache/pip \
+RUN --mount=type=cache,target=/root/.cache/pip,Z \
   git reset --hard f3b2e02921927d9317255b1c3811f47bd40a2bf9 && \
   pip install -e .
 
 
 ARG BRANCH=main SHA=f3b2e02921927d9317255b1c3811f47bd40a2bf9
-RUN --mount=type=cache,target=/root/.cache/pip \
+RUN --mount=type=cache,target=/root/.cache/pip,Z \
   git fetch && \
   git reset --hard && \
   git checkout ${BRANCH} && \
   git reset --hard ${SHA} && \
   pip install -U -e .
 
-RUN --mount=type=cache,target=/root/.cache/pip \
+RUN --mount=type=cache,target=/root/.cache/pip,Z \
   --mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.21-cp310-cp310-linux_x86_64.whl \
   pip install -U opencv-python-headless triton /xformers-0.0.21-cp310-cp310-linux_x86_64.whl && \
   python3 -c "from patchmatch import patch_match"
@@ -42,7 +43,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 
 COPY . /docker/
 
-ENV NVIDIA_VISIBLE_DEVICES=all
+# ENV NVIDIA_VISIBLE_DEVICES=all
 ENV PYTHONUNBUFFERED=1 PRELOAD=false HF_HOME=/root/.cache/huggingface CONFIG_DIR=/data/config/invoke CLI_ARGS=""
 EXPOSE 7860
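-- 
Usage sketch (not part of the diff above): one possible way to exercise this
patch, assuming podman-compose is installed, accepts repeated -f options and
the build/up subcommands the same way docker-compose does, and that the two
compose files are meant to be layered. The file names and the "auto" service
come from the patch; everything else here is an assumption.

    # opt the Dockerfile cache mounts into SELinux relabeling before building
    ./selinux-cache.sh

    # build and start the AUTOMATIC1111 service with both compose files layered
    podman-compose -f docker-compose.yml -f podman-compose.yml build auto
    podman-compose -f docker-compose.yml -f podman-compose.yml up auto

    # drop the ,Z relabel option again when switching back to plain Docker
    ./selinux-cache.sh --disable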