Faptain 2025-09-18 13:39:45 +00:00 committed by GitHub
commit 4a776f7ca3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
38 changed files with 1737 additions and 100 deletions

11
.dockerignore Normal file

@@ -0,0 +1,11 @@
.devscripts
.github
.vscode
.idea
.editorconfig
.gitattributes
.gitignore
*.md
docs
data
output


@@ -12,12 +12,66 @@ jobs:
build:
strategy:
matrix:
profile:
- auto
- comfy
- download
include:
- image: simonmcnair/AUTOMATIC1111
dockerfile: services/AUTOMATIC1111/Dockerfile
context: services/AUTOMATIC1111/
# - image: simonmcnair/AUTOMATIC1111-rocm
# dockerfile: services/AUTOMATIC1111/Dockerfile.rocm
# context: services/AUTOMATIC1111/
- image: simonmcnair/comfy
dockerfile: services/comfy/Dockerfile
context: services/comfy/
- image: simonmcnair/download
dockerfile: services/download/Dockerfile
context: services/download/
- image: simonmcnair/fooocus
dockerfile: services/fooocus/Dockerfile
context: services/fooocus/
- image: simonmcnair/forge
dockerfile: services/forge/Dockerfile
context: services/forge/
- image: simonmcnair/reforge
dockerfile: services/reforge/Dockerfile
context: services/reforge/
runs-on: ubuntu-latest
name: ${{ matrix.profile }}
steps:
- uses: actions/checkout@v3
- run: docker compose --profile ${{ matrix.profile }} build --progress plain
# - uses: actions/checkout@v3
# - run: docker compose --profile ${{ matrix.profile }} build --progress plain
- name: Checkout
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
with:
images: ${{ matrix.image }}
- name: Build and push Docker image
uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
with:
context: ${{ matrix.context }}
file: ${{ matrix.dockerfile }}
push: true
# This is needed because the default branch is called main instead of master, to get the latest tag in Docker
# tags: |
# set latest tag for default branch
# type=raw,value=latest,enable={{is_default_branch}}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

2
.gitignore vendored

@@ -4,3 +4,5 @@
# VSCode specific
*.code-workspace
/.vscode
.idea
TODO.md


@@ -1,3 +1,18 @@
I have forked this as AbdBarho hasn't been around for a while. I'm not great with git, Linux, Docker or Stable Diffusion, but I manage to get along a bit, slowly, and with more commits than it should take.
I am happy to look at PRs as and when. I'm also happy to pass this back as and when, and if, they return.
supports:
- AUTOMATIC1111
- comfy
- fooocus
- forge
- reforge
- swarmui _**(be aware the ComfyUI backend can take some time to start, up to a minute or two)**_
#### TODOs
1. [ ] Fix `[Warning] [ComfyUI-0/STDERR] NameError: name 'NODE_CLASS_MAPPINGS' is not defined`
# Stable Diffusion WebUI Docker
Run Stable Diffusion on your machine with a nice UI without any hassle!
@@ -26,6 +41,14 @@ This repository provides multiple UIs for you to play around with stable diffusi
| -------------------------------------------------------------------------------- |
| ![](https://github.com/comfyanonymous/ComfyUI/raw/master/comfyui_screenshot.png) |
### [Fooocus](https://github.com/lllyasviel/Fooocus)
[Full feature list here](https://github.com/lllyasviel/Fooocus#fooocus), Screenshot:
| Simplified UI |
| -------------------------------------------------------------------------------- |
| ![](https://github.com/lllyasviel/Fooocus/assets/19834515/483fb86d-c9a2-4c20-997c-46dafc124f25) |
## Contributing
Contributions are welcome! **Create a discussion first of what the problem is and what you want to contribute (before you implement anything)**

6
data/.gitignore vendored

@@ -1,4 +1,2 @@
/.cache
/config
/embeddings
/models
/*
!/.gitignore


@@ -1,18 +1,33 @@
x-base_service: &base_service
stop_signal: SIGKILL
tty: true
restart: unless-stopped
deploy:
resources:
# limits:
# cpus: 8
# memory: 48G
reservations:
# cpus: 4
# memory: 24G
devices:
- driver: nvidia
device_ids: ['0']
capabilities: [compute, utility, gpu]
x-defaults: &defaults
<<: *base_service
ports:
- "${WEBUI_PORT:-7860}:7860"
volumes:
- &v1 ./data:/data
- &v2 ./output:/output
stop_signal: SIGKILL
tty: true
deploy:
resources:
reservations:
devices:
- driver: nvidia
device_ids: ['0']
capabilities: [compute, utility]
x-auto_service: &auto_service
<<: *defaults
container_name: auto
build: ./services/AUTOMATIC1111
image: sd-auto:latest
name: webui-docker
@@ -23,33 +38,110 @@ services:
volumes:
- *v1
auto: &automatic
<<: *base_service
auto:
<<: *auto_service
profiles: ["auto"]
build: ./services/AUTOMATIC1111
image: sd-auto:78
environment:
- CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
auto-cpu:
<<: *automatic
<<: *auto_service
profiles: ["auto-cpu"]
deploy: {}
environment:
- CLI_ARGS=--no-half --precision full --allow-code --enable-insecure-extension-access --api
auto-full:
<<: *auto_service
profiles: [ "full" ]
environment:
- CLI_ARGS=--allow-code --xformers --enable-insecure-extension-access --no-half-vae --api
auto-rocm:
<<: *auto_service
profiles: ["auto-rocm"]
container_name: auto-rocm
build:
context: ./services/AUTOMATIC1111
dockerfile: ROCM.dockerfile
devices:
- "/dev/kfd"
- "/dev/dri"
deploy: {}
environment:
- CLI_ARGS=--allow-code --medvram --enable-insecure-extension-access --api
comfy: &comfy
<<: *base_service
profiles: ["comfy"]
build: ./services/comfy/
image: sd-comfy:7
container_name: comfy
build: ./services/comfy
image: sd-comfy:latest
volumes:
- ./data/models:/opt/comfyui/models
- ./data/config/configs:/opt/comfyui/user/default/
- ./data/config/comfy/custom_nodes:/opt/comfyui/custom_nodes
- ./output/comfy:/opt/comfyui/output
ports:
- "${COMFYUI_PORT:-7861}:7861"
environment:
- COMFYUI_PATH=/opt/comfyui
- COMFYUI_MODEL_PATH=/opt/comfyui/models
- CLI_ARGS=
# - TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD=1
comfy-cpu:
<<: *comfy
profiles: ["comfy-cpu"]
deploy: {}
ports:
- "${COMFYUI_PORT:-7861}:7861"
environment:
- CLI_ARGS=--cpu
swarmui:
<<: *base_service
profiles: ["swarmui"]
container_name: swarmui
build: ./services/swarmui
image: sd-swarmui:latest
ports:
- "${SWARMUI_PORT:-7801}:7801"
volumes:
- ./data/swarmui:/SwarmUI/Data
- ./data/models:/SwarmUI/Models
- ./data/embeddings:/SwarmUI/Models/Embeddings
# comfyui
- ./data/config/comfy/custom_nodes:/SwarmUI/dlbackend/ComfyUI/custom_nodes
# - ./data/models/configs:/SwarmUI/dlbackend/ComfyUI/user/default/ # TODO rm old
- ./data/config/configs:/SwarmUI/dlbackend/ComfyUI/user/default/
# output
- ./output/swarmui:/SwarmUI/Output
- ./output/swarmui/comfy:/SwarmUI/dlbackend/ComfyUI/output
environment:
- COMFYUI_PATH=/opt/comfyui
- COMFYUI_MODEL_PATH=/opt/comfyui/models
reforge: &reforge
<<: *base_service
profiles: ["reforge"]
build: ./services/reforge
image: sd-reforge:latest
environment:
- CLI_ARGS=--allow-code --xformers --enable-insecure-extension-access --api --pin-shared-memory --cuda-malloc --cuda-stream
forge: &forge
<<: *base_service
profiles: ["forge"]
build: ./services/forge
image: sd-forge:latest
environment:
- CLI_ARGS=--allow-code --xformers --enable-insecure-extension-access --api --pin-shared-memory --cuda-malloc --cuda-stream
fooocus: &fooocus
<<: *base_service
profiles: ["fooocus"]
build: ./services/fooocus/
image: sd-fooocus:latest
environment:
- CLI_ARGS=

72
docs/FAQ.md Normal file

@@ -0,0 +1,72 @@
# General
Unfortunately, AMD GPUs [#63](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/63) and macOS [#35](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/35) are not supported; contributions to add support are very welcome.
## `auto` exits with error code 137
This is an indicator that the container does not have enough RAM; you need at least 12GB, 16GB recommended.
You might need to [adjust the size of the docker virtual machine RAM](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/296#issuecomment-1480318829) depending on your OS.
## Dockerfile parse error
```
Error response from daemon: dockerfile parse error line 33: unknown instruction: GIT
ERROR: Service 'model' failed to build : Build failed
```
Update Docker to the latest version, and make sure you are using `docker compose` instead of `docker-compose` [#16](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/16). Also, try setting the environment variable `DOCKER_BUILDKIT=1`.
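A minimal sketch of setting it for a single invocation (assuming a bash-like shell):
```bash
# enable BuildKit just for this build
DOCKER_BUILDKIT=1 docker compose --profile auto up --build
```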
## Unknown Flag `--profile`
Update Docker to the latest version, see [this comment](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/165#issuecomment-1296155667), and try setting the environment variable mentioned in the previous point.
## Output is always a green image
Use `--precision full --no-half`. [#9](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/9)
## Found no NVIDIA driver on your system even though the drivers are installed and `nvidia-smi` shows it
Adding `NVIDIA_DRIVER_CAPABILITIES=compute,utility` and `NVIDIA_VISIBLE_DEVICES=all` to the container can resolve this problem. [#348](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/348#issuecomment-1449250332)
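A minimal sketch of adding these via a `docker-compose.override.yml` (the `auto` service is used as an example; apply it to whichever UI you run):
```yaml
services:
  auto:
    environment:
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
      - NVIDIA_VISIBLE_DEVICES=all
```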
---
# Linux
### Error response from daemon: could not select device driver "nvidia" with capabilities: `[[gpu]]`
Install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) and restart the docker service [#81](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/81)
### `docker compose --profile auto up --build` fails with `OSError`
This might be related to the `overlay2` storage driver being used by Docker on top of ZFS; switch to the `zfs` storage driver. [#433](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/433#issuecomment-1694520689)
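One way to switch is via `/etc/docker/daemon.json` (a sketch only; merge with any existing settings and restart the Docker service afterwards):
```json
{
  "storage-driver": "zfs"
}
```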
---
# Windows / WSL
## Build fails at [The Shell command](https://github.com/AbdBarho/stable-diffusion-webui-docker/blob/5af482ed8c975df6aa0210225ad68b218d4f61da/build/Dockerfile#L11), `/bin/bash` not found in WSL.
Edit the corresponding Dockerfile and change the SHELL from `/bin/bash` to `//bin/bash` [#21](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/21). Note: this is a hack, and it means something in your WSL setup is messed up.
## Build fails with credentials errors when logged in via SSH on WSL2/Windows
You can try forcing plain-text credential storage by removing the line with "credStore" from `~/.docker/config.json` (in WSL). [#56](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/56)
## `unable to access 'https://github.com/...': Could not resolve host: github.com` or any domain
Set the `build/network` of the service you are starting to `host`. [#114](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/114#issue-1393683083)
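For example, in a `docker-compose.override.yml` (using `auto` as the service; a sketch only):
```yaml
services:
  auto:
    build:
      network: host
```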
## Other build errors on windows
* Make sure:
* Windows 10 release >= 2021H2 (required for WSL to see the GPU)
* WSL2 (check with `wsl -l -v`)
* Latest Docker Desktop
* You might need to create a [`.wslconfig`](https://docs.microsoft.com/en-us/windows/wsl/wsl-config#example-wslconfig-file) and increase memory; if you have 16GB RAM, set the limit to something around 12GB (see the sketch after this list), [#34](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/34) [#64](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/64)
* You might also need to [force wsl to allow file permissions](https://superuser.com/a/1646556)
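A sketch of such a `.wslconfig` (placed in `%UserProfile%`; the memory value is only an example for a 16GB machine):
```ini
[wsl2]
memory=12GB
```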
---
# AWS
You have to use one of AWS's GPU-enabled VMs and their Deep Learning OS images. These have the right drivers, the toolkit and all the rest already installed and optimized. [#70](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/70)

1
docs/Home.md Normal file

@@ -0,0 +1 @@
Welcome to the stable-diffusion-webui-docker wiki!

25
docs/Podman-Support.md Normal file

@@ -0,0 +1,25 @@
Thanks to [RedTopper](https://github.com/RedTopper) for this guide! [#352](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/352)
On an SELinux machine like Fedora, append `:z` to the volume mounts. This tells SELinux that the files are shared between containers (required for the download service and your UI of choice to access the same files). *Properly merging `:z` with an override will work in podman-compose 1.0.7, specifically commit https://github.com/containers/podman-compose/commit/0b853f29f44b846bee749e7ae9a5b42679f2649f*
```yaml
x-base_service: &base_service
volumes:
- &v1 ./data:/data:z
- &v2 ./output:/output:z
```
You'll also need to add the runtime and security opts to allow access to your GPU. This can be specified in an override, no new versions required! More information can be found at this RedHat post: [How to enable NVIDIA GPUs in containers](https://www.redhat.com/en/blog/how-use-gpus-containers-bare-metal-rhel-8).
```yaml
x-base_service: &base_service
...
runtime: nvidia
security_opt:
- label=type:nvidia_container_t
```
I also had to add `,Z` to the pip/apt caches for them to work. On the first build everything will be fine without the fix, but on the second+ build, you may get a "file not found" when pip goes to install a package from the cache. Here's a script to do this easily, along with more info: https://github.com/RedTopper/Stable-Diffusion-Webui-Podman/blob/podman/selinux-cache.sh.
Lastly, delete all the services you don't want to use. *Using `--profile` will work in podman compose 1.0.7, specifically commit https://github.com/containers/podman-compose/commit/8d8df0bc2816d8e8fa142781d9018a06fe0d08ed*

11
docs/Screenshots.md Normal file

@@ -0,0 +1,11 @@
# AUTOMATIC1111
| Text to image | Image to image | Extras |
| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- |
| ![](https://user-images.githubusercontent.com/24505302/189541954-46afd772-d0c8-4005-874c-e2eca40c02f2.jpg) | ![](https://user-images.githubusercontent.com/24505302/189541956-5b528de7-1b5d-479f-a1db-d3f5a53afc59.jpg) | ![](https://user-images.githubusercontent.com/24505302/189541957-cf78b352-a071-486d-8889-f26952779a61.jpg) |
# lstein (invokeAI)
| Text to image | Image to image | Extras |
| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- |
| ![](https://user-images.githubusercontent.com/24505302/195158552-39f58cb6-cfcc-4141-9995-a626e3760752.jpg) | ![](https://user-images.githubusercontent.com/24505302/195158553-152a0ab8-c0fd-4087-b121-4823bcd8d6b5.jpg) | ![](https://user-images.githubusercontent.com/24505302/195158548-e118206e-c519-4915-85d6-4c248eb10fc0.jpg) |

51
docs/Setup.md Normal file

@@ -0,0 +1,51 @@
# Make sure you have the *latest* version of docker and docker compose installed
TLDR:
clone this repo and run:
```bash
docker compose --profile download up --build
# wait until its done, then:
docker compose --profile [ui] up --build
# where [ui] is one of: auto | auto-cpu | comfy | comfy-cpu
```
If you don't know which UI to choose, `auto` is a good start.
Then access it at http://localhost:7860/.
Unfortunately, AMD GPUs [#63](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/63) and Mac [#35](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues/35) are not supported; contributions to add support are very welcome!
If you face any problems, check the [FAQ page](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/FAQ), or [create a new issue](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues).
### Detailed Steps
First of all, clone this repo; you can do this with `git`, or you can download a zip file. Please always use the most up-to-date state from the `master` branch: even though we have releases, everything is changing and breaking all the time.
After cloning, open a terminal in the folder and run:
```
docker compose --profile download up --build
```
This will download all of the required models / files, and validate their integrity. You only have to download the data once (regardless of the UI). There are roughly 12GB of data to be downloaded.
Next, choose which UI you want to run (you can easily change later):
- `auto`: The most popular fork, many features with neat UI, [Repo by AUTOMATIC1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- `auto-cpu`: for users without a GPU.
- `comfy`: A graph based workflow UI, very powerful, [Repo by comfyanonymous](https://github.com/comfyanonymous/ComfyUI)
After the download is done, you can run the UI using:
```bash
docker compose --profile [ui] up --build
# for example:
# docker compose --profile comfy up --build
# or
# docker compose --profile auto up --build
```
This will start the app on http://localhost:7860/. Feel free to try out the different UIs.
Note: the first start will take some time since additional models will be downloaded; these are cached in the `data` folder, so subsequent runs are faster. First-time setup might take between 15 minutes and 1 hour depending on your internet connection; later starts are much faster, roughly 20 seconds.

73
docs/Usage.md Normal file

@@ -0,0 +1,73 @@
Assuming you [setup everything correctly](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/Setup), you can run any UI (interchangeably, but not in parallel) using the command:
```bash
docker compose --profile [ui] up --build
```
where `[ui]` is one of `auto`, `auto-cpu`, `comfy`, or `comfy-cpu`.
### Mounts
The `data` and `output` folders are always mounted into the container as `/data` and `/output`; use them if you want to transfer anything to or from the container.
### Updates
If you want to update to the latest version, just pull the changes:
```bash
git pull
```
You can also checkout specific tags if you want.
### Customization
If you want to customize the behaviour of the UIs, you can create a `docker-compose.override.yml` and override whatever you want from the [main `docker-compose.yml` file](https://github.com/AbdBarho/stable-diffusion-webui-docker/blob/master/docker-compose.yml). Example:
```yml
services:
auto:
environment:
- CLI_ARGS=--lowvram
```
Possible configuration:
# `auto`
By default, `--medvram` is given, which allows you to use this model on a 6GB GPU; you can also use `--lowvram` for lower-end GPUs. Remove these arguments if you are using a (relatively) high-end GPU, like 40XX-series cards, as these arguments will slow you down.
[You can find the full list of cli arguments here.](https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/shared.py)
### Custom models
Put the weights in the folder `data/StableDiffusion`, you can then change the model from the settings tab.
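For example (the file name is just a placeholder):
```bash
cp ~/Downloads/my-model.safetensors data/StableDiffusion/
```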
### General Config
There are multiple files in `data/config/auto`, such as `config.json` and `ui-config.json`, which contain additional configuration for the UI.
### Scripts
Put your scripts in `data/config/auto/scripts` and restart the container.
### Extensions
You can use the UI to install extensions, or you can put your extensions in `data/config/auto/extensions`.
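For example, installing an extension manually (the repository shown is only an illustration):
```bash
git clone https://github.com/Mikubill/sd-webui-controlnet data/config/auto/extensions/sd-webui-controlnet
```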
Different extensions require additional dependencies. Some of them might conflict with each other and changing versions of packages could break things. This container will try to install missing extension dependencies on startup, but it won't resolve any problems for you.
There is also the option to create a script `data/config/auto/startup.sh` which will be called on container startup, in case you want to install any additional dependencies for your extensions or anything else.
An example `startup.sh` might look like this:
```sh
#!/bin/bash
# opencv-python-headless to not rely on opengl and drivers.
pip install -q --force-reinstall opencv-python-headless
```
NOTE: dependencies of extensions might get lost when you create a new container, hence installing them in the startup script is important.
It is not recommended to modify the Dockerfile for the sole purpose of supporting some extension (unless you truly know what you are doing).
### **DON'T OPEN AN ISSUE IF A SCRIPT OR AN EXTENSION IS NOT WORKING**
I maintain neither the UI nor the extensions; I can't help you.
# `auto-cpu`
CPU instance of the above, some stuff might not work, use at your own risk.


@@ -1,4 +1,4 @@
FROM alpine/git:2.36.2 as download
FROM alpine/git:2.36.2 AS download
COPY clone.sh /clone.sh
@@ -13,26 +13,26 @@ RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interr
RUN . /clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f
RUN . /clone.sh stable-diffusion-webui-assets https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets 6f7db241d2f8ba7457bac5ca9753331f0c266917
FROM pytorch/pytorch:2.3.0-cuda12.1-cudnn8-runtime
#FROM pytorch/pytorch:2.7.1-cuda12.8-cudnn9-runtime
ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && \
# we need those
apt-get install -y fonts-dejavu-core rsync git jq moreutils aria2 \
apt-get install -y \
fonts-dejavu-core rsync git jq moreutils aria2 \
# extensions needs those
ffmpeg libglfw3-dev libgles2-mesa-dev pkg-config libcairo2 libcairo2-dev build-essential
ffmpeg libglfw3-dev libgles2-mesa-dev pkg-config libcairo2 libcairo2-dev build-essential libgoogle-perftools-dev && \
apt-get clean
WORKDIR /
RUN --mount=type=cache,target=/root/.cache/pip \
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git && \
cd stable-diffusion-webui && \
git reset --hard v1.9.4 && \
pip install -r requirements_versions.txt
# git reset --hard v1.9.4 && \
pip install -r requirements_versions.txt && pip install --upgrade typing-extensions
ENV ROOT=/stable-diffusion-webui
@@ -46,8 +46,6 @@ RUN --mount=type=cache,target=/root/.cache/pip \
git+https://github.com/mlfoundations/open_clip.git@v2.20.0
# there seems to be a memory leak (or maybe just memory not being freed fast enough) that is fixed by this version of malloc
# maybe move this up to the dependencies list.
RUN apt-get -y install libgoogle-perftools-dev && apt-get clean
ENV LD_PRELOAD=libtcmalloc.so
COPY . /docker
@@ -60,7 +58,8 @@ RUN \
WORKDIR ${ROOT}
ENV NVIDIA_VISIBLE_DEVICES=all
ENV CLI_ARGS=""
EXPOSE 7860
ARG CLI_ARGS=""
ENV WEBUI_PORT=7860
EXPOSE $WEBUI_PORT
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}
CMD python -u webui.py --listen --port $WEBUI_PORT ${CLI_ARGS}


@@ -0,0 +1,74 @@
FROM alpine/git:2.36.2 as download
COPY clone.sh /clone.sh
RUN rm -rf "/usr/local/share/boost"
RUN rm -rf "$AGENT_TOOLSDIRECTORY"
RUN . /clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf \
&& rm -rf assets data/**/*.png data/**/*.jpg data/**/*.gif
RUN . /clone.sh CodeFormer https://github.com/sczhou/CodeFormer.git c5b4593074ba6214284d6acd5f1719b6c5d739af \
&& rm -rf assets inputs
RUN . /clone.sh BLIP https://github.com/salesforce/BLIP.git 48211a1594f1321b00f14c9f7a5b4813144b2fb9
RUN . /clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git ab527a9a6d347f364e3d185ba6d714e22d80cb3c
RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2cf03aaf6e704197fd0dae7c7f96aa59cf1b11c9
RUN . /clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f
RUN . /clone.sh stable-diffusion-webui-assets https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets 6f7db241d2f8ba7457bac5ca9753331f0c266917
FROM rocm/pytorch:rocm6.0.2_ubuntu22.04_py3.10_pytorch_2.1.2
ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && \
# we need those
apt-get install -y fonts-dejavu-core rsync git jq moreutils aria2 \
# extensions needs those
ffmpeg libglfw3-dev libgles2-mesa-dev pkg-config libcairo2 libcairo2-dev build-essential libgoogle-perftools-dev && \
apt-get clean
RUN python -m pip install --upgrade pip wheel
WORKDIR /
RUN --mount=type=cache,target=/root/.cache/pip \
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git && \
cd stable-diffusion-webui && \
git reset --hard cf2772fab0af5573da775e7437e6acdca424f26e && \
pip install -r requirements_versions.txt
ENV ROOT=/stable-diffusion-webui
COPY --from=download /repositories/ ${ROOT}/repositories/
RUN mkdir ${ROOT}/interrogate && cp ${ROOT}/repositories/clip-interrogator/clip_interrogator/data/* ${ROOT}/interrogate
RUN --mount=type=cache,target=/root/.cache/pip \
pip install -r ${ROOT}/repositories/CodeFormer/requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip \
pip install pyngrok xformers==0.0.23.post1 \
git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379 \
git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1 \
git+https://github.com/mlfoundations/open_clip.git@v2.20.0
# there seems to be a memory leak (or maybe just memory not being freed fast enough) that is fixed by this version of malloc
# maybe move this up to the dependencies list.
RUN apt-get -y install libgoogle-perftools-dev && apt-get clean
ENV LD_PRELOAD=libtcmalloc.so
COPY . /docker
RUN \
# mv ${ROOT}/style.css ${ROOT}/user.css && \
# one of the ugliest hacks I ever wrote \
sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/envs/py_3.10/lib/python3.10/site-packages/gradio/routes.py && \
git config --global --add safe.directory '*'
WORKDIR ${ROOT}
ENV CLI_ARGS=""
EXPOSE 7860
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}


@@ -8,4 +8,4 @@ git init
git remote add origin "$2"
git fetch origin "$3" --depth=1
git reset --hard "$3"
rm -rf .git
rm -rf .git


@@ -1,5 +1,5 @@
#!/bin/bash
#set -x
set -Eeuo pipefail
# TODO: move all mkdir -p ?
@@ -26,12 +26,13 @@ fi
# copy models from original models folder
mkdir -p /data/models/VAE-approx/ /data/models/karlo/
rsync -a --info=NAME ${ROOT}/models/VAE-approx/ /data/models/VAE-approx/
rsync -a --info=NAME ${ROOT}/models/karlo/ /data/models/karlo/
rsync --info=NAME ${ROOT}/models/VAE-approx/ /data/models/VAE-approx/
rsync --info=NAME ${ROOT}/models/karlo/ /data/models/karlo/
declare -A MOUNTS
MOUNTS["/root/.cache"]="/data/.cache"
#MOUNTS["${USER_HOME}/.cache"]="/data/.cache"
MOUNTS["${ROOT}/models"]="/data/models"
MOUNTS["${ROOT}/embeddings"]="/data/embeddings"
@@ -50,6 +51,7 @@ for to_path in "${!MOUNTS[@]}"; do
rm -rf "${to_path}"
if [ ! -f "$from_path" ]; then
mkdir -vp "$from_path"
# mkdir -vp "$from_path" || true
fi
mkdir -vp "$(dirname "${to_path}")"
ln -sT "${from_path}" "${to_path}"
@@ -58,9 +60,11 @@ done
echo "Installing extension dependencies (if any)"
# because we build our container as root:
chown -R root ~/.cache/
chmod 766 ~/.cache/
#chown -R $PUID:$PGID ~/.cache/
#chmod 766 ~/.cache/
#
#chown -R $PUID:$PGID /output
#chmod 766 /output
shopt -s nullglob
# For install.py, please refer to https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions#installpy


@@ -1,22 +1,74 @@
FROM pytorch/pytorch:2.3.0-cuda12.1-cudnn8-runtime
# Defines the versions of ComfyUI, ComfyUI Manager, and PyTorch to use
ARG COMFYUI_VERSION=v0.3.59
ARG COMFYUI_MANAGER_VERSION=3.35
ARG PYTORCH_VERSION=2.8.0-cuda12.9-cudnn9-runtime
# This image is based on the latest official PyTorch image, because it already contains CUDA, CuDNN, and PyTorch
FROM pytorch/pytorch:${PYTORCH_VERSION}
ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
RUN apt-get update && apt-get install -y git && apt-get clean
RUN apt update --assume-yes && \
apt install --assume-yes \
git \
sudo \
build-essential \
libgl1-mesa-glx \
libglib2.0-0 \
libsm6 \
libxext6 \
ffmpeg && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
ENV ROOT=/stable-diffusion
RUN --mount=type=cache,target=/root/.cache/pip \
git clone https://github.com/comfyanonymous/ComfyUI.git ${ROOT} && \
cd ${ROOT} && \
git checkout master && \
git reset --hard 276f8fce9f5a80b500947fb5745a4dde9e84622d && \
pip install -r requirements.txt
# Clones the ComfyUI repository and checks out the latest release
RUN git clone --depth=1 https://github.com/comfyanonymous/ComfyUI.git /opt/comfyui && \
cd /opt/comfyui && \
git fetch origin ${COMFYUI_VERSION} && \
git checkout FETCH_HEAD
WORKDIR ${ROOT}
# Clones the ComfyUI Manager repository and checks out the latest release; ComfyUI Manager is an extension for ComfyUI that enables users to install
# custom nodes and download models directly from the ComfyUI interface; instead of installing it to "/opt/comfyui/custom_nodes/ComfyUI-Manager", which
# is the directory it is meant to be installed in, it is installed to its own directory; the entrypoint will symlink the directory to the correct
# location upon startup; the reason for this is that the ComfyUI Manager must be installed in the same directory that it installs custom nodes to, but
# this directory is mounted as a volume, so that the custom nodes are not installed inside of the container and are not lost when the container is
# removed; this way, the custom nodes are installed on the host machine
RUN git clone --depth=1 https://github.com/Comfy-Org/ComfyUI-Manager.git /opt/comfyui-manager && \
cd /opt/comfyui-manager && \
git fetch origin ${COMFYUI_MANAGER_VERSION} && \
git checkout FETCH_HEAD
# Installs the required Python packages for both ComfyUI and the ComfyUI Manager
RUN pip install \
--requirement /opt/comfyui/requirements.txt \
--requirement /opt/comfyui-manager/requirements.txt
RUN pip3 install --no-cache-dir \
opencv-python \
diffusers \
triton \
sageattention \
psutil
# Pre-install previously used custom nodes requirements from volume
COPY ./install/merged-requirements.txt* /docker/requirements.txt
RUN sh -c '[ -f /docker/requirements.txt ] && pip install --no-cache-dir -r /docker/requirements.txt \
|| echo "merged-requirements.txt not found, skipping pre-install."'
# Sets the working directory to the ComfyUI directory
WORKDIR /opt/comfyui
COPY . /docker/
RUN chmod u+x /docker/entrypoint.sh && cp /docker/extra_model_paths.yaml ${ROOT}
RUN chmod u+x /docker/entrypoint.sh && cp /docker/extra_model_paths.yaml /opt/comfyui
ENV NVIDIA_VISIBLE_DEVICES=all PYTHONPATH="${PYTHONPATH}:${PWD}" CLI_ARGS=""
EXPOSE 7860
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD python -u main.py --listen --port 7860 ${CLI_ARGS}
ENV NVIDIA_VISIBLE_DEVICES=all PYTHONPATH="\${PYTHONPATH}:\${PWD}" CLI_ARGS=""
EXPOSE 7861
# Adds the startup script to the container; the startup script will create all necessary directories in the models and custom nodes volumes that were
# mounted to the container and symlink the ComfyUI Manager to the correct directory; it will also create a user with the same UID and GID as the user
# that started the container, so that the files created by the container are owned by the user that started the container and not the root user
ENTRYPOINT ["/bin/bash", "/docker/entrypoint.sh"]
# On startup, ComfyUI is started at its default port; the IP address is changed from localhost to 0.0.0.0, because Docker is only forwarding traffic
# to the IP address it assigns to the container, which is unknown at build time; listening to 0.0.0.0 means that ComfyUI listens to all incoming
# traffic; the auto-launch feature is disabled, because we do not want (nor is it possible) to open a browser window in a Docker container
CMD ["/opt/conda/bin/python", "main.py", "--listen", "0.0.0.0", "--port", "7861", "--disable-auto-launch"]


@@ -1,31 +1,71 @@
#!/bin/bash
set -Eeuo pipefail
mkdir -vp /data/config/comfy/custom_nodes
declare -A MOUNTS
MOUNTS["/root/.cache"]="/data/.cache"
MOUNTS["${ROOT}/input"]="/data/config/comfy/input"
MOUNTS["${ROOT}/output"]="/output/comfy"
for to_path in "${!MOUNTS[@]}"; do
set -Eeuo pipefail
from_path="${MOUNTS[${to_path}]}"
rm -rf "${to_path}"
if [ ! -f "$from_path" ]; then
mkdir -vp "$from_path"
fi
mkdir -vp "$(dirname "${to_path}")"
ln -sT "${from_path}" "${to_path}"
echo Mounted $(basename "${from_path}")
# Creates the directories for the models inside of the volume that is mounted from the host
echo "Creating directories for models..."
MODEL_DIRECTORIES=(
"checkpoints"
"clip"
"clip_vision"
"configs"
"controlnet"
"diffusers"
"diffusion_models"
"embeddings"
"gligen"
"hypernetworks"
"loras"
"photomaker"
"style_models"
"text_encoders"
"unet"
"upscale_models"
"vae"
"vae_approx"
)
for MODEL_DIRECTORY in ${MODEL_DIRECTORIES[@]}; do
mkdir -p /opt/comfyui/models/$MODEL_DIRECTORY
done
if [ -f "/data/config/comfy/startup.sh" ]; then
pushd ${ROOT}
. /data/config/comfy/startup.sh
popd
fi
# Creates the symlink for the ComfyUI Manager to the custom nodes directory, which is also mounted from the host
echo "Creating symlink for ComfyUI Manager..."
rm --force /opt/comfyui/custom_nodes/ComfyUI-Manager
ln -s \
/opt/comfyui-manager \
/opt/comfyui/custom_nodes/ComfyUI-Manager
exec "$@"
# The custom nodes that were installed using the ComfyUI Manager may have requirements of their own, which are not installed when the container is
# started for the first time; this loops over all custom nodes and installs the requirements of each custom node
echo "Installing requirements for custom nodes..."
for CUSTOM_NODE_DIRECTORY in /opt/comfyui/custom_nodes/*;
do
if [ "$CUSTOM_NODE_DIRECTORY" != "/opt/comfyui/custom_nodes/ComfyUI-Manager" ];
then
if [ -f "$CUSTOM_NODE_DIRECTORY/requirements.txt" ];
then
CUSTOM_NODE_NAME=${CUSTOM_NODE_DIRECTORY##*/}
CUSTOM_NODE_NAME=${CUSTOM_NODE_NAME//[-_]/ }
echo "Installing requirements for $CUSTOM_NODE_NAME..."
pip install --requirement "$CUSTOM_NODE_DIRECTORY/requirements.txt"
fi
fi
done
# Under normal circumstances, the container would be run as the root user, which is not ideal, because the files that are created by the container in
# the volumes mounted from the host, i.e., custom nodes and models downloaded by the ComfyUI Manager, are owned by the root user; the user can specify
# the user ID and group ID of the host user as environment variables when starting the container; if these environment variables are set, a non-root
# user with the specified user ID and group ID is created, and the container is run as this user
if [ -z "$USER_ID" ] || [ -z "$GROUP_ID" ];
then
echo "Running container as $USER..."
exec "$@"
else
echo "Creating non-root user..."
getent group $GROUP_ID > /dev/null 2>&1 || groupadd --gid $GROUP_ID comfyui-user
id -u $USER_ID > /dev/null 2>&1 || useradd --uid $USER_ID --gid $GROUP_ID --create-home comfyui-user
chown --recursive $USER_ID:$GROUP_ID /opt/comfyui
chown --recursive $USER_ID:$GROUP_ID /opt/comfyui-manager
export PATH=$PATH:/home/comfyui-user/.local/bin
echo "Running container as $USER..."
sudo --set-home --preserve-env=PATH --user \#$USER_ID "$@"
fi
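A possible way to set these, assuming a `docker-compose.override.yml` (the base compose file does not pass them by default; the values below are examples):
```yaml
services:
  comfy:
    environment:
      - USER_ID=1000
      - GROUP_ID=1000
```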


@@ -1,25 +1,34 @@
a111:
base_path: /data
base_path: /opt/comfyui
# base_path: /data
checkpoints: models/Stable-diffusion
configs: models/Stable-diffusion
configs: user/default
# configs: models/configs
vae: models/VAE
loras: models/Lora
loras: |
models/Lora
models/loras
hypernetworks: models/hypernetworks
controlnet: models/controlnet
gligen: models/GLIGEN
clip: models/CLIPEncoder
embeddings: embeddings
unet: models/unet
upscale_models: |
models/RealESRGAN
models/ESRGAN
models/SwinIR
models/GFPGAN
hypernetworks: models/hypernetworks
controlnet: models/ControlNet
gligen: models/GLIGEN
clip: models/CLIPEncoder
embeddings: embeddings
custom_nodes: config/comfy/custom_nodes
models/upscale_models
diffusion_models: models/diffusion_models
text_encoders: models/text_encoders
clip_vision: models/clip_vision
custom_nodes: /opt/comfyui/custom_nodes
# custom_nodes: config/comfy/custom_nodes
# TODO: I am unsure about these, need more testing
# style_models: config/comfy/style_models
# t2i_adapter: config/comfy/t2i_adapter
# clip_vision: config/comfy/clip_vision
# diffusers: config/comfy/diffusers

2
services/comfy/install/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
reqs
merged-requirements.txt


@@ -0,0 +1,52 @@
#!/bin/bash
# Get custom nodes requirements and merge latest versions
REQ_PATH="data/config/comfy/custom_nodes"
BUILD_PATH=$(dirname "$0")
mkdir -p ${BUILD_PATH}/reqs
for f in ${REQ_PATH}/*/requirements.txt; do \
node=$(basename $(dirname "$f")); \
cp "$f" ${BUILD_PATH}/reqs/${node}-requirements.txt; \
done
find ${BUILD_PATH}/reqs -maxdepth 1 -name "*requirements.txt" -exec cat {} + \
| grep -v '^#' \
| grep -v '^git' \
| sed 's/==.*//' \
| awk '{print tolower($0)}' \
| sed 's/[[:space:]]//g' \
| sort -u \
| awk '
{
line = $0;
if (line ~ /^[[:space:]]*$/) { next }
if (line ~ /git\+/ || line ~ /\[.*\]/) {
print "Z_" line, "0", line
next
}
split(line, a, "[<>=]")
package = a[1]
version = a[2]
gsub(/[[:space:]]+/, "", package)
gsub(/_/, "-", package)
if (version == "") {
version = "0"
}
print package, version, line
}
' \
| sort -k1,1 -V -k2,2 \
| awk '
{
if (prev_package != $1) {
if (NR > 1) {
print prev_line
}
prev_package = $1
}
prev_line = $3
}
END {
print prev_line
}
' \
> ${BUILD_PATH}/merged-requirements.txt
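A possible invocation from the repository root before rebuilding the comfy image (the script path is an assumption; adjust it to wherever this file lives):
```bash
# collect requirements from previously installed custom nodes, then rebuild
bash services/comfy/install/merge-requirements.sh
docker compose --profile comfy build
```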


@@ -1,6 +1,12 @@
FROM bash:alpine3.19
ARG PUID=0
ARG PGID=0
# set build args as container environment variables for entrypoint reference
ENV PUID=$PUID
ENV PGID=$PGID
RUN apk update && apk add parallel aria2
COPY . /docker
COPY --chown=$PUID:$PGID . /docker
RUN chmod +x /docker/download.sh
ENTRYPOINT ["/docker/download.sh"]


@@ -0,0 +1,85 @@
FROM alpine:3.17 as xformers
RUN apk add --no-cache aria2
RUN aria2c -x 5 --dir / --out wheel.whl 'https://github.com/AbdBarho/stable-diffusion-webui-docker/releases/download/6.0.0/xformers-0.0.21.dev544-cp310-cp310-manylinux2014_x86_64-pytorch201.whl'
FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
RUN apt-get update && apt-get install -y git libglib2.0-0 libgl1-mesa-glx python-dev libgoogle-perftools-dev && apt-get clean
ARG PUID=0
ARG PGID=0
ARG USER_HOME=/root
# set build args as container environment variables for entrypoint reference
ENV PUID=$PUID
ENV PGID=$PGID
ENV USER_HOME=$USER_HOME
# if user home does not exist, create it
RUN mkdir -p "$USER_HOME"
# home already exists, chown it
RUN chown -R "${PUID}:${PGID}" "$USER_HOME"
# Only groupadd if we're non root
RUN if [ "$PGID" -ne "0" ]; then \
echo non root group detected; \
groupadd \
--gid "$PGID" \
stablediffusion ;\
else \
echo "root group detected" ; \
fi
# Only useradd if we're non root
RUN if [ "$PUID" -ne "0" ]; then \
echo non root user detected; \
useradd \
--gid="$PGID" \
--no-user-group \
-M \
--home "$USER_HOME" \
stablediffusion ; \
else \
echo "root group detected" ; \
fi
# set this to your target branch commit
ARG BRANCH=main SHA=e2f9bcb11d06216d6800676c48d8d74d6fd77a4b
ENV ROOT=/stable-diffusion
# drop permissions (if build targets non root)
USER $PUID:$PGID
RUN --mount=type=cache,target=${USER_HOME}/.cache/pip \
git clone https://github.com/lllyasviel/Fooocus.git ${ROOT} && \
cd ${ROOT} && \
git checkout ${BRANCH} && \
git reset --hard ${SHA} && \
pip install -r requirements_versions.txt
RUN chown -R "$PUID:$PGID" "${ROOT}"
RUN --mount=type=cache,target=${USER_HOME}/.cache/pip \
--mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.21-cp310-cp310-linux_x86_64.whl \
pip install /xformers-0.0.21-cp310-cp310-linux_x86_64.whl
WORKDIR ${ROOT}
RUN --mount=type=cache,target=${USER_HOME}/.cache/pip \
git fetch && \
git checkout ${BRANCH} && \
git reset --hard ${SHA} && \
pip install -r requirements_versions.txt
# add info
COPY --chown=$PUID:$PGID . /docker
RUN cp /docker/config.txt ${ROOT}
RUN chmod u+x /docker/entrypoint.sh
EXPOSE 7860
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD python -u entry_with_update.py --listen --port 7860 ${CLI_ARGS}


@@ -0,0 +1,12 @@
{
"path_checkpoints": "/stable-diffusion/models/checkpoints",
"path_loras": "/stable-diffusion/models/loras",
"path_embeddings": "/stable-diffusion/models/embeddings",
"path_vae_approx": "/stable-diffusion/models/vae_approx",
"path_upscale_models": "/stable-diffusion/models/upscale_models",
"path_inpaint": "/stable-diffusion/models/inpaint",
"path_controlnet": "/stable-diffusion/models/controlnet",
"path_clip_vision": "/stable-diffusion/models/clip_vision",
"path_fooocus_expansion": "/stable-diffusion/models/prompt_expansion/fooocus_expansion",
"path_outputs": "/stable-diffusion/outputs"
}


@@ -0,0 +1,33 @@
#!/bin/bash
set -Eeuo pipefail
mkdir -vp /data/config/fooocus/wildcards
declare -A MOUNTS
MOUNTS["${ROOT}/outputs"]="/output/fooocus"
# ui specific mounts
MOUNTS["${ROOT}/models/checkpoints"]=/data/models/Stable-diffusion/
MOUNTS["${ROOT}/models/loras"]=/data/models/Lora/
MOUNTS["${ROOT}/models/embeddings"]=/data/models/embeddings/
MOUNTS["${ROOT}/models/vae_approx"]=/data/models/VAE/
MOUNTS["${ROOT}/models/upscale_models"]=/data/models/upscale_models/
MOUNTS["${ROOT}/wildcards"]=/data/config/fooocus/wildcards
for to_path in "${!MOUNTS[@]}"; do
set -Eeuo pipefail
from_path="${MOUNTS[${to_path}]}"
rm -rf "${to_path}"
mkdir -p "$(dirname "${to_path}")"
# ends with slash, make it!
if [[ "$from_path" == */ ]]; then
mkdir -vp "$from_path"
fi
ln -sT "${from_path}" "${to_path}"
echo Mounted $(basename "${from_path}")
done
exec "$@"

127
services/forge/Dockerfile Normal file

@@ -0,0 +1,127 @@
FROM alpine/git:2.36.2 AS download
COPY clone.sh /clone.sh
RUN . /clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf \
&& rm -rf assets data/**/*.png data/**/*.jpg data/**/*.gif
RUN . /clone.sh CodeFormer https://github.com/sczhou/CodeFormer.git c5b4593074ba6214284d6acd5f1719b6c5d739af \
&& rm -rf assets inputs
RUN . /clone.sh BLIP https://github.com/salesforce/BLIP.git 48211a1594f1321b00f14c9f7a5b4813144b2fb9
RUN . /clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git ab527a9a6d347f364e3d185ba6d714e22d80cb3c
RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2cf03aaf6e704197fd0dae7c7f96aa59cf1b11c9
RUN . /clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f
RUN . /clone.sh stable-diffusion-webui-assets https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets.git 6f7db241d2f8ba7457bac5ca9753331f0c266917
RUN . /clone.sh huggingface_guess https://github.com/lllyasviel/huggingface_guess.git 70942022b6bcd17d941c1b4172804175758618e2
FROM pytorch/pytorch:2.3.1-cuda12.1-cudnn8-runtime
ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
RUN --mount=type=cache,target=/root/apt \
apt-get update && \
# we need those
apt-get install -y fonts-dejavu-core rsync git jq moreutils aria2 \
# extensions needs those
ffmpeg libglfw3-dev libgles2-mesa-dev pkg-config libcairo2 libcairo2-dev build-essential libgoogle-perftools-dev && \
apt-get clean
ARG PUID=0
ARG PGID=0
ARG USER_HOME=/root
# set build args as container environment variables for entrypoint reference
ENV PUID=$PUID
ENV PGID=$PGID
ENV USER_HOME=$USER_HOME
# if user home does not exist, create it
RUN mkdir -p "$USER_HOME"
# home already exists, chown it
RUN chown -R "${PUID}:${PGID}" "$USER_HOME"
# Only groupadd if we're non root
RUN if [ "$PGID" -ne "0" ]; then \
echo non root group detected; \
groupadd \
--gid "$PGID" \
stablediffusion ;\
else \
echo "root group detected" ; \
fi
# Only useradd if we're non root
RUN if [ "$PUID" -ne "0" ]; then \
echo non root user detected; \
useradd \
--gid="$PGID" \
--no-user-group \
-M \
--home "$USER_HOME" \
stablediffusion ; \
else \
echo "root group detected" ; \
fi
ENV ROOT=/stable-diffusion-webui-forge
WORKDIR /
RUN --mount=type=cache,target=/root/.cache/forge-repo \
git clone https://github.com/lllyasviel/stable-diffusion-webui-forge.git && \
cd stable-diffusion-webui-forge && \
sed -i '/torch/d' requirements_versions.txt && \
pip install -r requirements_versions.txt
RUN if [ -d "/opt/conda/lib/python3.10" ]; then \
echo Python 3.10 detected; \
sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/lib/python3.10/site-packages/gradio/routes.py ;\
elif [ -d "/opt/conda/lib/python3.11" ]; then \
echo Python 3.11 detected; \
sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/lib/python3.11/site-packages/gradio/routes.py ;\
fi && \
# mv ${ROOT}/style.css ${ROOT}/user.css && \
# one of the ugliest hacks I ever wrote \
# updated from 3.10 to 3.11
git config --global --add safe.directory '*'
# drop permissions (if build targets non root)
USER $PUID:$PGID
COPY --from=download --chown=${PUID}:${PGID} /repositories/ ${ROOT}/repositories/
RUN mkdir ${ROOT}/interrogate && cp ${ROOT}/repositories/clip-interrogator/clip_interrogator/data/* ${ROOT}/interrogate
RUN --mount=type=cache,target=/root/.cache/codeformer pip install -r ${ROOT}/repositories/CodeFormer/requirements.txt
# Clone and copy huggingface_guess module
#RUN git clone https://github.com/lllyasviel/huggingface_guess.git /tmp/huggingface_guess && \
# cp -r /tmp/huggingface_guess/huggingface_guess ${ROOT}/huggingface_guess
# Ensure torchvision is correctly installed
RUN --mount=type=cache,target=/root/.cache/torch pip install torchvision==0.18.1
RUN --mount=type=cache,target=/root/.cache/repos \
pip install pyngrok xformers==0.0.27 pytorch_lightning torchdiffeq torchsde \
git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379 \
git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1 \
git+https://github.com/mlfoundations/open_clip.git@v2.20.0
# there seems to be a memory leak (or maybe just memory not being freed fast enough) that is fixed by this version of malloc
# maybe move this up to the dependencies list.
ENV LD_PRELOAD=libtcmalloc.so
COPY . /docker
RUN chown -R "$PUID:$PGID" "${ROOT}"
RUN chown -R "$PUID:$PGID" /docker
WORKDIR ${ROOT}
ENV NVIDIA_VISIBLE_DEVICES=all
ENV CLI_ARGS=""
EXPOSE 7860
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}

11
services/forge/clone.sh Normal file

@@ -0,0 +1,11 @@
#!/bin/bash
set -Eeuox pipefail
mkdir -p /repositories/"$1"
cd /repositories/"$1"
git init
git remote add origin "$2"
git fetch origin "$3" --depth=1
git reset --hard "$3"
rm -rf .git

78
services/forge/config.py Normal file

@@ -0,0 +1,78 @@
#!/usr/bin/env python3
"""Checks and sets default values for config.json before starting the container."""
import json
import re
import os.path
import sys
DEFAULT_FILEPATH = '/data/config/forge/config.json'
DEFAULT_OUTDIRS = {
"outdir_samples": "",
"outdir_txt2img_samples": "/output/txt2img",
"outdir_img2img_samples": "/output/img2img",
"outdir_extras_samples": "/output/extras",
"outdir_grids": "",
"outdir_txt2img_grids": "/output/txt2img-grids",
"outdir_img2img_grids": "/output/img2img-grids",
"outdir_save": "/output/saved",
"outdir_init_images": "/output/init-images",
}
RE_VALID_OUTDIR = re.compile(r"(^/output(/\.?[\w\-\_]+)+/?$)|(^\s?$)")
DEFAULT_OTHER = {
"font": "DejaVuSans.ttf",
}
def dict_to_json_file(target_file: str, data: dict):
"""Write dictionary to specified json file"""
with open(target_file, 'w') as f:
json.dump(data, f)
def json_file_to_dict(config_file: str) -> dict|None:
"""Load json file into a dictionary. Return None if file does not exist."""
if os.path.isfile(config_file):
with open(config_file, 'r') as f:
return json.load(f)
else:
return None
def replace_if_invalid(value: str, replacement: str, pattern: str|re.Pattern[str]) -> str:
"""Returns original value if valid, fallback value if invalid"""
if re.match(pattern, value):
return value
else:
return replacement
def check_and_replace_config(config_file: str, target_file: str = None):
"""Checks given file for invalid values. Replaces those with fallback values (default: overwrites file)."""
# Get current user config, or empty if file does not exists
data = json_file_to_dict(config_file) or {}
# Check and fix output directories
for k, def_val in DEFAULT_OUTDIRS.items():
if k not in data:
data[k] = def_val
else:
data[k] = replace_if_invalid(value=data[k], replacement=def_val, pattern=RE_VALID_OUTDIR)
# Check and fix other default settings
for k, def_val in DEFAULT_OTHER.items():
if k not in data:
data[k] = def_val
# Write results to file
dict_to_json_file(target_file or config_file, data)
if __name__ == '__main__':
if len(sys.argv) > 1:
check_and_replace_config(*sys.argv[1:])
else:
check_and_replace_config(DEFAULT_FILEPATH)

86
services/forge/entrypoint.sh Executable file

@@ -0,0 +1,86 @@
#!/bin/bash
set -Eeuo pipefail
# TODO: move all mkdir -p ?
mkdir -p /data/config/forge/scripts/
# mount scripts individually
echo $ROOT
ls -lha $ROOT
find "${ROOT}/scripts/" -maxdepth 1 -type l -delete
cp -vrfTs /data/config/forge/scripts/ "${ROOT}/scripts/"
# Set up config file
python /docker/config.py /data/config/forge/config.json
if [ ! -f /data/config/forge/ui-config.json ]; then
echo '{}' >/data/config/forge/ui-config.json
fi
if [ ! -f /data/config/forge/styles.csv ]; then
touch /data/config/forge/styles.csv
fi
# copy models from original models folder
mkdir -p /data/models/VAE-approx/ /data/models/karlo/
rsync -a --info=NAME ${ROOT}/models/VAE-approx/ /data/models/VAE-approx/
rsync -a --info=NAME ${ROOT}/models/karlo/ /data/models/karlo/
declare -A MOUNTS
MOUNTS["${USER_HOME}/.cache"]="/data/.cache"
MOUNTS["${ROOT}/models"]="/data/models"
MOUNTS["${ROOT}/embeddings"]="/data/embeddings"
MOUNTS["${ROOT}/config.json"]="/data/config/forge/config.json"
MOUNTS["${ROOT}/ui-config.json"]="/data/config/forge/ui-config.json"
MOUNTS["${ROOT}/styles.csv"]="/data/config/forge/styles.csv"
MOUNTS["${ROOT}/extensions"]="/data/config/forge/extensions"
MOUNTS["${ROOT}/config_states"]="/data/config/forge/config_states"
# extra hacks
MOUNTS["${ROOT}/repositories/CodeFormer/weights/facelib"]="/data/.cache"
for to_path in "${!MOUNTS[@]}"; do
set -Eeuo pipefail
from_path="${MOUNTS[${to_path}]}"
rm -rf "${to_path}"
if [ ! -f "$from_path" ]; then
mkdir -vp "$from_path"
fi
mkdir -vp "$(dirname "${to_path}")"
ln -sT "${from_path}" "${to_path}"
echo Mounted $(basename "${from_path}")
done
chown -R $PUID:$PGID ~/.cache/
chmod 766 ~/.cache/
chown -R $PUID:$PGID /output
chmod 766 /output
echo "Installing extension dependencies (if any)"
shopt -s nullglob
# For install.py, please refer to https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions#installpy
list=(./extensions/*/install.py)
for installscript in "${list[@]}"; do
EXTNAME=$(echo $installscript | cut -d '/' -f 3)
# Skip installing dependencies if extension is disabled in config
if $(jq -e ".disabled_extensions|any(. == \"$EXTNAME\")" config.json); then
echo "Skipping disabled extension ($EXTNAME)"
continue
fi
PYTHONPATH=${ROOT} python "$installscript"
done
if [ -f "/data/config/forge/startup.sh" ]; then
pushd ${ROOT}
echo "Running startup script"
. /data/config/forge/startup.sh
popd
fi
exec "$@"


@@ -0,0 +1,84 @@
FROM alpine/git:2.36.2 AS download
COPY clone.sh /clone.sh
RUN . /clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf \
&& rm -rf assets data/**/*.png data/**/*.jpg data/**/*.gif
RUN . /clone.sh CodeFormer https://github.com/sczhou/CodeFormer.git c5b4593074ba6214284d6acd5f1719b6c5d739af \
&& rm -rf assets inputs
RUN . /clone.sh BLIP https://github.com/salesforce/BLIP.git 48211a1594f1321b00f14c9f7a5b4813144b2fb9
RUN . /clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git ab527a9a6d347f364e3d185ba6d714e22d80cb3c
RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2cf03aaf6e704197fd0dae7c7f96aa59cf1b11c9
RUN . /clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f
RUN . /clone.sh stable-diffusion-webui-assets https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets.git 6f7db241d2f8ba7457bac5ca9753331f0c266917
FROM pytorch/pytorch:2.3.1-cuda12.1-cudnn8-runtime
ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && \
# we need those
apt-get install -y fonts-dejavu-core rsync git jq moreutils aria2 \
# extensions needs those
ffmpeg libglfw3-dev libgles2-mesa-dev pkg-config libcairo2 libcairo2-dev build-essential
WORKDIR /
RUN --mount=type=cache,target=/root/.cache/pip \
git clone https://github.com/Panchovix/stable-diffusion-webui-reForge.git /stable-diffusion-webui-reforge && \
cd stable-diffusion-webui-reforge && \
sed -i '/torch/d' requirements_versions.txt && \
pip install -r requirements_versions.txt
ENV ROOT=/stable-diffusion-webui-reforge
COPY --from=download /repositories/ ${ROOT}/repositories/
RUN mkdir ${ROOT}/interrogate && cp ${ROOT}/repositories/clip-interrogator/clip_interrogator/data/* ${ROOT}/interrogate
RUN --mount=type=cache,target=/root/.cache/pip \
pip install -r ${ROOT}/repositories/CodeFormer/requirements.txt
# Clone and copy huggingface_guess module
RUN git clone https://github.com/lllyasviel/huggingface_guess.git /tmp/huggingface_guess && \
cp -r /tmp/huggingface_guess/huggingface_guess ${ROOT}/huggingface_guess
# Ensure torchvision is correctly installed
RUN --mount=type=cache,target=/root/.cache/pip \
pip install torchvision==0.18.1
RUN --mount=type=cache,target=/root/.cache/pip \
pip install pyngrok xformers==0.0.27 pytorch_lightning==1.6.5 torchdiffeq torchsde \
git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379 \
git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1 \
git+https://github.com/mlfoundations/open_clip.git@v2.20.0
# there seems to be a memory leak (or maybe just memory not being freed fast enough) that is fixed by this version of malloc
# maybe move this up to the dependencies list.
RUN apt-get -y install libgoogle-perftools-dev && apt-get clean
ENV LD_PRELOAD=libtcmalloc.so
COPY . /docker
RUN pip install pydantic==1.10.21
RUN if [ -d "/opt/conda/lib/python3.10" ]; then \
echo Python 3.10 detected; \
sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/lib/python3.10/site-packages/gradio/routes.py ;\
elif [ -d "/opt/conda/lib/python3.11" ]; then \
echo Python 3.11 detected; \
sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/lib/python3.11/site-packages/gradio/routes.py ;\
fi && \
# mv ${ROOT}/style.css ${ROOT}/user.css && \
# one of the ugliest hacks I ever wrote \
# updated from 3.10 to 3.11
git config --global --add safe.directory '*'
WORKDIR ${ROOT}
ENV NVIDIA_VISIBLE_DEVICES=all
ENV CLI_ARGS=""
EXPOSE 7860
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}

11
services/reforge/clone.sh Normal file

@@ -0,0 +1,11 @@
#!/bin/bash
set -Eeuox pipefail
mkdir -p /repositories/"$1"
cd /repositories/"$1"
git init
git remote add origin "$2"
git fetch origin "$3" --depth=1
git reset --hard "$3"
rm -rf .git


@@ -0,0 +1,78 @@
#!/usr/bin/env python3
"""Checks and sets default values for config.json before starting the container."""
import json
import re
import os.path
import sys
DEFAULT_FILEPATH = '/data/config/reforge/config.json'
DEFAULT_OUTDIRS = {
"outdir_samples": "",
"outdir_txt2img_samples": "/output/txt2img",
"outdir_img2img_samples": "/output/img2img",
"outdir_extras_samples": "/output/extras",
"outdir_grids": "",
"outdir_txt2img_grids": "/output/txt2img-grids",
"outdir_img2img_grids": "/output/img2img-grids",
"outdir_save": "/output/saved",
"outdir_init_images": "/output/init-images",
}
RE_VALID_OUTDIR = re.compile(r"(^/output(/\.?[\w\-\_]+)+/?$)|(^\s?$)")
DEFAULT_OTHER = {
"font": "DejaVuSans.ttf",
}
def dict_to_json_file(target_file: str, data: dict):
"""Write dictionary to specified json file"""
with open(target_file, 'w') as f:
json.dump(data, f)
def json_file_to_dict(config_file: str) -> dict|None:
"""Load json file into a dictionary. Return None if file does not exist."""
if os.path.isfile(config_file):
with open(config_file, 'r') as f:
return json.load(f)
else:
return None
def replace_if_invalid(value: str, replacement: str, pattern: str|re.Pattern[str]) -> str:
"""Returns original value if valid, fallback value if invalid"""
if re.match(pattern, value):
return value
else:
return replacement
def check_and_replace_config(config_file: str, target_file: str = None):
    """Checks given file for invalid values. Replaces those with fallback values (default: overwrites file)."""
    # Get current user config, or empty if file does not exist
    data = json_file_to_dict(config_file) or {}
    # Check and fix output directories
    for k, def_val in DEFAULT_OUTDIRS.items():
        if k not in data:
            data[k] = def_val
        else:
            data[k] = replace_if_invalid(value=data[k], replacement=def_val, pattern=RE_VALID_OUTDIR)
    # Check and fix other default settings
    for k, def_val in DEFAULT_OTHER.items():
        if k not in data:
            data[k] = def_val
    # Write results to file
    dict_to_json_file(target_file or config_file, data)


if __name__ == '__main__':
    if len(sys.argv) > 1:
        check_and_replace_config(*sys.argv[1:])
    else:
        check_and_replace_config(DEFAULT_FILEPATH)
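# Usage sketch (the first form mirrors the entrypoint below; the second is an assumption
# based on the optional target_file argument, and the target path is hypothetical):
#   python /docker/config.py /data/config/reforge/config.json
#   python /docker/config.py /data/config/reforge/config.json /tmp/config.checked.json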

85
services/reforge/entrypoint.sh Executable file

@ -0,0 +1,85 @@
#!/bin/bash
set -Eeuo pipefail
# TODO: move all mkdir -p ?
mkdir -p /data/config/reforge/scripts/
# mount scripts individually
echo $ROOT
ls -lha $ROOT
find "${ROOT}/scripts/" -maxdepth 1 -type l -delete
cp -vrfTs /data/config/reforge/scripts/ "${ROOT}/scripts/"
# Set up config file
python /docker/config.py /data/config/reforge/config.json
if [ ! -f /data/config/reforge/ui-config.json ]; then
echo '{}' >/data/config/reforge/ui-config.json
fi
if [ ! -f /data/config/reforge/styles.csv ]; then
touch /data/config/reforge/styles.csv
fi
# copy models from original models folder
mkdir -p /data/models/VAE-approx/ /data/models/karlo/
rsync -a --info=NAME ${ROOT}/models/VAE-approx/ /data/models/VAE-approx/
rsync -a --info=NAME ${ROOT}/models/karlo/ /data/models/karlo/
declare -A MOUNTS
MOUNTS["/root/.cache"]="/data/.cache"
MOUNTS["${ROOT}/models"]="/data/models"
MOUNTS["${ROOT}/embeddings"]="/data/embeddings"
MOUNTS["${ROOT}/config.json"]="/data/config/reforge/config.json"
MOUNTS["${ROOT}/ui-config.json"]="/data/config/reforge/ui-config.json"
MOUNTS["${ROOT}/styles.csv"]="/data/config/reforge/styles.csv"
MOUNTS["${ROOT}/extensions"]="/data/config/reforge/extensions"
MOUNTS["${ROOT}/config_states"]="/data/config/reforge/config_states"
# extra hacks
MOUNTS["${ROOT}/repositories/CodeFormer/weights/facelib"]="/data/.cache"
for to_path in "${!MOUNTS[@]}"; do
set -Eeuo pipefail
from_path="${MOUNTS[${to_path}]}"
rm -rf "${to_path}"
if [ ! -f "$from_path" ]; then
mkdir -vp "$from_path"
fi
mkdir -vp "$(dirname "${to_path}")"
ln -sT "${from_path}" "${to_path}"
echo Mounted $(basename "${from_path}")
done
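# For illustration: after this loop ${ROOT}/models, ${ROOT}/embeddings, ${ROOT}/config.json, etc.
# are symlinks into /data, e.g. ${ROOT}/models -> /data/models (targets as declared in MOUNTS above).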
echo "Installing extension dependencies (if any)"
# because we build our container as root:
chown -R root ~/.cache/
chmod 766 ~/.cache/
shopt -s nullglob
# For install.py, please refer to https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions#installpy
list=(./extensions/*/install.py)
for installscript in "${list[@]}"; do
EXTNAME=$(echo $installscript | cut -d '/' -f 3)
# Skip installing dependencies if extension is disabled in config
if $(jq -e ".disabled_extensions|any(. == \"$EXTNAME\")" config.json); then
echo "Skipping disabled extension ($EXTNAME)"
continue
fi
PYTHONPATH=${ROOT} python "$installscript"
done
if [ -f "/data/config/reforge/startup.sh" ]; then
pushd ${ROOT}
echo "Running startup script"
. /data/config/reforge/startup.sh
popd
fi
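# A minimal /data/config/reforge/startup.sh might, for example, install an extra tool before the
# UI starts (the package name below is purely illustrative):
#   pip install --no-cache-dir some-extra-package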
exec "$@"


@ -0,0 +1,95 @@
ARG SWARMUI_VERSION=0.9.7-Beta
ARG COMFYUI_VERSION=v0.3.59
ARG COMFYUI_MANAGER_VERSION=3.35
ARG DOTNET_VERSION=9.0-bookworm-slim
#ARG DOTNET_VERSION=8.0-bookworm-slim
ARG ASPNET_VERSION=9.0-bookworm-slim
#ARG ASPNET_VERSION=8.0-bookworm-slim
ARG SWARM_PATH="/SwarmUI"
# BUILD
FROM mcr.microsoft.com/dotnet/sdk:${DOTNET_VERSION} AS build
ARG SWARM_PATH
ENV DOTNET_CLI_TELEMETRY_OPTOUT=1
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && \
apt-get install -y git
WORKDIR ${SWARM_PATH}
# Clones the SwarmUI repository and checks out the latest release
RUN git clone --depth=1 https://github.com/mcmonkeyprojects/SwarmUI.git ${SWARM_PATH} && \
cd ${SWARM_PATH} && \
git fetch origin ${SWARMUI_VERSION} && \
git checkout FETCH_HEAD
RUN dotnet build src/SwarmUI.csproj --configuration Release -o ./bin
# RUN
FROM mcr.microsoft.com/dotnet/aspnet:${ASPNET_VERSION}
ARG SWARM_PATH
ENV DOTNET_CLI_TELEMETRY_OPTOUT=1
ARG SWARMUI_USER_ID=1000
ARG SWARMUI_GROUP_ID=1000
ARG GPU_TYPE="nv"
ENV NVIDIA_VISIBLE_DEVICES=all
ENV CLI_ARGS=""
RUN addgroup --gid $SWARMUI_GROUP_ID swarmui && \
adduser --uid $SWARMUI_USER_ID --gid $SWARMUI_GROUP_ID --gecos "" --disabled-password swarmui
COPY --from=build ${SWARM_PATH} "${SWARM_PATH}/"
RUN mkdir -p "${SWARM_PATH}/Data" && \
chown -R swarmui:swarmui ${SWARM_PATH}
ENV HOME=${SWARM_PATH}
RUN --mount=type=cache,target=/var/cache/apt \
apt update --assume-yes && \
apt install -y \
git \
wget \
build-essential \
python3.11 \
python3.11-venv \
python3.11-dev \
python3-pip \
ffmpeg \
libglib2.0-0 \
libgl1
# Install ComfyUI
RUN git clone --depth=1 https://github.com/comfyanonymous/ComfyUI.git /opt/comfyui && \
cd /opt/comfyui && \
git fetch origin ${COMFYUI_VERSION} && \
git checkout FETCH_HEAD
RUN git clone --depth=1 https://github.com/Comfy-Org/ComfyUI-Manager.git /opt/comfyui-manager && \
cd /opt/comfyui-manager && \
git fetch origin ${COMFYUI_MANAGER_VERSION} && \
git checkout FETCH_HEAD
WORKDIR ${SWARM_PATH}
RUN chown -R swarmui:swarmui /opt/comfyui
RUN git config --global --add safe.directory /opt/comfyui
ENV COMFYUI_PATH="/SwarmUI/dlbackend/ComfyUI"
ENV CUSTOM_NODES_PATH="/SwarmUI/dlbackend/ComfyUI/custom_nodes"
# Pre-install previously used custom nodes requirements from volume
COPY ./install/merged-requirements.txt* /docker/requirements.txt
COPY comfy-install-linux.sh ${SWARM_PATH}/launchtools/
RUN chmod +x ${SWARM_PATH}/launchtools/comfy-install-linux.sh && \
${SWARM_PATH}/launchtools/comfy-install-linux.sh ${GPU_TYPE}
RUN chown -R swarmui:swarmui ${COMFYUI_PATH}/venv # Reapplied again bc of permissions issues, maybe related to a symlink/docker/windows bug
ENV PATH="${COMFYUI_PATH}/venv/bin:$PATH"
ENV PYTHONPATH="${CUSTOM_NODES_PATH}:\${PYTHONPATH}"
USER swarmui
EXPOSE 7801
COPY entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh", "--launch_mode", "none", "--host", "0.0.0.0"]
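# Example build (assumed invocation; build args default to the values declared at the top, and the
# image tag is illustrative):
#   docker build --build-arg GPU_TYPE=amd --build-arg SWARMUI_VERSION=0.9.7-Beta -t swarmui:rocm .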


@ -0,0 +1,104 @@
#!/usr/bin/env bash
# Check if GPU type is provided
if [ $# -eq 0 ]; then
>&2 echo "Error: GPU type not specified. Please use 'amd' or 'nv' as an argument."
exit 1
fi
GPU_TYPE=$1
# Validate GPU type
if [ "$GPU_TYPE" != "amd" ] && [ "$GPU_TYPE" != "nv" ]; then
>&2 echo "Error: Invalid GPU type. Please use 'amd' or 'nv'."
exit 1
fi
mkdir -p dlbackend
# Creates the symlink for the ComfyUI directory
echo "Creating symlink for ComfyUI..."
rm --force ${COMFYUI_PATH}
ln -s \
/opt/comfyui \
${COMFYUI_PATH}
echo "Creating symlink for ComfyUI Manager..."
rm --force ${CUSTOM_NODES_PATH}/ComfyUI-Manager
ln -s \
/opt/comfyui-manager \
${CUSTOM_NODES_PATH}/ComfyUI-Manager
#cd ComfyUI
cd ${COMFYUI_PATH}
# Try to find a good python executable, and dodge unsupported python versions
for pyvers in python3.11 python3.10 python3.12 python3 python
do
python=`which $pyvers`
if [ "$python" != "" ]; then
break
fi
done
if [ "$python" == "" ]; then
>&2 echo "ERROR: cannot find python3"
>&2 echo "Please follow the install instructions in the readme!"
exit 1
fi
# Validate venv
venv=`$python -m venv 2>&1`
case $venv in
*usage*)
:
;;
*)
>&2 echo "ERROR: python venv is not installed"
>&2 echo "Please follow the install instructions in the readme!"
>&2 echo "If on Ubuntu/Debian, you may need: sudo apt install python3-venv"
exit 1
;;
esac
# Make and activate the venv. "python3" in the venv is now the python executable.
if [ -z "${SWARM_NO_VENV}" ]; then
echo "Making venv..."
$python -s -m venv venv
source venv/bin/activate
python=python3
python3 -m ensurepip --upgrade
else
echo "swarm_no_venv set, will not create venv"
fi
# Install PyTorch based on GPU type
if [ "$GPU_TYPE" == "nv" ]; then
echo "install nvidia torch..."
$python -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu128
elif [ "$GPU_TYPE" == "amd" ]; then
echo "install amd torch..."
$python -s -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.3
fi
echo "install general requirements..."
$python -s -m pip install --no-cache-dir \
triton \
sageattention \
opencv-python \
diffusers \
psutil
# Installs the required Python packages for both ComfyUI and the ComfyUI Manager
$python -s -m pip install --no-cache-dir \
--requirement ${COMFYUI_PATH}/requirements.txt \
--requirement ${CUSTOM_NODES_PATH}/ComfyUI-Manager/requirements.txt
# Pre-install previously used custom nodes requirements from volume
if [ -f "/docker/requirements.txt" ]; then
echo "pre-install custom nodes requirements..."
$python -s -m pip install --no-cache-dir -r /docker/requirements.txt
elif [ "$GPU_TYPE" == "amd" ]; then
echo "merged-requirements.txt not found, skipping pre-install."
fi
echo "Installation completed for $GPU_TYPE GPU."


@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Ensure correct local path.
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
cd "$SCRIPT_DIR"
# Add dotnet non-admin-install to path
export PATH="$SCRIPT_DIR/.dotnet:$HOME/.dotnet:$PATH"
# Default env configuration, gets overwritten by the C# code's settings handler
export ASPNETCORE_ENVIRONMENT="Production"
export ASPNETCORE_URLS="http://*:7801"
chmod -R 755 ${COMFYUI_PATH}/user/default/
chmod -R 755 /opt/comfyui/user/default/
echo "Using Python at: $(which python)"
echo "Python version: $(python --version)"
# The custom nodes that were installed using the ComfyUI Manager may have requirements of their own, which are not installed when the container is
# started for the first time; this loops over all custom nodes and installs the requirements of each custom node
echo "Installing requirements for custom nodes..."
for CUSTOM_NODE_DIRECTORY in ${CUSTOM_NODES_PATH}/*;
do
if [ "$CUSTOM_NODE_DIRECTORY" != "${CUSTOM_NODES_PATH}/ComfyUI-Manager" ];
then
if [ -f "$CUSTOM_NODE_DIRECTORY/requirements.txt" ];
then
CUSTOM_NODE_NAME=${CUSTOM_NODE_DIRECTORY##*/}
CUSTOM_NODE_NAME=${CUSTOM_NODE_NAME//[-_]/ }
echo "Installing requirements for $CUSTOM_NODE_NAME..."
python3 -s -m pip install --requirement "$CUSTOM_NODE_DIRECTORY/requirements.txt"
fi
fi
done
# Actual runner.
cd "$HOME"
dotnet ./bin/SwarmUI.dll "$@"
# Exit code 42 means restart, anything else = don't.
if [ $? == 42 ]; then
. /entrypoint.sh "$@"
fi

2
services/swarmui/install/.gitignore vendored Normal file

@ -0,0 +1,2 @@
reqs
merged-requirements.txt


@ -0,0 +1,52 @@
#!/bin/bash
# Get custom nodes requirements and merge latest versions
REQ_PATH="data/config/comfy/custom_nodes"
BUILD_PATH=$(dirname "$0")
mkdir -p ${BUILD_PATH}/reqs
for f in ${REQ_PATH}/*/requirements.txt; do \
node=$(basename $(dirname "$f")); \
cp "$f" ${BUILD_PATH}/reqs/${node}-requirements.txt; \
done
find ${BUILD_PATH}/reqs -maxdepth 1 -name "*requirements.txt" -exec cat {} + \
| grep -v '^#' \
| grep -v '^git' \
| sed 's/==.*//' \
| awk '{print tolower($0)}' \
| sed 's/[[:space:]]//g' \
| sort -u \
| awk '
{
line = $0;
if (line ~ /^[[:space:]]*$/) { next }
if (line ~ /git\+/ || line ~ /\[.*\]/) {
print "Z_" line, "0", line
next
}
split(line, a, "[<>=]")
package = a[1]
version = a[2]
gsub(/[[:space:]]+/, "", package)
gsub(/_/, "-", package)
if (version == "") {
version = "0"
}
print package, version, line
}
' \
| sort -k1,1 -V -k2,2 \
| awk '
{
if (prev_package != $1) {
if (NR > 1) {
print prev_line
}
prev_package = $1
}
prev_line = $3
}
END {
print prev_line
}
' \
> ${BUILD_PATH}/merged-requirements.txt
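# Illustrative example (hypothetical input; note that "==" pins are stripped earlier in the pipeline):
# given per-node requirement files containing
#   numpy==1.24.4
#   numpy>=1.26
#   opencv-python
# the merged output keeps one entry per package, preferring the highest remaining version constraint:
#   numpy>=1.26
#   opencv-python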