clean/consolidate dockerfile and change *.py to sed commands

commit 74cea377af, parent 015c2ec829
mirror of https://github.com/AbdBarho/stable-diffusion-webui-docker.git, synced 2026-02-03 14:14:18 +01:00

.gitignore (vendored): 1 line changed
@@ -1,5 +1,6 @@
/.devcontainer
/docker-compose.override.yml
.DS_Store

# VSCode specific
*.code-workspace
docker-compose.yml
@@ -71,3 +71,11 @@ services:
    deploy: {}
    environment:
      - CLI_ARGS=--cpu

  auto-m1:
    <<: *automatic
    profiles: ["auto-m1"]
    deploy: {}
    environment:
      - CLI_ARGS=--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate
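
The new profile makes the Apple-silicon variant opt-in. Assuming the repo's usual compose workflow, it would be started with:

docker compose --profile auto-m1 up --build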

services/AUTOMATIC1111/Dockerfile
@@ -1,69 +1,226 @@
FROM alpine/git:2.36.2 as download

COPY clone.sh /clone.sh
# docker build -t sdwui:test -f Dockercleaned .
# docker run -it -p 7860:7861 sdwui:test
# FROM alpine/git:2.36.2 as download

# COPY clone.sh /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh


RUN . /clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf \
    && rm -rf assets data/**/*.png data/**/*.jpg data/**/*.gif
# # RUN . /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf \
# #     && rm -rf assets data/**/*.png data/**/*.jpg data/**/*.gif
# RUN . /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf

RUN . /clone.sh CodeFormer https://github.com/sczhou/CodeFormer.git c5b4593074ba6214284d6acd5f1719b6c5d739af \
    && rm -rf assets inputs
# RUN . /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh CodeFormer https://github.com/sczhou/CodeFormer.git c5b4593074ba6214284d6acd5f1719b6c5d739af \
#     && rm -rf assets inputs

RUN . /clone.sh BLIP https://github.com/salesforce/BLIP.git 48211a1594f1321b00f14c9f7a5b4813144b2fb9
RUN . /clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git ab527a9a6d347f364e3d185ba6d714e22d80cb3c
RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2cf03aaf6e704197fd0dae7c7f96aa59cf1b11c9
RUN . /clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f
# RUN . /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh BLIP https://github.com/salesforce/BLIP.git 48211a1594f1321b00f14c9f7a5b4813144b2fb9
# RUN . /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git ab527a9a6d347f364e3d185ba6d714e22d80cb3c
# RUN . /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2cf03aaf6e704197fd0dae7c7f96aa59cf1b11c9
# RUN . /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f
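
Note: clone.sh itself is not shown in this diff. Judging from how it is invoked (`. /clone.sh <name> <url> <sha>`) and from the later `mv /repositories/*`, it presumably performs a shallow, pinned clone into /repositories/<name>; a minimal sketch under those assumptions:

#!/bin/sh
# Hypothetical reconstruction of clone.sh (the script is not included in this diff)
mkdir -p /repositories/"$1"
cd /repositories/"$1"
git init
git remote add origin "$2"
git fetch origin "$3" --depth=1    # shallow-fetch only the pinned commit
git reset --hard "$3"
rm -rf .git                        # drop history to keep the final image small

Because the script is sourced with `.`, the working directory stays inside the freshly cloned repo, which is why commands like `rm -rf assets inputs` right after the invocation can use relative paths.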

###################################################################################################################################################
###################################################################################################################################################
###################################################################################################################################################
FROM ubuntu:22.04

FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime
# Set env vars
ARG GRADIO_SERVER_PORT=7860
ENV GRADIO_SERVER_PORT=${GRADIO_SERVER_PORT}

ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
# These settings prevent a timezone prompt when Python installs
ENV TZ=US/Pacific \
    DEBIAN_FRONTEND=noninteractive

RUN --mount=type=cache,target=/var/cache/apt \
    apt-get update && \
    # we need those
    apt-get install -y fonts-dejavu-core rsync git jq moreutils aria2 \
    # extensions needs those
    ffmpeg libglfw3-dev libgles2-mesa-dev pkg-config libcairo2 libcairo2-dev build-essential
RUN echo "#################################################"
RUN echo "Get the latest APT packages"
RUN echo "apt-get update"
RUN apt-get update

# Install AUTOMATIC1111 pre-requisites
RUN apt-get install -y \
    cmake \
    rustc \
    git-all \
    wget \
    apt-utils \
    jq \
    python3-pip

WORKDIR /
RUN --mount=type=cache,target=/root/.cache/pip \
    git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git && \
    cd stable-diffusion-webui && \
    git reset --hard cf2772fab0af5573da775e7437e6acdca424f26e
RUN apt -y autoremove && apt autoclean

# Install Python pre-requisites, including Python 3.x
# Google perftools includes TCMalloc, which helps with CPU memory usage
RUN apt-get install -y \
    software-properties-common \
    python3 \
    python3-pip \
    python3-ipykernel \
    libopencv-dev \
    python3-opencv \
    python3.10-venv \
    google-perftools \
    sudo

RUN apt -y autoremove && apt autoclean

# Configure git
RUN git config --global user.name "Some One" && \
    git config --global user.email some.one@some.one && \
    git config --global init.defaultBranch main

RUN sudo apt purge gcc -y
RUN sudo apt purge libomp-dev -y
RUN export USE_OPENMP=1
ENV USE_OPENMP=1
RUN sudo apt-get install gcc libomp-dev -y

# Check versions
RUN echo | cpp -fopenmp -dM | grep -i open
RUN gcc --version

RUN echo "deb http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/ubuntu/ trusty main" | sudo tee /etc/apt/sources.list.d/ibm-xl-compiler-eval.list
RUN sudo apt-get update

RUN "/usr/bin/python3" -m pip install torch==2.1.2 torchvision==0.16.2 --extra-index-url https://download.pytorch.org/whl/cu121

# RUN mkdir -p /home/stable_diffusion_webui/stable-diffusion-webui
# WORKDIR /home/stable_diffusion_webui
#
#
#
# REPLACE COPY WITH GIT CLONE
# https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/bef51aed032c0aaa5cfd80445bc4cf0d85b408b5
RUN mkdir -p /home/stable_diffusion_webui
WORKDIR /home/stable_diffusion_webui
# COPY clone.sh /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh
# RUN . /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh stable-diffusion-webui https://github.com/AUTOMATIC1111/stable-diffusion-webui.git bef51aed032c0aaa5cfd80445bc4cf0d85b408b5
# RUN git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git /home/stable_diffusion_webui/stable-diffusion-webui
# WORKDIR /home/stable_diffusion_webui/stable-diffusion-webui
# RUN git checkout bef51aed032c0aaa5cfd80445bc4cf0d85b408b5
# WORKDIR /home/stable_diffusion_webui
# Using their way
# RUN --mount=type=cache,target=/root/.cache/pip \
RUN git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git /home/stable_diffusion_webui/stable-diffusion-webui && \
    cd /home/stable_diffusion_webui/stable-diffusion-webui && \
    git reset --hard bef51aed032c0aaa5cfd80445bc4cf0d85b408b5 && \
    pip install -r requirements_versions.txt
# cd -
RUN "/usr/bin/python3" -m pip install -r /home/stable_diffusion_webui/stable-diffusion-webui/requirements_versions.txt
WORKDIR /home/stable_diffusion_webui


ENV ROOT=/stable-diffusion-webui

COPY --from=download /repositories/ ${ROOT}/repositories/
RUN mkdir ${ROOT}/interrogate && cp ${ROOT}/repositories/clip-interrogator/clip_interrogator/data/* ${ROOT}/interrogate
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install -r ${ROOT}/repositories/CodeFormer/requirements.txt
# Modify code from AUTOMATIC1111 repo as needed for Mac M1
COPY paths_internal.py /home/stable_diffusion_webui/stable-diffusion-webui/modules/paths_internal.py
COPY sd_models.py /home/stable_diffusion_webui/stable-diffusion-webui/modules/sd_models.py
COPY webui-macos-env.sh /home/stable_diffusion_webui/stable-diffusion-webui/webui-macos-env.sh
COPY webui-user.sh /home/stable_diffusion_webui/stable-diffusion-webui/webui-user.sh
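
Note: the commit title says these copied *.py files are eventually meant to become sed commands. Taking paths_internal.py as the example, its only functional change (see its diff below) is the non-empty default for COMMANDLINE_ARGS, so a hypothetical sed replacement for that COPY could look like this (an assumption, not part of this commit):

# Hypothetical sed equivalent of the paths_internal.py COPY above
RUN sed -i "s|commandline_args = os.environ.get('COMMANDLINE_ARGS', \"\")|commandline_args = os.environ.get('COMMANDLINE_ARGS', \"--skip-torch-cuda-test --opt-sdp-attention --precision full --no-half\")|" \
    /home/stable_diffusion_webui/stable-diffusion-webui/modules/paths_internal.py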

RUN --mount=type=cache,target=/root/.cache/pip \
    pip install pyngrok xformers==0.0.23.post1 \
    git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379 \
    git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1 \
    git+https://github.com/mlfoundations/open_clip.git@v2.20.0
# IS THIS NEEDED?
COPY webui.sh /home/stable_diffusion_webui/stable-diffusion-webui/webui.sh

# there seems to be a memory leak (or maybe just memory not being freed fast enough) that is fixed by this version of malloc
# maybe move this up to the dependencies list.
RUN apt-get -y install libgoogle-perftools-dev && apt-get clean
ENV LD_PRELOAD=libtcmalloc.so
RUN chmod -R 777 /home/stable_diffusion_webui/stable-diffusion-webui/

COPY . /docker

RUN \
    # mv ${ROOT}/style.css ${ROOT}/user.css && \
    # one of the ugliest hacks I ever wrote \
    sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/lib/python3.10/site-packages/gradio/routes.py && \
    git config --global --add safe.directory '*'

WORKDIR ${ROOT}
ENV NVIDIA_VISIBLE_DEVICES=all
ENV CLI_ARGS=""
EXPOSE 7860
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}

RUN export OPENBLAS_NUM_THREADS=1
WORKDIR /home/stable_diffusion_webui/stable-diffusion-webui
RUN git config --global --add safe.directory "*"

# RUN useradd -s /bin/bash -d /home/sdwui/ -m -G sudo sdwui
# USER sdwui
# RUN git config --global --add safe.directory "*"
# RUN ./webui.sh
# RUN source /home/stable_diffusion_webui/stable-diffusion-webui/webui-macos-env.sh
# RUN source /home/stable_diffusion_webui/stable-diffusion-webui/webui-user.sh

# RUN "/usr/bin/python3" -m pip install -r requirements_versions.txt

# # Need to be after `webui.sh` installs reqs like gradio
# RUN curl -LS https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_aarch64 -o frpc_linux_aarch64_v0.2
# # ENTRYPOINT [ "/bin/bash" ]
# # ENV python=/usr/bin/python3
# # RUN site_pkg_loc=`python -m site | grep 'site-packages' | head -1 | awk -F"'" '{print $2}' -`; mv frpc_linux_aarch64_v0.2 `echo $site_pkg_loc`/gradio/frpc_linux_aarch64_v0.2; chmod +x `echo $site_pkg_loc`/gradio/frpc_linux_aarch64_v0.2
# RUN mv frpc_linux_aarch64_v0.2 /home/sdwui/.local/lib/python3.10/site-packages/gradio/.
# RUN chmod +x /home/sdwui/.local/lib/python3.10/site-packages/gradio/frpc_linux_aarch64_v0.2

####################################################################################################
RUN "/usr/bin/python3" -m pip install gradio==3.41.2 && \
    curl -LS https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_aarch64 -o frpc_linux_aarch64_v0.2 && \
    chmod +x frpc_linux_aarch64_v0.2
##################################################
RUN if [ -d /usr/local/lib/python3.10/dist-packages/gradio/ ]; then cp frpc_linux_aarch64_v0.2 /usr/local/lib/python3.10/dist-packages/gradio/.; fi
RUN if [ -d /usr/lib/python3/dist-packages/gradio ]; then cp frpc_linux_aarch64_v0.2 /usr/lib/python3/dist-packages/gradio/.; fi
RUN if [ -d /usr/lib/python3.10/dist-packages/gradio ]; then cp frpc_linux_aarch64_v0.2 /usr/lib/python3.10/dist-packages/gradio/.; fi
RUN if [ -d /home/sdwui/.local/lib/python3.10/site-packages/gradio ]; then cp frpc_linux_aarch64_v0.2 /home/sdwui/.local/lib/python3.10/site-packages/gradio/.; fi
##################################################
RUN if [ -d /home/sdwui/.local/lib/python3.10/site-packages/gradio ]; then \
        GRADIO_DIR_PATH=/home/sdwui/.local/lib/python3.10/site-packages/gradio; \
    else \
        GRADIO_DIR_PATH=/usr/lib/python3/dist-packages/gradio; \
    fi && \
    mv frpc_linux_aarch64_v0.2 ${GRADIO_DIR_PATH}/frpc_linux_aarch64_v0.2 && \
    chmod +x ${GRADIO_DIR_PATH}/frpc_linux_aarch64_v0.2
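
Note: these steps probe a fixed list of candidate gradio paths, even though the image has both a conda Python and /usr/bin/python3. A hypothetical, more robust step (not in this commit) would ask the interpreter that installed gradio where the package actually lives:

# Hypothetical alternative: resolve gradio's install dir at build time
RUN GRADIO_DIR="$(/usr/bin/python3 -c 'import gradio, os; print(os.path.dirname(gradio.__file__))')" && \
    cp frpc_linux_aarch64_v0.2 "${GRADIO_DIR}/frpc_linux_aarch64_v0.2" && \
    chmod +x "${GRADIO_DIR}/frpc_linux_aarch64_v0.2"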
####################################################################################################

# RUN mv /home/stable_diffusion_webui/stable-diffusion-webui /stable-diffusion-webui
# WORKDIR /stable-diffusion-webui

# RUN useradd -s /bin/bash -d /home/sdwui/ -m -G sudo sdwui
# USER sdwui
# RUN git config --global --add safe.directory "*"

# COPY entrypoint.sh /home/stable_diffusion_webui/entrypoint.sh

RUN export USE_OPENMP=1
ENV USE_OPENMP=1
RUN export OPENBLAS_NUM_THREADS=1
ENV OPENBLAS_NUM_THREADS=1


RUN mkdir -p /home/stable_diffusion_webui/stable-diffusion-webui/repositories
COPY clone.sh /home/stable_diffusion_webui/stable-diffusion-webui/.
WORKDIR /home/stable_diffusion_webui/stable-diffusion-webui
RUN chmod +x /home/stable_diffusion_webui/stable-diffusion-webui/clone.sh

# RUN git clone https://github.com/Stability-AI/stablediffusion.git \
#     && cd stablediffusion \
#     && git reset --hard cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf \
#     && rm -rf assets data/**/*.png data/**/*.jpg data/**/*.gif .git \
#     && cd .. \
#     && mkdir /home/stable_diffusion_webui/stable-diffusion-webui/repositories \
#     && mv stablediffusion /home/stable_diffusion_webui/stable-diffusion-webui/repositories/stable-diffusion-stability-ai

RUN ./clone.sh CodeFormer https://github.com/sczhou/CodeFormer.git c5b4593074ba6214284d6acd5f1719b6c5d739af \
    && rm -rf assets inputs
RUN ./clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf
RUN ./clone.sh BLIP https://github.com/salesforce/BLIP.git 48211a1594f1321b00f14c9f7a5b4813144b2fb9
RUN ./clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git ab527a9a6d347f364e3d185ba6d714e22d80cb3c
RUN ./clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2cf03aaf6e704197fd0dae7c7f96aa59cf1b11c9
RUN ./clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f
RUN ./clone.sh stable-diffusion-webui-assets https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets.git 6f7db241d2f8ba7457bac5ca9753331f0c266917
RUN mv /repositories/* /home/stable_diffusion_webui/stable-diffusion-webui/repositories/.

RUN "/usr/bin/python3" -m pip install clip


############################################################################################################################################
##############
### REMOVE ###
##############
# COPY sd-v1-4.ckpt /home/stable_diffusion_webui/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt
############################################################################################################################################


# RUN export COMMANDLINE_ARGS="--skip-torch-cuda-test --opt-sdp-attention --precision full --no-half --upcast-sampling --no-half-vae --use-cpu interrogate"
# ENV COMMANDLINE_ARGS="--skip-torch-cuda-test --opt-sdp-attention --precision full --no-half --upcast-sampling --no-half-vae --use-cpu interrogate"
# CMD /usr/bin/python3 -u webui.py --listen --port 7860 ${COMMANDLINE_ARGS}
CMD /usr/bin/python3 -u webui.py --listen --port 7860
# ENTRYPOINT [ "/bin/bash" ]

services/AUTOMATIC1111/Dockerfile.theirs (new file, 69 lines)
@@ -0,0 +1,69 @@
FROM alpine/git:2.36.2 as download

COPY clone.sh /clone.sh


RUN . /clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf \
    && rm -rf assets data/**/*.png data/**/*.jpg data/**/*.gif

RUN . /clone.sh CodeFormer https://github.com/sczhou/CodeFormer.git c5b4593074ba6214284d6acd5f1719b6c5d739af \
    && rm -rf assets inputs

RUN . /clone.sh BLIP https://github.com/salesforce/BLIP.git 48211a1594f1321b00f14c9f7a5b4813144b2fb9
RUN . /clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git ab527a9a6d347f364e3d185ba6d714e22d80cb3c
RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2cf03aaf6e704197fd0dae7c7f96aa59cf1b11c9
RUN . /clone.sh generative-models https://github.com/Stability-AI/generative-models 45c443b316737a4ab6e40413d7794a7f5657c19f


FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime

ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1

RUN --mount=type=cache,target=/var/cache/apt \
    apt-get update && \
    # we need those
    apt-get install -y fonts-dejavu-core rsync git jq moreutils aria2 \
    # extensions needs those
    ffmpeg libglfw3-dev libgles2-mesa-dev pkg-config libcairo2 libcairo2-dev build-essential


WORKDIR /
RUN --mount=type=cache,target=/root/.cache/pip \
    git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git && \
    cd stable-diffusion-webui && \
    git reset --hard cf2772fab0af5573da775e7437e6acdca424f26e && \
    pip install -r requirements_versions.txt


ENV ROOT=/stable-diffusion-webui

COPY --from=download /repositories/ ${ROOT}/repositories/
RUN mkdir ${ROOT}/interrogate && cp ${ROOT}/repositories/clip-interrogator/clip_interrogator/data/* ${ROOT}/interrogate
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install -r ${ROOT}/repositories/CodeFormer/requirements.txt

RUN --mount=type=cache,target=/root/.cache/pip \
    pip install pyngrok xformers==0.0.23.post1 \
    git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379 \
    git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1 \
    git+https://github.com/mlfoundations/open_clip.git@v2.20.0

# there seems to be a memory leak (or maybe just memory not being freed fast enough) that is fixed by this version of malloc
# maybe move this up to the dependencies list.
RUN apt-get -y install libgoogle-perftools-dev && apt-get clean
ENV LD_PRELOAD=libtcmalloc.so

COPY . /docker

RUN \
    # mv ${ROOT}/style.css ${ROOT}/user.css && \
    # one of the ugliest hacks I ever wrote \
    sed -i 's/in_app_dir = .*/in_app_dir = True/g' /opt/conda/lib/python3.10/site-packages/gradio/routes.py && \
    git config --global --add safe.directory '*'

WORKDIR ${ROOT}
ENV NVIDIA_VISIBLE_DEVICES=all
ENV CLI_ARGS=""
EXPOSE 7860
ENTRYPOINT ["/docker/entrypoint.sh"]
CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}
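
Keeping this untouched upstream copy next to the consolidated file makes drift easy to audit, e.g. (assuming the consolidated file is services/AUTOMATIC1111/Dockerfile):

diff -u services/AUTOMATIC1111/Dockerfile.theirs services/AUTOMATIC1111/Dockerfile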

services/AUTOMATIC1111/paths_internal.py (new file, 38 lines)
@@ -0,0 +1,38 @@
"""this module defines internal paths used by program and is safe to import before dependencies are installed in launch.py"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import shlex
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
normalized_filepath = lambda filepath: str(Path(filepath).absolute())
|
||||
|
||||
# commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
|
||||
commandline_args = os.environ.get('COMMANDLINE_ARGS', "--skip-torch-cuda-test --opt-sdp-attention --precision full --no-half")
|
||||
sys.argv += shlex.split(commandline_args)
|
||||
|
||||
cwd = os.getcwd()
|
||||
modules_path = os.path.dirname(os.path.realpath(__file__))
|
||||
script_path = os.path.dirname(modules_path)
|
||||
|
||||
sd_configs_path = os.path.join(script_path, "configs")
|
||||
sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml")
|
||||
sd_model_file = os.path.join(script_path, 'model.ckpt')
|
||||
default_sd_model_file = sd_model_file
|
||||
|
||||
# Parse the --data-dir flag first so we can use it as a base for our other argument default values
|
||||
parser_pre = argparse.ArgumentParser(add_help=False)
|
||||
parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", )
|
||||
cmd_opts_pre = parser_pre.parse_known_args()[0]
|
||||
|
||||
data_path = cmd_opts_pre.data_dir
|
||||
|
||||
models_path = os.path.join(data_path, "models")
|
||||
extensions_dir = os.path.join(data_path, "extensions")
|
||||
extensions_builtin_dir = os.path.join(script_path, "extensions-builtin")
|
||||
config_states_dir = os.path.join(script_path, "config_states")
|
||||
default_output_dir = os.path.join(data_path, "output")
|
||||
|
||||
roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf')
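
The only departure from upstream here is the non-empty fallback for COMMANDLINE_ARGS: when the variable is unset, the webui now boots with CPU-safe flags baked in. A quick hypothetical shell check of what gets appended to sys.argv:

python3 -c 'import os, shlex; print(shlex.split(os.environ.get("COMMANDLINE_ARGS", "--skip-torch-cuda-test --opt-sdp-attention --precision full --no-half")))'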

services/AUTOMATIC1111/sd_models.py (new file, 939 lines)
@@ -0,0 +1,939 @@
import collections
import os.path
import sys
import threading

import torch
import re
import safetensors.torch
from omegaconf import OmegaConf, ListConfig
from os import mkdir
from urllib import request
import ldm.modules.midas as midas

from ldm.util import instantiate_from_config

from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches
from modules.timer import Timer
from modules.shared import opts
import tomesd
import numpy as np

model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(paths.models_path, model_dir))

checkpoints_list = {}
checkpoint_aliases = {}
checkpoint_alisases = checkpoint_aliases  # for compatibility with old name
checkpoints_loaded = collections.OrderedDict()


def replace_key(d, key, new_key, value):
    keys = list(d.keys())

    d[new_key] = value

    if key not in keys:
        return d

    index = keys.index(key)
    keys[index] = new_key

    new_d = {k: d[k] for k in keys}

    d.clear()
    d.update(new_d)
    return d


class CheckpointInfo:
    def __init__(self, filename):
        self.filename = filename
        abspath = os.path.abspath(filename)
        abs_ckpt_dir = os.path.abspath(shared.cmd_opts.ckpt_dir) if shared.cmd_opts.ckpt_dir is not None else None

        self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"

        if abs_ckpt_dir and abspath.startswith(abs_ckpt_dir):
            name = abspath.replace(abs_ckpt_dir, '')
        elif abspath.startswith(model_path):
            name = abspath.replace(model_path, '')
        else:
            name = os.path.basename(filename)

        if name.startswith("\\") or name.startswith("/"):
            name = name[1:]

        def read_metadata():
            metadata = read_metadata_from_safetensors(filename)
            self.modelspec_thumbnail = metadata.pop('modelspec.thumbnail', None)

            return metadata

        self.metadata = {}
        if self.is_safetensors:
            try:
                self.metadata = cache.cached_data_for_file('safetensors-metadata', "checkpoint/" + name, filename, read_metadata)
            except Exception as e:
                errors.display(e, f"reading metadata for {filename}")

        self.name = name
        self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
        self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
        self.hash = model_hash(filename)

        self.sha256 = hashes.sha256_from_cache(self.filename, f"checkpoint/{name}")
        self.shorthash = self.sha256[0:10] if self.sha256 else None

        self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
        self.short_title = self.name_for_extra if self.shorthash is None else f'{self.name_for_extra} [{self.shorthash}]'

        self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]']
        if self.shorthash:
            self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']

    def register(self):
        checkpoints_list[self.title] = self
        for id in self.ids:
            checkpoint_aliases[id] = self

    def calculate_shorthash(self):
        self.sha256 = hashes.sha256(self.filename, f"checkpoint/{self.name}")
        if self.sha256 is None:
            return

        shorthash = self.sha256[0:10]
        if self.shorthash == self.sha256[0:10]:
            return self.shorthash

        self.shorthash = shorthash

        if self.shorthash not in self.ids:
            self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']

        old_title = self.title
        self.title = f'{self.name} [{self.shorthash}]'
        self.short_title = f'{self.name_for_extra} [{self.shorthash}]'

        replace_key(checkpoints_list, old_title, self.title, self)
        self.register()

        return self.shorthash


try:
    # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
    from transformers import logging, CLIPModel  # noqa: F401

    logging.set_verbosity_error()
except Exception:
    pass


def setup_model():
    """called once at startup to do various one-time tasks related to SD models"""

    os.makedirs(model_path, exist_ok=True)

    enable_midas_autodownload()
    patch_given_betas()


def checkpoint_tiles(use_short=False):
    return [x.short_title if use_short else x.title for x in checkpoints_list.values()]


def list_models():
    checkpoints_list.clear()
    checkpoint_aliases.clear()

    cmd_ckpt = shared.cmd_opts.ckpt
    if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
        model_url = None
        _download_name_ = None  # nothing to download in this case
    else:
        # model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
        # _download_name_ = "v1-5-pruned-emaonly.safetensors"
        model_url = "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt"
        _download_name_ = "sd-v1-4.ckpt"

    model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name=_download_name_, ext_blacklist=[".vae.ckpt", ".vae.safetensors"])

    if os.path.exists(cmd_ckpt):
        checkpoint_info = CheckpointInfo(cmd_ckpt)
        checkpoint_info.register()

        shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
    elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
        print(f"Checkpoint in --ckpt argument not found (possibly it was moved to {model_path}): {cmd_ckpt}", file=sys.stderr)

    for filename in model_list:
        checkpoint_info = CheckpointInfo(filename)
        checkpoint_info.register()
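
Note: list_models() now falls back to downloading the SD 1.4 checkpoint instead of upstream's 1.5 default. To pre-seed the container and skip that runtime download, something along these lines would work (hypothetical command, same URL as above):

curl -L https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt \
    -o /home/stable_diffusion_webui/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt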


re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")


def get_closet_checkpoint_match(search_string):
    if not search_string:
        return None

    checkpoint_info = checkpoint_aliases.get(search_string, None)
    if checkpoint_info is not None:
        return checkpoint_info

    found = sorted([info for info in checkpoints_list.values() if search_string in info.title], key=lambda x: len(x.title))
    if found:
        return found[0]

    search_string_without_checksum = re.sub(re_strip_checksum, '', search_string)
    found = sorted([info for info in checkpoints_list.values() if search_string_without_checksum in info.title], key=lambda x: len(x.title))
    if found:
        return found[0]

    return None


def model_hash(filename):
    """old hash that only looks at a small part of the file and is prone to collisions"""

    try:
        with open(filename, "rb") as file:
            import hashlib
            m = hashlib.sha256()

            file.seek(0x100000)
            m.update(file.read(0x10000))
            return m.hexdigest()[0:8]
    except FileNotFoundError:
        return 'NOFILE'


def select_checkpoint():
    """Raises `FileNotFoundError` if no checkpoints are found."""
    model_checkpoint = shared.opts.sd_model_checkpoint

    checkpoint_info = checkpoint_aliases.get(model_checkpoint, None)
    if checkpoint_info is not None:
        return checkpoint_info

    if len(checkpoints_list) == 0:
        error_message = "No checkpoints found. When searching for checkpoints, looked at:"
        if shared.cmd_opts.ckpt is not None:
            error_message += f"\n - file {os.path.abspath(shared.cmd_opts.ckpt)}"
        error_message += f"\n - directory {model_path}"
        if shared.cmd_opts.ckpt_dir is not None:
            error_message += f"\n - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}"
        error_message += "Can't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations."
        raise FileNotFoundError(error_message)

    checkpoint_info = next(iter(checkpoints_list.values()))
    if model_checkpoint is not None:
        print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)

    return checkpoint_info


checkpoint_dict_replacements_sd1 = {
    'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
    'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
    'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}

checkpoint_dict_replacements_sd2_turbo = {  # Converts SD 2.1 Turbo from SGM to LDM format.
    'conditioner.embedders.0.': 'cond_stage_model.',
}


def transform_checkpoint_dict_key(k, replacements):
    for text, replacement in replacements.items():
        if k.startswith(text):
            k = replacement + k[len(text):]

    return k


def get_state_dict_from_checkpoint(pl_sd):
    pl_sd = pl_sd.pop("state_dict", pl_sd)
    pl_sd.pop("state_dict", None)

    is_sd2_turbo = 'conditioner.embedders.0.model.ln_final.weight' in pl_sd and pl_sd['conditioner.embedders.0.model.ln_final.weight'].size()[0] == 1024

    sd = {}
    for k, v in pl_sd.items():
        if is_sd2_turbo:
            new_key = transform_checkpoint_dict_key(k, checkpoint_dict_replacements_sd2_turbo)
        else:
            new_key = transform_checkpoint_dict_key(k, checkpoint_dict_replacements_sd1)

        if new_key is not None:
            sd[new_key] = v

    pl_sd.clear()
    pl_sd.update(sd)

    return pl_sd


def read_metadata_from_safetensors(filename):
    import json

    with open(filename, mode="rb") as file:
        metadata_len = file.read(8)
        metadata_len = int.from_bytes(metadata_len, "little")
        json_start = file.read(2)

        assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file"
        json_data = json_start + file.read(metadata_len-2)
        json_obj = json.loads(json_data)

        res = {}
        for k, v in json_obj.get("__metadata__", {}).items():
            res[k] = v
            if isinstance(v, str) and v[0:1] == '{':
                try:
                    res[k] = json.loads(v)
                except Exception:
                    pass

        return res
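
Note: read_metadata_from_safetensors relies on the safetensors layout, an 8-byte little-endian header length followed by a JSON header whose optional __metadata__ key holds user metadata. The same header can be inspected from a shell, e.g. (hypothetical one-liner, the file path is a placeholder):

python3 -c 'import json, struct, sys; f = open(sys.argv[1], "rb"); n = struct.unpack("<Q", f.read(8))[0]; print(json.loads(f.read(n)).get("__metadata__", {}))' model.safetensors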


def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
    _, extension = os.path.splitext(checkpoint_file)
    if extension.lower() == ".safetensors":
        device = map_location or shared.weight_load_location or devices.get_optimal_device_name()

        if not shared.opts.disable_mmap_load_safetensors:
            pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
        else:
            pl_sd = safetensors.torch.load(open(checkpoint_file, 'rb').read())
            pl_sd = {k: v.to(device) for k, v in pl_sd.items()}
    else:
        pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)

    if print_global_state and "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    if pl_sd is None:
        sd = {}
    else:
        sd = get_state_dict_from_checkpoint(pl_sd)
    return sd


def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
    sd_model_hash = checkpoint_info.calculate_shorthash()
    timer.record("calculate hash")

    if checkpoint_info in checkpoints_loaded:
        # use checkpoint cache
        print(f"Loading weights [{sd_model_hash}] from cache")
        # move to end as latest
        checkpoints_loaded.move_to_end(checkpoint_info)
        return checkpoints_loaded[checkpoint_info]

    print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
    res = read_state_dict(checkpoint_info.filename)
    timer.record("load weights from disk")

    return res


class SkipWritingToConfig:
    """This context manager prevents load_model_weights from writing checkpoint name to the config when it loads weight."""

    skip = False
    previous = None

    def __enter__(self):
        self.previous = SkipWritingToConfig.skip
        SkipWritingToConfig.skip = True
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        SkipWritingToConfig.skip = self.previous


def check_fp8(model):
    if model is None:
        return None
    if devices.get_optimal_device_name() == "mps":
        enable_fp8 = False
    elif shared.opts.fp8_storage == "Enable":
        enable_fp8 = True
    elif getattr(model, "is_sdxl", False) and shared.opts.fp8_storage == "Enable for SDXL":
        enable_fp8 = True
    else:
        enable_fp8 = False
    return enable_fp8


def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
    sd_model_hash = checkpoint_info.calculate_shorthash()
    timer.record("calculate hash")

    if devices.fp8:
        # prevent the model from loading the state dict in fp8
        model.half()

    if not SkipWritingToConfig.skip:
        shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title

    if state_dict is None:
        state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    model.is_sdxl = hasattr(model, 'conditioner')
    model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
    model.is_sd1 = not model.is_sdxl and not model.is_sd2
    model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys()
    if model.is_sdxl:
        sd_models_xl.extend_sdxl(model)

    if model.is_ssd:
        sd_hijack.model_hijack.convert_sdxl_to_ssd(model)

    if shared.opts.sd_checkpoint_cache > 0:
        # cache newly loaded model
        checkpoints_loaded[checkpoint_info] = state_dict.copy()

    model.load_state_dict(state_dict, strict=False)
    timer.record("apply weights to model")

    del state_dict

    if shared.cmd_opts.opt_channelslast:
        model.to(memory_format=torch.channels_last)
        timer.record("apply channels_last")

    if shared.cmd_opts.no_half:
        model.float()
        model.alphas_cumprod_original = model.alphas_cumprod
        devices.dtype_unet = torch.float32
        timer.record("apply float()")
    else:
        vae = model.first_stage_model
        depth_model = getattr(model, 'depth_model', None)

        # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
        if shared.cmd_opts.no_half_vae:
            model.first_stage_model = None
        # with --upcast-sampling, don't convert the depth model weights to float16
        if shared.cmd_opts.upcast_sampling and depth_model:
            model.depth_model = None

        alphas_cumprod = model.alphas_cumprod
        model.alphas_cumprod = None
        model.half()
        model.alphas_cumprod = alphas_cumprod
        model.alphas_cumprod_original = alphas_cumprod
        model.first_stage_model = vae
        if depth_model:
            model.depth_model = depth_model

        devices.dtype_unet = torch.float16
        timer.record("apply half()")

    apply_alpha_schedule_override(model)

    for module in model.modules():
        if hasattr(module, 'fp16_weight'):
            del module.fp16_weight
        if hasattr(module, 'fp16_bias'):
            del module.fp16_bias

    if check_fp8(model):
        devices.fp8 = True
        first_stage = model.first_stage_model
        model.first_stage_model = None
        for module in model.modules():
            if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
                if shared.opts.cache_fp16_weight:
                    module.fp16_weight = module.weight.data.clone().cpu().half()
                    if module.bias is not None:
                        module.fp16_bias = module.bias.data.clone().cpu().half()
                module.to(torch.float8_e4m3fn)
        model.first_stage_model = first_stage
        timer.record("apply fp8")
    else:
        devices.fp8 = False

    devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16

    model.first_stage_model.to(devices.dtype_vae)
    timer.record("apply dtype to VAE")

    # clean up cache if limit is reached
    while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
        checkpoints_loaded.popitem(last=False)

    model.sd_model_hash = sd_model_hash
    model.sd_model_checkpoint = checkpoint_info.filename
    model.sd_checkpoint_info = checkpoint_info
    shared.opts.data["sd_checkpoint_hash"] = checkpoint_info.sha256

    if hasattr(model, 'logvar'):
        model.logvar = model.logvar.to(devices.device)  # fix for training

    sd_vae.delete_base_vae()
    sd_vae.clear_loaded_vae()
    vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename).tuple()
    sd_vae.load_vae(model, vae_file, vae_source)
    timer.record("load VAE")


def enable_midas_autodownload():
    """
    Gives the ldm.modules.midas.api.load_model function automatic downloading.

    When the 512-depth-ema model, and other future models like it, is loaded,
    it calls midas.api.load_model to load the associated midas depth model.
    This function applies a wrapper to download the model to the correct
    location automatically.
    """

    midas_path = os.path.join(paths.models_path, 'midas')

    # stable-diffusion-stability-ai hard-codes the midas model path to
    # a location that differs from where other scripts using this model look.
    # HACK: Overriding the path here.
    for k, v in midas.api.ISL_PATHS.items():
        file_name = os.path.basename(v)
        midas.api.ISL_PATHS[k] = os.path.join(midas_path, file_name)

    midas_urls = {
        "dpt_large": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        "dpt_hybrid": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt",
        "midas_v21": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21-f6b98070.pt",
        "midas_v21_small": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21_small-70d6b9c8.pt",
    }

    midas.api.load_model_inner = midas.api.load_model

    def load_model_wrapper(model_type):
        path = midas.api.ISL_PATHS[model_type]
        if not os.path.exists(path):
            if not os.path.exists(midas_path):
                mkdir(midas_path)

            print(f"Downloading midas model weights for {model_type} to {path}")
            request.urlretrieve(midas_urls[model_type], path)
            print(f"{model_type} downloaded")

        return midas.api.load_model_inner(model_type)

    midas.api.load_model = load_model_wrapper


def patch_given_betas():
    import ldm.models.diffusion.ddpm

    def patched_register_schedule(*args, **kwargs):
        """a modified version of register_schedule function that converts plain list from Omegaconf into numpy"""

        if isinstance(args[1], ListConfig):
            args = (args[0], np.array(args[1]), *args[2:])

        original_register_schedule(*args, **kwargs)

    original_register_schedule = patches.patch(__name__, ldm.models.diffusion.ddpm.DDPM, 'register_schedule', patched_register_schedule)


def repair_config(sd_config):

    if not hasattr(sd_config.model.params, "use_ema"):
        sd_config.model.params.use_ema = False

    if hasattr(sd_config.model.params, 'unet_config'):
        if shared.cmd_opts.no_half:
            sd_config.model.params.unet_config.params.use_fp16 = False
        elif shared.cmd_opts.upcast_sampling:
            sd_config.model.params.unet_config.params.use_fp16 = True

    if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:
        sd_config.model.params.first_stage_config.params.ddconfig.attn_type = "vanilla"

    # For UnCLIP-L, override the hardcoded karlo directory
    if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"):
        karlo_path = os.path.join(paths.models_path, 'karlo')
        sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path)


def rescale_zero_terminal_snr_abar(alphas_cumprod):
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= (alphas_bar_sqrt_T)

    # Scale so the first timestep is back to the old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert alphas_bar_sqrt to betas
    alphas_bar = alphas_bar_sqrt ** 2  # Revert sqrt
    alphas_bar[-1] = 4.8973451890853435e-08
    return alphas_bar


def apply_alpha_schedule_override(sd_model, p=None):
    """
    Applies an override to the alpha schedule of the model according to settings.
    - downcasts the alpha schedule to half precision
    - rescales the alpha schedule to have zero terminal SNR
    """

    if not hasattr(sd_model, 'alphas_cumprod') or not hasattr(sd_model, 'alphas_cumprod_original'):
        return

    sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device)

    if opts.use_downcasted_alpha_bar:
        if p is not None:
            p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar
        sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device)

    if opts.sd_noise_schedule == "Zero Terminal SNR":
        if p is not None:
            p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule
        sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device)


sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'
sdxl_clip_weight = 'conditioner.embedders.1.model.ln_final.weight'
sdxl_refiner_clip_weight = 'conditioner.embedders.0.model.ln_final.weight'


class SdModelData:
    def __init__(self):
        self.sd_model = None
        self.loaded_sd_models = []
        self.was_loaded_at_least_once = False
        self.lock = threading.Lock()

    def get_sd_model(self):
        if self.was_loaded_at_least_once:
            return self.sd_model

        if self.sd_model is None:
            with self.lock:
                if self.sd_model is not None or self.was_loaded_at_least_once:
                    return self.sd_model

                try:
                    load_model()

                except Exception as e:
                    errors.display(e, "loading stable diffusion model", full_traceback=True)
                    print("", file=sys.stderr)
                    print("Stable diffusion model failed to load", file=sys.stderr)
                    self.sd_model = None

        return self.sd_model

    def set_sd_model(self, v, already_loaded=False):
        self.sd_model = v
        if already_loaded:
            sd_vae.base_vae = getattr(v, "base_vae", None)
            sd_vae.loaded_vae_file = getattr(v, "loaded_vae_file", None)
            sd_vae.checkpoint_info = v.sd_checkpoint_info

        try:
            self.loaded_sd_models.remove(v)
        except ValueError:
            pass

        if v is not None:
            self.loaded_sd_models.insert(0, v)


model_data = SdModelData()


def get_empty_cond(sd_model):

    p = processing.StableDiffusionProcessingTxt2Img()
    extra_networks.activate(p, {})

    if hasattr(sd_model, 'conditioner'):
        d = sd_model.get_learned_conditioning([""])
        return d['crossattn']
    else:
        return sd_model.cond_stage_model([""])


def send_model_to_cpu(m):
    if m.lowvram:
        lowvram.send_everything_to_cpu()
    else:
        m.to(devices.cpu)

    devices.torch_gc()


def model_target_device(m):
    if lowvram.is_needed(m):
        return devices.cpu
    else:
        return devices.device


def send_model_to_device(m):
    lowvram.apply(m)

    if not m.lowvram:
        m.to(shared.device)


def send_model_to_trash(m):
    m.to(device="meta")
    devices.torch_gc()


def load_model(checkpoint_info=None, already_loaded_state_dict=None):
    from modules import sd_hijack
    checkpoint_info = checkpoint_info or select_checkpoint()

    timer = Timer()

    if model_data.sd_model:
        send_model_to_trash(model_data.sd_model)
        model_data.sd_model = None
        devices.torch_gc()

    timer.record("unload existing model")

    if already_loaded_state_dict is not None:
        state_dict = already_loaded_state_dict
    else:
        state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
    clip_is_included_into_sd = any(x for x in [sd1_clip_weight, sd2_clip_weight, sdxl_clip_weight, sdxl_refiner_clip_weight] if x in state_dict)

    timer.record("find config")

    sd_config = OmegaConf.load(checkpoint_config)
    repair_config(sd_config)

    timer.record("load config")

    print(f"Creating model from config: {checkpoint_config}")

    sd_model = None
    try:
        with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
            with sd_disable_initialization.InitializeOnMeta():
                sd_model = instantiate_from_config(sd_config.model)

    except Exception as e:
        errors.display(e, "creating model quickly", full_traceback=True)

    if sd_model is None:
        print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)

        with sd_disable_initialization.InitializeOnMeta():
            sd_model = instantiate_from_config(sd_config.model)

    sd_model.used_config = checkpoint_config

    timer.record("create model")

    if shared.cmd_opts.no_half:
        weight_dtype_conversion = None
    else:
        weight_dtype_conversion = {
            'first_stage_model': None,
            'alphas_cumprod': None,
            '': torch.float16,
        }

    with sd_disable_initialization.LoadStateDictOnMeta(state_dict, device=model_target_device(sd_model), weight_dtype_conversion=weight_dtype_conversion):
        load_model_weights(sd_model, checkpoint_info, state_dict, timer)
    timer.record("load weights from state dict")

    send_model_to_device(sd_model)
    timer.record("move model to device")

    sd_hijack.model_hijack.hijack(sd_model)

    timer.record("hijack")

    sd_model.eval()
    model_data.set_sd_model(sd_model)
    model_data.was_loaded_at_least_once = True

    sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)  # Reload embeddings after model load as they may or may not fit the model

    timer.record("load textual inversion embeddings")

    script_callbacks.model_loaded_callback(sd_model)

    timer.record("scripts callbacks")

    with devices.autocast(), torch.no_grad():
        sd_model.cond_stage_model_empty_prompt = get_empty_cond(sd_model)

    timer.record("calculate empty prompt")

    print(f"Model loaded in {timer.summary()}.")

    return sd_model


def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
    """
    Checks if the desired checkpoint from checkpoint_info is not already loaded in model_data.loaded_sd_models.
    If it is loaded, returns that (moving it to GPU if necessary, and moving the currently loaded model to CPU if necessary).
    If not, returns the model that can be used to load weights from checkpoint_info's file.
    If no such model exists, returns None.
    Additionally deletes loaded models that are over the limit set in settings (sd_checkpoints_limit).
    """

    already_loaded = None
    for i in reversed(range(len(model_data.loaded_sd_models))):
        loaded_model = model_data.loaded_sd_models[i]
        if loaded_model.sd_checkpoint_info.filename == checkpoint_info.filename:
            already_loaded = loaded_model
            continue

        if len(model_data.loaded_sd_models) > shared.opts.sd_checkpoints_limit > 0:
            print(f"Unloading model {len(model_data.loaded_sd_models)} over the limit of {shared.opts.sd_checkpoints_limit}: {loaded_model.sd_checkpoint_info.title}")
            model_data.loaded_sd_models.pop()
            send_model_to_trash(loaded_model)
            timer.record("send model to trash")

    if shared.opts.sd_checkpoints_keep_in_cpu:
        send_model_to_cpu(sd_model)
        timer.record("send model to cpu")

    if already_loaded is not None:
        send_model_to_device(already_loaded)
        timer.record("send model to device")

        model_data.set_sd_model(already_loaded, already_loaded=True)

        if not SkipWritingToConfig.skip:
            shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title
            shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256

        print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}")
        sd_vae.reload_vae_weights(already_loaded)
        return model_data.sd_model
    elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit:
        print(f"Loading model {checkpoint_info.title} ({len(model_data.loaded_sd_models) + 1} out of {shared.opts.sd_checkpoints_limit})")

        model_data.sd_model = None
        load_model(checkpoint_info)
        return model_data.sd_model
    elif len(model_data.loaded_sd_models) > 0:
        sd_model = model_data.loaded_sd_models.pop()
        model_data.sd_model = sd_model

        sd_vae.base_vae = getattr(sd_model, "base_vae", None)
        sd_vae.loaded_vae_file = getattr(sd_model, "loaded_vae_file", None)
        sd_vae.checkpoint_info = sd_model.sd_checkpoint_info

        print(f"Reusing loaded model {sd_model.sd_checkpoint_info.title} to load {checkpoint_info.title}")
        return sd_model
    else:
        return None


def reload_model_weights(sd_model=None, info=None, forced_reload=False):
    checkpoint_info = info or select_checkpoint()

    timer = Timer()

    if not sd_model:
        sd_model = model_data.sd_model

    if sd_model is None:  # previous model load failed
        current_checkpoint_info = None
    else:
        current_checkpoint_info = sd_model.sd_checkpoint_info
        if check_fp8(sd_model) != devices.fp8:
            # load from state dict again to prevent extra numerical errors
            forced_reload = True
        elif sd_model.sd_model_checkpoint == checkpoint_info.filename and not forced_reload:
            return sd_model

    sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
    if not forced_reload and sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename:
        return sd_model

    if sd_model is not None:
        sd_unet.apply_unet("None")
        send_model_to_cpu(sd_model)
        sd_hijack.model_hijack.undo_hijack(sd_model)

    state_dict = get_checkpoint_state_dict(checkpoint_info, timer)

    checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)

    timer.record("find config")

    if sd_model is None or checkpoint_config != sd_model.used_config:
        if sd_model is not None:
            send_model_to_trash(sd_model)

        load_model(checkpoint_info, already_loaded_state_dict=state_dict)
        return model_data.sd_model

    try:
        load_model_weights(sd_model, checkpoint_info, state_dict, timer)
    except Exception:
        print("Failed to load checkpoint, restoring previous")
        load_model_weights(sd_model, current_checkpoint_info, None, timer)
        raise
    finally:
        sd_hijack.model_hijack.hijack(sd_model)
        timer.record("hijack")

        if not sd_model.lowvram:
            sd_model.to(devices.device)
            timer.record("move model to device")

        script_callbacks.model_loaded_callback(sd_model)
        timer.record("script callbacks")

    print(f"Weights loaded in {timer.summary()}.")

    model_data.set_sd_model(sd_model)
    sd_unet.apply_unet()

    return sd_model


def unload_model_weights(sd_model=None, info=None):
    send_model_to_cpu(sd_model or shared.sd_model)

    return sd_model


def apply_token_merging(sd_model, token_merging_ratio):
    """
    Applies speed and memory optimizations from tomesd.
    """

    current_token_merging_ratio = getattr(sd_model, 'applied_token_merged_ratio', 0)

    if current_token_merging_ratio == token_merging_ratio:
        return

    if current_token_merging_ratio > 0:
        tomesd.remove_patch(sd_model)

    if token_merging_ratio > 0:
        tomesd.apply_patch(
            sd_model,
            ratio=token_merging_ratio,
            use_rand=False,  # can cause issues with some samplers
            merge_attn=True,
            merge_crossattn=False,
            merge_mlp=False
        )

    sd_model.applied_token_merged_ratio = token_merging_ratio
|
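
# A minimal sketch of the toggle above (assumes a model is loaded on
# shared.sd_model and the tomesd package is installed):
#
#     apply_token_merging(shared.sd_model, token_merging_ratio=0.5)  # patch via tomesd
#     apply_token_merging(shared.sd_model, token_merging_ratio=0.5)  # no-op, ratio unchanged
#     apply_token_merging(shared.sd_model, token_merging_ratio=0)    # remove the patch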
18
services/AUTOMATIC1111/webui-macos-env.sh
Normal file

@@ -0,0 +1,18 @@
#!/bin/bash
####################################################################
#                          macOS defaults                          #
# Please modify webui-user.sh to change these instead of this file #
####################################################################

if [[ -x "$(command -v python3.10)" ]]
then
    python_cmd="python3.10"
fi

export install_dir="$HOME"
export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate"
# export COMMANDLINE_ARGS="--skip-torch-cuda-test --opt-sdp-attention --precision full --no-half"
export TORCH_COMMAND="pip install torch==2.1.0 torchvision==0.16.0"
export PYTORCH_ENABLE_MPS_FALLBACK=1

####################################################################
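Since webui.sh sources webui-macos-env.sh before webui-user.sh (see webui.sh below), these defaults can be overridden per user rather than edited here; a minimal sketch, values illustrative:

    # in webui-user.sh, on macOS
    export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate --medvram"
    export PYTORCH_ENABLE_MPS_FALLBACK=1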
51
services/AUTOMATIC1111/webui-user.sh
Normal file

@@ -0,0 +1,51 @@
#!/bin/bash
#########################################################
# Uncomment and change the variables below to your need:#
#########################################################

# Install directory without trailing slash
#install_dir="/home/$(whoami)"

# Name of the subdirectory
#clone_dir="stable-diffusion-webui"

# Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention"
# export COMMANDLINE_ARGS=""
# export COMMANDLINE_ARGS="--skip-torch-cuda-test --opt-sdp-attention --precision full --no-half"
# export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate"
export COMMANDLINE_ARGS="--skip-torch-cuda-test --opt-sdp-attention --precision full --no-half --upcast-sampling --no-half-vae --use-cpu interrogate"

# python3 executable
#python_cmd="python3"

# git executable
#export GIT="git"

# python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
#venv_dir="venv"

# script to launch to start the app
#export LAUNCH_SCRIPT="launch.py"

# install command for torch
#export TORCH_COMMAND="pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113"

# Requirements file to use for stable-diffusion-webui
#export REQS_FILE="requirements_versions.txt"

# Fixed git repos
#export K_DIFFUSION_PACKAGE=""
#export GFPGAN_PACKAGE=""

# Fixed git commits
#export STABLE_DIFFUSION_COMMIT_HASH=""
#export CODEFORMER_COMMIT_HASH=""
#export BLIP_COMMIT_HASH=""

# Uncomment to enable accelerated launch
#export ACCELERATE="True"

# Uncomment to disable TCMalloc
#export NO_TCMALLOC="True"

###########################################
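To sanity-check what the launcher will actually pick up from this file, it can be sourced in a throwaway shell; a minimal sketch:

    bash -c 'source services/AUTOMATIC1111/webui-user.sh && echo "COMMANDLINE_ARGS=${COMMANDLINE_ARGS}"'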
165
services/AUTOMATIC1111/webui.py
Normal file

@@ -0,0 +1,165 @@
from __future__ import annotations

import os
import time

from modules import timer
from modules import initialize_util
from modules import initialize

startup_timer = timer.startup_timer
startup_timer.record("launcher")

initialize.imports()

initialize.check_versions()


def create_api(app):
    from modules.api.api import Api
    from modules.call_queue import queue_lock

    api = Api(app, queue_lock)
    return api


def api_only():
    from fastapi import FastAPI
    from modules.shared_cmd_options import cmd_opts

    initialize.initialize()

    app = FastAPI()
    initialize_util.setup_middleware(app)
    api = create_api(app)

    from modules import script_callbacks
    script_callbacks.before_ui_callback()
    script_callbacks.app_started_callback(None, app)

    print(f"Startup time: {startup_timer.summary()}.")
    api.launch(
        server_name=initialize_util.gradio_server_name(),
        port=cmd_opts.port if cmd_opts.port else 7861,
        root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else ""
    )

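# A minimal sketch of talking to the server started by api_only() above
# (assumes the requests package; 7861 is the default port used in api.launch,
# and FastAPI serves its interactive docs at /docs by default):
#
#     import requests
#     print(requests.get("http://127.0.0.1:7861/docs").status_code)  # expect 200

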
def webui():
    from modules.shared_cmd_options import cmd_opts

    launch_api = cmd_opts.api
    initialize.initialize()

    from modules import shared, ui_tempdir, script_callbacks, ui, progress, ui_extra_networks

    while 1:
        if shared.opts.clean_temp_dir_at_start:
            ui_tempdir.cleanup_tmpdr()
            startup_timer.record("cleanup temp dir")

        script_callbacks.before_ui_callback()
        startup_timer.record("scripts before_ui_callback")

        shared.demo = ui.create_ui()
        startup_timer.record("create ui")

        if not cmd_opts.no_gradio_queue:
            shared.demo.queue(64)

        gradio_auth_creds = list(initialize_util.get_gradio_auth_creds()) or None

        auto_launch_browser = False
        if os.getenv('SD_WEBUI_RESTARTING') != '1':
            if shared.opts.auto_launch_browser == "Remote" or cmd_opts.autolaunch:
                auto_launch_browser = True
            elif shared.opts.auto_launch_browser == "Local":
                auto_launch_browser = not cmd_opts.webui_is_non_local

        print("-" * 60)
        print(f"cmd_opts.share: {cmd_opts.share}")
        print("set share=True")
        app, local_url, share_url = shared.demo.launch(
            # share=cmd_opts.share,
            share=True,  # forced on here so a public gradio link is always created
            server_name=initialize_util.gradio_server_name(),
            server_port=cmd_opts.port,
            ssl_keyfile=cmd_opts.tls_keyfile,
            ssl_certfile=cmd_opts.tls_certfile,
            ssl_verify=cmd_opts.disable_tls_verify,
            debug=cmd_opts.gradio_debug,
            auth=gradio_auth_creds,
            inbrowser=auto_launch_browser,
            prevent_thread_lock=True,
            allowed_paths=cmd_opts.gradio_allowed_path,
            app_kwargs={
                "docs_url": "/docs",
                "redoc_url": "/redoc",
            },
            root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "",
        )

        startup_timer.record("gradio launch")

        # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
        # an attacker to trick the user into opening a malicious HTML page, which makes a request to the
        # running web ui and do whatever the attacker wants, including installing an extension and
        # running its code. We disable this here. Suggested by RyotaK.
        app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']

        initialize_util.setup_middleware(app)

        progress.setup_progress_api(app)
        ui.setup_ui_api(app)

        if launch_api:
            create_api(app)

        ui_extra_networks.add_pages_to_demo(app)

        startup_timer.record("add APIs")

        with startup_timer.subcategory("app_started_callback"):
            script_callbacks.app_started_callback(shared.demo, app)

        timer.startup_record = startup_timer.dump()
        print(f"Startup time: {startup_timer.summary()}.")

        try:
            while True:
                server_command = shared.state.wait_for_server_command(timeout=5)
                if server_command:
                    if server_command in ("stop", "restart"):
                        break
                    else:
                        print(f"Unknown server command: {server_command}")
        except KeyboardInterrupt:
            print('Caught KeyboardInterrupt, stopping...')
            server_command = "stop"

        if server_command == "stop":
            print("Stopping server...")
            # If we catch a keyboard interrupt, we want to stop the server and exit.
            shared.demo.close()
            break

        # disable auto launch webui in browser for subsequent UI Reload
        os.environ.setdefault('SD_WEBUI_RESTARTING', '1')

        print('Restarting UI...')
        shared.demo.close()
        time.sleep(0.5)
        startup_timer.reset()
        script_callbacks.app_reload_callback()
        startup_timer.record("app reload callback")
        script_callbacks.script_unloaded_callback()
        startup_timer.record("scripts unloaded callback")
        initialize.initialize_rest(reload_script_modules=True)


if __name__ == "__main__":
    from modules.shared_cmd_options import cmd_opts

    if cmd_opts.nowebui:
        api_only()
    else:
        webui()
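The __main__ block selects between the two entry points; a minimal launch sketch using the flags referenced above (invoking the module directly, outside the launcher):

    python webui.py --nowebui   # API-only server, port 7861 unless --port is given
    python webui.py --api       # full UI with the API routes mounted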
292
services/AUTOMATIC1111/webui.sh
Executable file

@@ -0,0 +1,292 @@
#!/usr/bin/env bash
#################################################
# Please do not make any changes to this file,  #
# change the variables in webui-user.sh instead #
#################################################

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )


# If run from macOS, load defaults from webui-macos-env.sh
if [[ "$OSTYPE" == "darwin"* ]]; then
    if [[ -f "$SCRIPT_DIR"/webui-macos-env.sh ]]
    then
        source "$SCRIPT_DIR"/webui-macos-env.sh
    fi
fi

# Read variables from webui-user.sh
# shellcheck source=/dev/null
if [[ -f "$SCRIPT_DIR"/webui-user.sh ]]
then
    source "$SCRIPT_DIR"/webui-user.sh
fi

# If $venv_dir is "-", then disable venv support
use_venv=1
if [[ $venv_dir == "-" ]]; then
    use_venv=0
fi

# Set defaults
# Install directory without trailing slash
if [[ -z "${install_dir}" ]]
then
    install_dir="$SCRIPT_DIR"
fi

# Name of the subdirectory (defaults to stable-diffusion-webui)
if [[ -z "${clone_dir}" ]]
then
    clone_dir="stable-diffusion-webui"
fi

# python3 executable
if [[ -z "${python_cmd}" ]]
then
    python_cmd="python3"
fi

# git executable
if [[ -z "${GIT}" ]]
then
    export GIT="git"
else
    export GIT_PYTHON_GIT_EXECUTABLE="${GIT}"
fi

# python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
if [[ -z "${venv_dir}" ]] && [[ $use_venv -eq 1 ]]
then
    venv_dir="venv"
fi

if [[ -z "${LAUNCH_SCRIPT}" ]]
then
    LAUNCH_SCRIPT="launch.py"
fi

# this script cannot be run as root by default
can_run_as_root=0

# read any command line flags to the webui.sh script
while getopts "f" flag > /dev/null 2>&1
do
    case ${flag} in
        f) can_run_as_root=1;;
        *) break;;
    esac
done

# Disable sentry logging
export ERROR_REPORTING=FALSE

# Do not reinstall existing pip packages on Debian/Ubuntu
export PIP_IGNORE_INSTALLED=0

# Pretty print
delimiter="################################################################"

printf "\n%s\n" "${delimiter}"
printf "\e[1m\e[32mInstall script for stable-diffusion + Web UI\n"
printf "\e[1m\e[34mTested on Debian 11 (Bullseye), Fedora 34+ and openSUSE Leap 15.4 or newer.\e[0m"
printf "\n%s\n" "${delimiter}"

# Do not run as root
if [[ $(id -u) -eq 0 && can_run_as_root -eq 0 ]]
then
    printf "\n%s\n" "${delimiter}"
    printf "\e[1m\e[31mERROR: This script must not be launched as root, aborting...\e[0m"
    printf "\n%s\n" "${delimiter}"
    exit 1
else
    printf "\n%s\n" "${delimiter}"
    printf "Running on \e[1m\e[32m%s\e[0m user" "$(whoami)"
    printf "\n%s\n" "${delimiter}"
fi

if [[ $(getconf LONG_BIT) = 32 ]]
then
    printf "\n%s\n" "${delimiter}"
    printf "\e[1m\e[31mERROR: Unsupported: running on a 32-bit OS\e[0m"
    printf "\n%s\n" "${delimiter}"
    exit 1
fi

if [[ -d .git ]]
then
    printf "\n%s\n" "${delimiter}"
    printf "Repo already cloned, using it as install directory"
    printf "\n%s\n" "${delimiter}"
    install_dir="${PWD}/../"
    clone_dir="${PWD##*/}"
fi

# Check prerequisites
gpu_info=$(lspci 2>/dev/null | grep -E "VGA|Display")
case "$gpu_info" in
    *"Navi 1"*)
        export HSA_OVERRIDE_GFX_VERSION=10.3.0
        if [[ -z "${TORCH_COMMAND}" ]]
        then
            pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')"
            if [[ $(bc <<< "$pyv <= 3.10") -eq 1 ]]
            then
                # Navi 1 users get the nightly ROCm 5.6 torch build; stable torch 2.x does not seem to work.
                export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.6"
            else
                printf "\e[1m\e[31mERROR: RX 5000 series GPUs require Python 3.10 at most, aborting...\e[0m"
                exit 1
            fi
        fi
        ;;
    *"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
        ;;
    *"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \
        export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7"
        ;;
    *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
        printf "\n%s\n" "${delimiter}"
        printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half"
        printf "\n%s\n" "${delimiter}"
        ;;
    *)
        ;;
esac

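# Sketch: every TORCH_COMMAND default chosen above is guarded by
# [[ -z "${TORCH_COMMAND}" ]], so it can be pre-empted from webui-user.sh, e.g.:
#     export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.6"
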
if ! echo "$gpu_info" | grep -q "NVIDIA";
|
||||
then
|
||||
if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
|
||||
then
|
||||
export TORCH_COMMAND="pip install torch==2.0.1+rocm5.4.2 torchvision==0.15.2+rocm5.4.2 --index-url https://download.pytorch.org/whl/rocm5.4.2"
|
||||
elif echo "$gpu_info" | grep -q "Huawei" && [[ -z "${TORCH_COMMAND}" ]]
|
||||
then
|
||||
export TORCH_COMMAND="pip install torch==2.1.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu; pip install torch_npu"
|
||||
|
||||
fi
|
||||
fi
|
||||
|
||||
for preq in "${GIT}" "${python_cmd}"
|
||||
do
|
||||
if ! hash "${preq}" &>/dev/null
|
||||
then
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
printf "\e[1m\e[31mERROR: %s is not installed, aborting...\e[0m" "${preq}"
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ $use_venv -eq 1 ]] && ! "${python_cmd}" -c "import venv" &>/dev/null
|
||||
then
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
printf "\e[1m\e[31mERROR: python3-venv is not installed, aborting...\e[0m"
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${install_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/, aborting...\e[0m" "${install_dir}"; exit 1; }
|
||||
if [[ -d "${clone_dir}" ]]
|
||||
then
|
||||
cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
|
||||
else
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
printf "Clone stable-diffusion-webui"
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
"${GIT}" clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git "${clone_dir}"
|
||||
cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
|
||||
fi
|
||||
|
||||
if [[ $use_venv -eq 1 ]] && [[ -z "${VIRTUAL_ENV}" ]];
|
||||
then
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
printf "Create and activate python venv"
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
cd "${install_dir}"/"${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
|
||||
if [[ ! -d "${venv_dir}" ]]
|
||||
then
|
||||
"${python_cmd}" -m venv "${venv_dir}"
|
||||
first_launch=1
|
||||
fi
|
||||
# shellcheck source=/dev/null
|
||||
if [[ -f "${venv_dir}"/bin/activate ]]
|
||||
then
|
||||
source "${venv_dir}"/bin/activate
|
||||
else
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
printf "\e[1m\e[31mERROR: Cannot activate python venv, aborting...\e[0m"
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
printf "python venv already activate or run without venv: ${VIRTUAL_ENV}"
|
||||
printf "\n%s\n" "${delimiter}"
|
||||
fi
|
||||
|
||||
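# Sketch: venv handling can be skipped entirely via the "-" sentinel checked
# near the top of this script, e.g. in webui-user.sh:
#     venv_dir="-"
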
# Try using TCMalloc on Linux
prepare_tcmalloc() {
    if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then
        # check glibc version
        LIBC_VER=$(echo $(ldd --version | awk 'NR==1 {print $NF}') | grep -oP '\d+\.\d+')
        echo "glibc version is $LIBC_VER"
        libc_vernum=$(expr $LIBC_VER)
        # Since 2.34, libpthread is integrated into libc.so
        libc_v234=2.34
        # Define TCMalloc library name patterns
        TCMALLOC_LIBS=("libtcmalloc(_minimal|)\.so\.\d" "libtcmalloc\.so\.\d")
        # Traverse the patterns
        for lib in "${TCMALLOC_LIBS[@]}"
        do
            # Pick the first installed library matching this pattern
            TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -P $lib | head -n 1)"
            TC_INFO=(${TCMALLOC//=>/})
            if [[ ! -z "${TC_INFO}" ]]; then
                echo "Check TCMalloc: ${TC_INFO}"
                # Determine if the library is linked against libpthread, to avoid "undefined symbol: pthread_key_create"
                if [ $(echo "$libc_vernum < $libc_v234" | bc) -eq 1 ]; then
                    # glibc < 2.34: pthread_key_create lives in libpthread.so, so check that the library links it
                    if ldd ${TC_INFO[2]} | grep -q 'libpthread'; then
                        echo "$TC_INFO is linked with libpthread, execute LD_PRELOAD=${TC_INFO[2]}"
                        # set fullpath LD_PRELOAD (to be on the safe side)
                        export LD_PRELOAD="${TC_INFO[2]}"
                        break
                    else
                        echo "$TC_INFO is not linked with libpthread and would trigger undefined symbol: pthread_key_create"
                    fi
                else
                    # glibc >= 2.34 ships the pthread symbols inside libc.so itself
                    # (Ubuntu 22.04, other modern distros, WSL), so no libpthread check is needed.
                    echo "$TC_INFO is linked with libc.so, execute LD_PRELOAD=${TC_INFO[2]}"
                    # set fullpath LD_PRELOAD (to be on the safe side)
                    export LD_PRELOAD="${TC_INFO[2]}"
                    break
                fi
            fi
        done
        if [[ -z "${LD_PRELOAD}" ]]; then
            printf "\e[1m\e[31mCannot locate TCMalloc. Do you have tcmalloc or google-perftools installed on your system? (improves CPU memory usage)\e[0m\n"
        fi
    fi
}

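# Sketch: to check in advance whether prepare_tcmalloc will find a library, the
# same lookup it performs can be run by hand:
#     ldconfig -p | grep -P 'libtcmalloc(_minimal|)\.so\.\d'
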
KEEP_GOING=1
export SD_WEBUI_RESTART=tmp/restart
while [[ "$KEEP_GOING" -eq "1" ]]; do
    if [[ ! -z "${ACCELERATE}" ]] && [[ "${ACCELERATE}" == "True" ]] && [ -x "$(command -v accelerate)" ]; then
        printf "\n%s\n" "${delimiter}"
        printf "Accelerating launch.py..."
        printf "\n%s\n" "${delimiter}"
        prepare_tcmalloc
        accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@"
    else
        printf "\n%s\n" "${delimiter}"
        printf "Launching launch.py..."
        printf "\n%s\n" "${delimiter}"
        prepare_tcmalloc
        "${python_cmd}" -u "${LAUNCH_SCRIPT}" "$@"
    fi

    if [[ ! -f tmp/restart ]]; then
        KEEP_GOING=0
    fi
done
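# Note on the loop above: the web UI is expected to create the file named in
# SD_WEBUI_RESTART (tmp/restart) when a restart is requested; if launch.py
# exits without creating it, KEEP_GOING is cleared and the script ends.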