mirror of
https://github.com/neonbjb/tortoise-tts.git
synced 2026-03-04 12:34:34 +01:00
commit
5415d47a1d
34
Dockerfile
Normal file
34
Dockerfile
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
FROM nvidia/cuda:12.2.0-base-ubuntu22.04

# System packages needed to fetch Miniconda and work with the source tree.
# --allow-unauthenticated removed: official Ubuntu repos are signed, and the
# flag silently disables package verification.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        git \
        wget \
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /var/lib/apt/lists/*

# key=value form (the legacy space-separated "ENV key value" form is deprecated).
ENV HOME="/root"
ENV CONDA_DIR="${HOME}/miniconda"
ENV PATH="${CONDA_DIR}/bin:${PATH}"
ENV CONDA_AUTO_UPDATE_CONDA=false
ENV PIP_DOWNLOAD_CACHE="${HOME}/.pip/cache"
# Default model cache location; the original line had no value, which is a hard
# Docker build error. /models matches the README's documented
# `-e TORTOISE_MODELS_DIR=/models` / `-v ...:/models` usage — confirm upstream intent.
ENV TORTOISE_MODELS_DIR="/models"

# Install Miniconda and remove the installer in the same layer so it never
# persists in the image.
RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /tmp/miniconda3.sh \
    && bash /tmp/miniconda3.sh -b -p "${CONDA_DIR}" -f -u \
    && "${CONDA_DIR}/bin/conda" init bash \
    && rm -f /tmp/miniconda3.sh \
    && echo ". '${CONDA_DIR}/etc/profile.d/conda.sh'" >> "${HOME}/.profile"

# --login option used to source bashrc (thus activating conda env) at every RUN statement
SHELL ["/bin/bash", "--login", "-c"]

# WORKDIR instead of `cd` inside RUN (hadolint DL3003); sources are copied
# *after* the heavy apt/miniconda layers so editing code doesn't bust their cache.
WORKDIR /app
COPY . /app

# Build the tortoise environment; `conda clean` in the same layer drops the
# downloaded package cache instead of baking it into the image.
RUN conda create --name tortoise python=3.9 numba inflect \
    && conda activate tortoise \
    && conda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia \
    && conda install transformers=4.29.2 \
    && python setup.py install \
    && conda clean --all --yes
|
||||
32
README.md
32
README.md
|
|
@ -91,6 +91,38 @@ Optionally, pytorch can be installed in the base environment, so that other cond
|
|||
|
||||
If you are on windows, you may also need to install pysoundfile: `conda install -c conda-forge pysoundfile`
|
||||
|
||||
### Docker
|
||||
|
||||
An easy way to hit the ground running and a good jumping off point depending on your use case.
|
||||
|
||||
```sh
|
||||
git clone https://github.com/neonbjb/tortoise-tts.git
|
||||
cd tortoise-tts
|
||||
|
||||
docker build . -t tts
|
||||
|
||||
docker run --gpus all \
|
||||
-e TORTOISE_MODELS_DIR=/models \
|
||||
-v /mnt/user/data/tortoise_tts/models:/models \
|
||||
-v /mnt/user/data/tortoise_tts/results:/results \
|
||||
-v /mnt/user/data/.cache/huggingface:/root/.cache/huggingface \
|
||||
-v /root:/work \
|
||||
-it tts
|
||||
```
|
||||
This gives you an interactive terminal in an environment that's ready to do some tts. Now you can explore the different interfaces that tortoise exposes for tts.
|
||||
|
||||
For example:
|
||||
|
||||
```sh
|
||||
cd app
|
||||
conda activate tortoise
|
||||
time python tortoise/do_tts.py \
|
||||
--output_path /results \
|
||||
--preset ultra_fast \
|
||||
--voice geralt \
|
||||
  --text "Time flies like an arrow; fruit flies like a banana."
|
||||
```
|
||||
|
||||
## Apple Silicon
|
||||
|
||||
On macOS 13+ with M1/M2 chips you need to install the nightly version of PyTorch. As stated on the official page, you can do:
|
||||
|
|
|
|||
Loading…
Reference in a new issue