# text-generation-webui/requirements/full/requirements_cpu_only.txt

accelerate==1.8.*
audioop-lts<1.0; python_version >= "3.13"
colorama
datasets
einops
fastapi==0.112.4
html2text==2025.4.15
huggingface-hub==0.36.0
jinja2==3.1.6
markdown
numpy==2.2.*
optimum-quanto==0.2.7
pandas
peft==0.18.*
Pillow>=9.5.0
psutil
pydantic==2.11.0
PyPDF2==3.0.1
python-docx==1.1.2
pyyaml
requests
rich
safetensors==0.6.*
scipy
sentencepiece
tensorboard
transformers==4.57.*
triton-windows==3.5.1.post21; platform_system == "Windows"
tqdm
wandb
# Gradio
gradio==4.37.*
https://github.com/oobabooga/gradio/releases/download/custom-build/gradio_client-1.0.2+custom.1-py3-none-any.whl
# Diffusers
diffusers @ git+https://github.com/huggingface/diffusers.git@edf36f5128abf3e6ecf92b5145115514363c58e6
# API
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# llama.cpp (CPU only, AVX2)
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.62.0/llama_cpp_binaries-0.62.0+cpuavx2-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.62.0/llama_cpp_binaries-0.62.0+cpuavx2-py3-none-win_amd64.whl; platform_system == "Windows"
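# Usage (a minimal sketch, assuming this file sits at requirements/full/requirements_cpu_only.txt
# inside a text-generation-webui checkout; run from the repository root):
#   pip install -r requirements/full/requirements_cpu_only.txt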