Update llama.cpp, remove noavx2 builds, add ROCm Windows portable builds

This commit is contained in:
oobabooga 2026-03-03 15:22:16 -08:00
parent d7dd533b99
commit b8fcc8ea32
24 changed files with 64 additions and 362 deletions

View file

@@ -59,7 +59,6 @@ jobs:
$matrix = @{
'os' = @('ubuntu-22.04', 'windows-2022')
'pyver' = @("3.13")
'avx' = @("AVX2")
'cuda' = @("12.4")
}
@@ -75,7 +74,7 @@ jobs:
Write-Output ('matrix=' + $matrixOut) >> $env:GITHUB_OUTPUT
build_wheels:
name: ${{ matrix.os }} ${{ matrix.pyver }} CPU ${{ matrix.avx }} CUDA ${{ matrix.cuda }}
name: ${{ matrix.os }} ${{ matrix.pyver }} CUDA ${{ matrix.cuda }}
needs: define_matrix
runs-on: ${{ matrix.os }}
strategy:
@@ -84,7 +83,6 @@ jobs:
run:
shell: pwsh
env:
AVXVER: ${{ matrix.avx }}
PCKGVER: ${{ inputs.version }}
steps:
@@ -113,7 +111,6 @@ jobs:
# Define common variables
CUDA_VERSION="${{ matrix.cuda }}"
AVX_SUPPORT="${{ matrix.avx }}"
VERSION="${{ inputs.version }}"
# 1. Set platform-specific variables
@@ -138,16 +135,9 @@ jobs:
tar -xzf python-build.tar.gz
mv python "text-generation-webui-${VERSION_CLEAN}/portable_env"
# 3. Prepare requirements file based on AVX and CUDA
if [[ "$AVX_SUPPORT" == "AVX2" ]]; then
BASE_REQ_FILE="requirements/portable/requirements.txt"
else
BASE_REQ_FILE="requirements/portable/requirements_noavx2.txt"
fi
# Create CUDA-specific requirements file if needed
# 3. Prepare requirements file
cd "text-generation-webui-${VERSION_CLEAN}"
REQ_FILE="$BASE_REQ_FILE"
REQ_FILE="requirements/portable/requirements.txt"
# 4. Install packages
echo "Installing Python packages from $REQ_FILE..."

View file

@@ -57,9 +57,8 @@ jobs:
id: set-matrix
run: |
$matrix = @{
'os' = @('ubuntu-22.04')
'os' = @('ubuntu-22.04', 'windows-2022')
'pyver' = @("3.13")
'avx' = @("AVX2")
}
if ($env:CONFIGIN -ne 'Default') {$env:CONFIGIN.split(';').foreach({$matrix[$_.split(':')[0]] = $_.split(':')[1].split(',')})}
@@ -74,7 +73,7 @@ jobs:
Write-Output ('matrix=' + $matrixOut) >> $env:GITHUB_OUTPUT
build_wheels:
name: ${{ matrix.os }} ${{ matrix.pyver }} CPU ${{ matrix.avx }}
name: ${{ matrix.os }} ${{ matrix.pyver }}
needs: define_matrix
runs-on: ${{ matrix.os }}
strategy:
@@ -83,7 +82,6 @@ jobs:
run:
shell: pwsh
env:
AVXVER: ${{ matrix.avx }}
PCKGVER: ${{ inputs.version }}
steps:
@@ -111,15 +109,22 @@ jobs:
find extensions/ -mindepth 1 -maxdepth 1 -type d | grep -v -E "$(printf '%s|' "${allowed[@]}" | sed 's/|$//')" | xargs rm -rf
# Define common variables
AVX_SUPPORT="${{ matrix.avx }}"
VERSION="${{ inputs.version }}"
# 1. Set platform-specific variables (Linux only for ROCm)
PLATFORM="linux"
PYTHON_URL="https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.13.12+20260303-x86_64-unknown-linux-gnu-install_only.tar.gz"
PIP_PATH="portable_env/bin/python -m pip"
PACKAGES_PATH="portable_env/lib/python3.13/site-packages"
rm start_macos.sh start_windows.bat
# 1. Set platform-specific variables
if [[ "$RUNNER_OS" == "Windows" ]]; then
PLATFORM="windows"
PYTHON_URL="https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.13.12+20260303-x86_64-pc-windows-msvc-install_only.tar.gz"
PIP_PATH="portable_env/python.exe -m pip"
PACKAGES_PATH="portable_env/Lib/site-packages"
rm start_linux.sh start_macos.sh
else
PLATFORM="linux"
PYTHON_URL="https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.13.12+20260303-x86_64-unknown-linux-gnu-install_only.tar.gz"
PIP_PATH="portable_env/bin/python -m pip"
PACKAGES_PATH="portable_env/lib/python3.13/site-packages"
rm start_macos.sh start_windows.bat
fi
# 2. Download and extract Python
cd ..
@@ -128,13 +133,8 @@ jobs:
tar -xzf python-build.tar.gz
mv python "text-generation-webui-${VERSION_CLEAN}/portable_env"
# 3. Prepare requirements file based on AVX
if [[ "$AVX_SUPPORT" == "AVX2" ]]; then
BASE_REQ_FILE="requirements/portable/requirements_amd.txt"
else
BASE_REQ_FILE="requirements/portable/requirements_amd_noavx2.txt"
fi
REQ_FILE="$BASE_REQ_FILE"
# 3. Prepare requirements file
REQ_FILE="requirements/portable/requirements_amd.txt"
cd "text-generation-webui-${VERSION_CLEAN}"
@@ -150,7 +150,11 @@ jobs:
ZIP_NAME="textgen-portable-${VERSION_CLEAN}-${PLATFORM}-rocm.zip"
echo "Creating archive: $ZIP_NAME"
zip -r "$ZIP_NAME" "text-generation-webui-${VERSION_CLEAN}"
if [[ "$RUNNER_OS" == "Windows" ]]; then
powershell -Command "Compress-Archive -Path text-generation-webui-${VERSION_CLEAN} -DestinationPath $ZIP_NAME"
else
zip -r "$ZIP_NAME" "text-generation-webui-${VERSION_CLEAN}"
fi
- name: Upload files to a GitHub release
id: upload-release

View file

@@ -59,7 +59,6 @@ jobs:
$matrix = @{
'os' = @('ubuntu-22.04', 'windows-2022')
'pyver' = @("3.13")
'avx' = @("AVX2")
}
if ($env:CONFIGIN -ne 'Default') {$env:CONFIGIN.split(';').foreach({$matrix[$_.split(':')[0]] = $_.split(':')[1].split(',')})}
@@ -74,7 +73,7 @@ jobs:
Write-Output ('matrix=' + $matrixOut) >> $env:GITHUB_OUTPUT
build_wheels:
name: ${{ matrix.os }} ${{ matrix.pyver }} CPU ${{ matrix.avx }}
name: ${{ matrix.os }} ${{ matrix.pyver }}
needs: define_matrix
runs-on: ${{ matrix.os }}
strategy:
@@ -83,7 +82,6 @@ jobs:
run:
shell: pwsh
env:
AVXVER: ${{ matrix.avx }}
PCKGVER: ${{ inputs.version }}
steps:
@@ -111,7 +109,6 @@ jobs:
find extensions/ -mindepth 1 -maxdepth 1 -type d | grep -v -E "$(printf '%s|' "${allowed[@]}" | sed 's/|$//')" | xargs rm -rf
# Define common variables
AVX_SUPPORT="${{ matrix.avx }}"
VERSION="${{ inputs.version }}"
# 1. Set platform-specific variables
@@ -136,13 +133,8 @@ jobs:
tar -xzf python-build.tar.gz
mv python "text-generation-webui-${VERSION_CLEAN}/portable_env"
# 3. Prepare requirements file based on AVX
if [[ "$AVX_SUPPORT" == "AVX2" ]]; then
BASE_REQ_FILE="requirements/portable/requirements_vulkan.txt"
else
BASE_REQ_FILE="requirements/portable/requirements_vulkan_noavx2.txt"
fi
REQ_FILE="$BASE_REQ_FILE"
# 3. Prepare requirements file
REQ_FILE="requirements/portable/requirements_vulkan.txt"
cd "text-generation-webui-${VERSION_CLEAN}"

View file

@@ -59,7 +59,6 @@ jobs:
$matrix = @{
'os' = @('ubuntu-22.04', 'windows-2022', 'macos-14')
'pyver' = @("3.13")
'avx' = @("AVX2")
}
if ($env:CONFIGIN -ne 'Default') {$env:CONFIGIN.split(';').foreach({$matrix[$_.split(':')[0]] = $_.split(':')[1].split(',')})}
@@ -74,7 +73,7 @@ jobs:
Write-Output ('matrix=' + $matrixOut) >> $env:GITHUB_OUTPUT
build_wheels:
name: ${{ matrix.os }} ${{ matrix.pyver }} CPU ${{ matrix.avx }}
name: ${{ matrix.os }} ${{ matrix.pyver }}
needs: define_matrix
runs-on: ${{ matrix.os }}
strategy:
@@ -83,7 +82,6 @@ jobs:
run:
shell: pwsh
env:
AVXVER: ${{ matrix.avx }}
PCKGVER: ${{ inputs.version }}
steps:
@@ -111,7 +109,6 @@ jobs:
find extensions/ -mindepth 1 -maxdepth 1 -type d | grep -v -E "$(printf '%s|' "${allowed[@]}" | sed 's/|$//')" | xargs rm -rf
# Define common variables
AVX_SUPPORT="${{ matrix.avx }}"
VERSION="${{ inputs.version }}"
OS_TYPE="${{ matrix.os }}"
@@ -151,7 +148,7 @@ jobs:
tar -xzf python-build.tar.gz
mv python "text-generation-webui-${VERSION_CLEAN}/portable_env"
# 3. Prepare requirements file based on platform and AVX
# 3. Prepare requirements file based on platform
cd "text-generation-webui-${VERSION_CLEAN}"
# Select requirements file based on platform
@@ -162,12 +159,7 @@ jobs:
REQ_FILE="requirements/portable/requirements_apple_silicon.txt"
fi
else
# For Windows and Linux, check AVX support
if [[ "$AVX_SUPPORT" == "AVX2" ]]; then
REQ_FILE="requirements/portable/requirements_cpu_only.txt"
else
REQ_FILE="requirements/portable/requirements_cpu_only_noavx2.txt"
fi
REQ_FILE="requirements/portable/requirements_cpu_only.txt"
fi
echo "Using requirements file: $REQ_FILE"

View file

@@ -171,16 +171,13 @@ pip install -r requirements/full/<requirements file according to table below>
Requirements file to use:
| GPU | CPU | requirements file to use |
|--------|---------|---------|
| NVIDIA | has AVX2 | `requirements.txt` |
| NVIDIA | no AVX2 | `requirements_noavx2.txt` |
| AMD | has AVX2 | `requirements_amd.txt` |
| AMD | no AVX2 | `requirements_amd_noavx2.txt` |
| CPU only | has AVX2 | `requirements_cpu_only.txt` |
| CPU only | no AVX2 | `requirements_cpu_only_noavx2.txt` |
| Apple | Intel | `requirements_apple_intel.txt` |
| Apple | Apple Silicon | `requirements_apple_silicon.txt` |
| GPU | requirements file to use |
|--------|---------|
| NVIDIA | `requirements.txt` |
| AMD | `requirements_amd.txt` |
| CPU only | `requirements_cpu_only.txt` |
| Apple Intel | `requirements_apple_intel.txt` |
| Apple Silicon | `requirements_apple_silicon.txt` |
### Start the web UI

View file

@@ -65,24 +65,6 @@ def is_installed():
return os.path.isdir(conda_env_path)
def cpu_has_avx2():
try:
import cpuinfo
info = cpuinfo.get_cpu_info()
return 'avx2' in info['flags']
except:
return True
def cpu_has_amx():
try:
import cpuinfo
info = cpuinfo.get_cpu_info()
return 'amx' in info['flags']
except:
return True
def load_state():
"""Load installer state from JSON file"""
if os.path.exists(state_file):
@@ -172,13 +154,13 @@ def get_requirements_file(gpu_choice):
requirements_base = os.path.join("requirements", "full")
if gpu_choice == "NVIDIA_CUDA128":
file_name = f"requirements{'_noavx2' if not cpu_has_avx2() else ''}.txt"
file_name = "requirements.txt"
elif gpu_choice == "AMD":
file_name = f"requirements_amd{'_noavx2' if not cpu_has_avx2() else ''}.txt"
file_name = "requirements_amd.txt"
elif gpu_choice == "APPLE":
file_name = f"requirements_apple_{'intel' if is_x86_64() else 'silicon'}.txt"
elif gpu_choice in ["INTEL", "NONE"]:
file_name = f"requirements_cpu_only{'_noavx2' if not cpu_has_avx2() else ''}.txt"
file_name = "requirements_cpu_only.txt"
else:
raise ValueError(f"Unknown GPU choice: {gpu_choice}")
@@ -327,7 +309,7 @@ def install_webui():
elif any((is_windows(), is_linux())) and gpu_choice == "NVIDIA_CUDA128":
print("CUDA: 12.8")
# No PyTorch for AMD on Windows (?)
# No PyTorch for AMD on Windows
elif is_windows() and gpu_choice == "AMD":
print("PyTorch setup on Windows is not implemented yet. Exiting...")
sys.exit(1)
@@ -335,7 +317,7 @@ def install_webui():
# Install Git and then Pytorch
print_big_message("Installing PyTorch.")
install_pytorch = get_pytorch_install_command(gpu_choice)
run_cmd(f"conda install -y ninja git && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True)
run_cmd(f"conda install -y ninja git && {install_pytorch}", assert_success=True, environment=True)
if gpu_choice == "INTEL":
# Install oneAPI dependencies via conda

View file

@@ -42,8 +42,8 @@ sse-starlette==1.6.5
tiktoken
# CUDA wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cu124-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cu124-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+cu124-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+cu124-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/turboderp-org/exllamav3/releases/download/v0.0.22/exllamav3-0.0.22+cu128.torch2.9.0-cp313-cp313-win_amd64.whl; platform_system == "Windows" and python_version == "3.13"
https://github.com/turboderp-org/exllamav3/releases/download/v0.0.22/exllamav3-0.0.22+cu128.torch2.9.0-cp313-cp313-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.13"
https://github.com/turboderp-org/exllamav2/releases/download/v0.3.2/exllamav2-0.3.2+cu128.torch2.9.0-cp313-cp313-win_amd64.whl; platform_system == "Windows" and python_version == "3.13"

View file

@@ -40,7 +40,7 @@ sse-starlette==1.6.5
tiktoken
# AMD wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkan-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+rocm6.4.4-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+rocm6.4-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+rocm6.4-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/turboderp-org/exllamav2/releases/download/v0.3.2/exllamav2-0.3.2+rocm6.4.torch2.9.0-cp313-cp313-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.13"
https://github.com/turboderp-org/exllamav2/releases/download/v0.3.2/exllamav2-0.3.2-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64"

View file

@@ -1,46 +0,0 @@
accelerate==1.12.*
audioop-lts<1.0; python_version >= "3.13"
colorama
datasets
diffusers==0.36.*
einops
fastapi==0.112.4
html2text==2025.4.15
huggingface-hub==1.5.*
jinja2==3.1.6
markdown
numpy==2.2.*
pandas
peft==0.18.*
Pillow>=9.5.0
psutil
pydantic==2.11.0
PyPDF2==3.0.1
python-docx==1.1.2
pyyaml
requests
rich
safetensors==0.7.*
scipy
sentencepiece
tensorboard
torchao==0.15.*
transformers==5.2.*
triton-windows==3.5.1.post24; platform_system == "Windows"
tqdm
wandb
# Gradio
gradio==4.37.*
https://github.com/oobabooga/gradio/releases/download/custom-build/gradio_client-1.0.2+custom.1-py3-none-any.whl
# API
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# AMD wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkanavx-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkanavx-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/turboderp-org/exllamav2/releases/download/v0.3.2/exllamav2-0.3.2+rocm6.4.torch2.9.0-cp313-cp313-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.13"
https://github.com/turboderp-org/exllamav2/releases/download/v0.3.2/exllamav2-0.3.2-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64"

View file

@@ -40,5 +40,4 @@ sse-starlette==1.6.5
tiktoken
# Mac wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0-py3-none-macosx_15_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0-py3-none-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0-py3-none-macosx_13_0_x86_64.whl; platform_system == "Darwin"

View file

@@ -40,5 +40,4 @@ sse-starlette==1.6.5
tiktoken
# Mac wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0-py3-none-macosx_15_0_arm64.whl; platform_system == "Darwin" and platform_release >= "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0-py3-none-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0-py3-none-macosx_13_0_arm64.whl; platform_system == "Darwin"

View file

@@ -39,6 +39,6 @@ flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# llama.cpp (CPU only, AVX2)
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cpuavx2-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cpuavx2-py3-none-win_amd64.whl; platform_system == "Windows"
# llama.cpp (CPU only)
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+cpu-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+cpu-py3-none-win_amd64.whl; platform_system == "Windows"

View file

@@ -1,44 +0,0 @@
accelerate==1.12.*
audioop-lts<1.0; python_version >= "3.13"
colorama
datasets
diffusers==0.36.*
einops
fastapi==0.112.4
html2text==2025.4.15
huggingface-hub==1.5.*
jinja2==3.1.6
markdown
numpy==2.2.*
pandas
peft==0.18.*
Pillow>=9.5.0
psutil
pydantic==2.11.0
PyPDF2==3.0.1
python-docx==1.1.2
pyyaml
requests
rich
safetensors==0.7.*
scipy
sentencepiece
tensorboard
torchao==0.15.*
transformers==5.2.*
triton-windows==3.5.1.post24; platform_system == "Windows"
tqdm
wandb
# Gradio
gradio==4.37.*
https://github.com/oobabooga/gradio/releases/download/custom-build/gradio_client-1.0.2+custom.1-py3-none-any.whl
# API
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# llama.cpp (CPU only, no AVX2)
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cpuavx-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cpuavx-py3-none-win_amd64.whl; platform_system == "Windows"

View file

@@ -1,53 +0,0 @@
accelerate==1.12.*
audioop-lts<1.0; python_version >= "3.13"
bitsandbytes==0.49.*
colorama
datasets
diffusers==0.36.*
einops
fastapi==0.112.4
flash-linear-attention==0.4.*
html2text==2025.4.15
huggingface-hub==1.5.*
jinja2==3.1.6
markdown
numpy==2.2.*
pandas
peft==0.18.*
Pillow>=9.5.0
psutil
pydantic==2.11.0
PyPDF2==3.0.1
python-docx==1.1.2
pyyaml
requests
rich
safetensors==0.7.*
scipy
sentencepiece
tensorboard
torchao==0.15.*
transformers==5.2.*
triton-windows==3.5.1.post24; platform_system == "Windows"
tqdm
wandb
# Gradio
gradio==4.37.*
https://github.com/oobabooga/gradio/releases/download/custom-build/gradio_client-1.0.2+custom.1-py3-none-any.whl
# API
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# CUDA wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cu124avx-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cu124avx-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/turboderp-org/exllamav3/releases/download/v0.0.22/exllamav3-0.0.22+cu128.torch2.9.0-cp313-cp313-win_amd64.whl; platform_system == "Windows" and python_version == "3.13"
https://github.com/turboderp-org/exllamav3/releases/download/v0.0.22/exllamav3-0.0.22+cu128.torch2.9.0-cp313-cp313-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.13"
https://github.com/turboderp-org/exllamav2/releases/download/v0.3.2/exllamav2-0.3.2+cu128.torch2.9.0-cp313-cp313-win_amd64.whl; platform_system == "Windows" and python_version == "3.13"
https://github.com/turboderp-org/exllamav2/releases/download/v0.3.2/exllamav2-0.3.2+cu128.torch2.9.0-cp313-cp313-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.13"
https://github.com/turboderp-org/exllamav2/releases/download/v0.3.2/exllamav2-0.3.2-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64"
https://github.com/kingbri1/flash-attention/releases/download/v2.8.3/flash_attn-2.8.3+cu128torch2.9.0cxx11abiFALSE-cp313-cp313-win_amd64.whl; platform_system == "Windows" and python_version == "3.13"
https://github.com/kingbri1/flash-attention/releases/download/v2.8.3/flash_attn-2.8.3+cu128torch2.9.0cxx11abiFALSE-cp313-cp313-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.13"

View file

@@ -23,5 +23,5 @@ sse-starlette==1.6.5
tiktoken
# CUDA wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cu124-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cu124-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+cu124-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+cu124-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

View file

@@ -23,5 +23,5 @@ sse-starlette==1.6.5
tiktoken
# AMD wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkan-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+rocm6.4.4-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+rocm6.4-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+rocm6.4-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

View file

@@ -1,27 +0,0 @@
audioop-lts<1.0; python_version >= "3.13"
fastapi==0.112.4
html2text==2025.4.15
huggingface-hub==1.5.*
jinja2==3.1.6
markdown
numpy==2.2.*
pydantic==2.11.0
PyPDF2==3.0.1
python-docx==1.1.2
pyyaml
requests
rich
tqdm
# Gradio
gradio==4.37.*
https://github.com/oobabooga/gradio/releases/download/custom-build/gradio_client-1.0.2+custom.1-py3-none-any.whl
# API
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# AMD wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkanavx-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+rocm6.4.4avx-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

View file

@@ -23,5 +23,4 @@ sse-starlette==1.6.5
tiktoken
# Mac wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0-py3-none-macosx_15_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0-py3-none-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0-py3-none-macosx_13_0_x86_64.whl; platform_system == "Darwin"

View file

@@ -23,5 +23,4 @@ sse-starlette==1.6.5
tiktoken
# Mac wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0-py3-none-macosx_15_0_arm64.whl; platform_system == "Darwin" and platform_release >= "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0-py3-none-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0-py3-none-macosx_13_0_arm64.whl; platform_system == "Darwin"

View file

@@ -22,6 +22,6 @@ flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# llama.cpp (CPU only, AVX2)
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cpuavx2-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cpuavx2-py3-none-win_amd64.whl; platform_system == "Windows"
# llama.cpp (CPU only)
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+cpu-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+cpu-py3-none-win_amd64.whl; platform_system == "Windows"

View file

@@ -1,27 +0,0 @@
audioop-lts<1.0; python_version >= "3.13"
fastapi==0.112.4
html2text==2025.4.15
huggingface-hub==1.5.*
jinja2==3.1.6
markdown
numpy==2.2.*
pydantic==2.11.0
PyPDF2==3.0.1
python-docx==1.1.2
pyyaml
requests
rich
tqdm
# Gradio
gradio==4.37.*
https://github.com/oobabooga/gradio/releases/download/custom-build/gradio_client-1.0.2+custom.1-py3-none-any.whl
# API
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# llama.cpp (CPU only, no AVX2)
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cpuavx-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cpuavx-py3-none-win_amd64.whl; platform_system == "Windows"

View file

@@ -1,27 +0,0 @@
audioop-lts<1.0; python_version >= "3.13"
fastapi==0.112.4
html2text==2025.4.15
huggingface-hub==1.5.*
jinja2==3.1.6
markdown
numpy==2.2.*
pydantic==2.11.0
PyPDF2==3.0.1
python-docx==1.1.2
pyyaml
requests
rich
tqdm
# Gradio
gradio==4.37.*
https://github.com/oobabooga/gradio/releases/download/custom-build/gradio_client-1.0.2+custom.1-py3-none-any.whl
# API
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# CUDA wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cu124avx-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+cu124avx-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

View file

@@ -23,5 +23,5 @@ sse-starlette==1.6.5
tiktoken
# Vulkan wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkan-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkan-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+vulkan-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.83.0/llama_cpp_binaries-0.83.0+vulkan-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

View file

@@ -1,27 +0,0 @@
audioop-lts<1.0; python_version >= "3.13"
fastapi==0.112.4
html2text==2025.4.15
huggingface-hub==1.5.*
jinja2==3.1.6
markdown
numpy==2.2.*
pydantic==2.11.0
PyPDF2==3.0.1
python-docx==1.1.2
pyyaml
requests
rich
tqdm
# Gradio
gradio==4.37.*
https://github.com/oobabooga/gradio/releases/download/custom-build/gradio_client-1.0.2+custom.1-py3-none-any.whl
# API
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken
# CUDA wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkanavx-py3-none-win_amd64.whl; platform_system == "Windows"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.74.0/llama_cpp_binaries-0.74.0+vulkanavx-py3-none-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"