Fix portable Apple Intel requirement for llama binaries (issue #7238) (#7239)

Author: Ionoclast Laboratories, 2025-10-08 08:40:53 -07:00 (committed by GitHub)
parent 292c91abbb
commit d229dfe991


@@ -21,3 +21,4 @@ tiktoken
# Mac wheels
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.48.0/llama_cpp_binaries-0.48.0-py3-none-macosx_15_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "24.0.0" and platform_release < "25.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.48.0/llama_cpp_binaries-0.48.0-py3-none-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0"
https://github.com/oobabooga/llama-cpp-binaries/releases/download/v0.48.0/llama_cpp_binaries-0.48.0-py3-none-macosx_13_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0"
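
Each of these requirement lines uses PEP 508 environment markers: `platform_system == "Darwin"` restricts the wheel to macOS, and `platform_release` (the Darwin kernel version: 24.x corresponds to macOS 15, 23.x to macOS 14, 22.x to macOS 13) picks the wheel built for the matching OS release. As a rough illustration only (not part of this commit), a marker like the ones above can be evaluated the way pip would, using the `packaging` library; the sample environment values below are made up for demonstration.

    # Minimal sketch: check whether one of the markers above matches a given platform.
    from packaging.markers import Marker

    marker = Marker(
        'platform_system == "Darwin" and '
        'platform_release >= "23.0.0" and platform_release < "24.0.0"'
    )

    # Evaluate against the running interpreter's real platform values.
    print(marker.evaluate())

    # Evaluate against a hypothetical Intel Mac on macOS 14 (Darwin 23.x).
    env = {"platform_system": "Darwin", "platform_release": "23.5.0"}
    print(marker.evaluate(environment=env))  # True -> the macosx_14_0 x86_64 wheel applies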