Add multimodal support (llama.cpp) (#7027)

This commit is contained in:
oobabooga 2025-08-10 01:27:25 -03:00 committed by GitHub
parent eb16f64017
commit d86b0ec010
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 86 additions and 18 deletions

View file

@@ -85,6 +85,7 @@ group.add_argument('--no-kv-offload', action='store_true', help='Do not offload
group.add_argument('--row-split', action='store_true', help='Split the model by rows across GPUs. This may improve multi-gpu performance.')
group.add_argument('--extra-flags', type=str, default=None, help='Extra flags to pass to llama-server. Format: "flag1=value1,flag2,flag3=value3". Example: "override-tensor=exps=CPU"')
group.add_argument('--streaming-llm', action='store_true', help='Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
group.add_argument('--mmproj', type=str, default=None, help='Path to the mmproj file for vision models.')
# Cache
group = parser.add_argument_group('Context and cache')