llama.cpp: Support literal flags in --extra-flags (e.g. --rpc, --jinja)

The old format is still accepted for backwards compatibility.
oobabooga 2026-03-17 19:47:55 -07:00
parent 2a6b1fdcba
commit 7e54e7b7ae
3 changed files with 23 additions and 18 deletions


@@ -109,7 +109,7 @@ group.add_argument('--threads-batch', type=int, default=0, help='Number of threa
 group.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
 group.add_argument('--parallel', type=int, default=1, help='Number of parallel request slots. The context size is divided equally among slots. For example, to have 4 slots with 8192 context each, set ctx_size to 32768.')
 group.add_argument('--fit-target', type=str, default='512', help='Target VRAM margin per device for auto GPU layers, comma-separated list of values in MiB. A single value is broadcast across all devices.')
-group.add_argument('--extra-flags', type=str, default=None, help='Extra flags to pass to llama-server. Format: "flag1=value1,flag2,flag3=value3". Example: "override-tensor=exps=CPU"')
+group.add_argument('--extra-flags', type=str, default=None, help='Extra flags to pass to llama-server. Example: "--jinja --rpc 192.168.1.100:50052"')

 # Transformers/Accelerate
 group = parser.add_argument_group('Transformers/Accelerate')
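
The hunk above only shows the help-text change; the parsing that accepts both formats lives in the other changed files, which are not shown here. A minimal sketch of how such dual-format parsing might look, assuming a leading dash distinguishes literal flags from the old comma-separated format (the function name parse_extra_flags and this heuristic are illustrative assumptions, not the commit's actual code):

import shlex

def parse_extra_flags(extra_flags: str) -> list[str]:
    """Turn the --extra-flags string into llama-server CLI arguments.
    Hypothetical sketch; not the commit's implementation."""
    extra_flags = extra_flags.strip()
    if not extra_flags:
        return []

    # New format: the string is already a sequence of literal flags,
    # e.g. "--jinja --rpc 192.168.1.100:50052".
    if extra_flags.startswith('-'):
        return shlex.split(extra_flags)

    # Old format: comma-separated "flag" or "flag=value" pairs,
    # e.g. "override-tensor=exps=CPU".
    args = []
    for item in extra_flags.split(','):
        if '=' in item:
            flag, value = item.split('=', 1)
            args.extend([f'--{flag}', value])
        else:
            args.append(f'--{item}')
    return args

# parse_extra_flags("override-tensor=exps=CPU")
#   -> ['--override-tensor', 'exps=CPU']
# parse_extra_flags("--jinja --rpc 192.168.1.100:50052")
#   -> ['--jinja', '--rpc', '192.168.1.100:50052']

Under this assumption, backwards compatibility falls out naturally: any value that does not start with a dash is routed through the old comma-separated parser unchanged.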