llama.cpp: Add StreamingLLM (--streaming-llm)

This commit is contained in:
oobabooga 2025-04-25 16:21:35 -07:00
parent d35818f4e1
commit 877cf44c08
3 changed files with 4 additions and 0 deletions

View file

@@ -128,6 +128,7 @@ group.add_argument('--numa', action='store_true', help='Activate NUMA task alloc
group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
group.add_argument('--row-split', action='store_true', help='Split the model by rows across GPUs. This may improve multi-gpu performance.')
group.add_argument('--extra-flags', type=str, default=None, help='Extra flags to pass to llama-server. Format: "flag1=value1;flag2;flag3=value3". Example: "override-tensor=exps=CPU"')
group.add_argument('--streaming-llm', action='store_true', help='Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
# Speculative decoding
group = parser.add_argument_group('Speculative decoding')