Add slider for --ubatch-size for llama.cpp loader, change defaults for better MoE performance (#7316)

GodEmperor785 2025-11-21 20:56:02 +01:00 committed by GitHub
parent 8f0048663d
commit 400bb0694b
5 changed files with 6 additions and 1 deletion


@@ -74,7 +74,8 @@ group.add_argument('--row-split', action='store_true', help='Split the model by
 group.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 group.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
-group.add_argument('--batch-size', type=int, default=256, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
+group.add_argument('--batch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the application-level batch size.')
+group.add_argument('--ubatch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the max physical batch size for computation (device level).')
 group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
 group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
 group.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
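
The hunk above only adds the argparse options; how they reach llama-server is not part of this diff. As a rough sketch, these two settings would typically be forwarded to llama-server's own --batch-size (-b) and --ubatch-size (-ub) flags, which control the logical (application-level) and physical (device-level) batch sizes respectively. The build_server_cmd helper below is hypothetical and only illustrates that mapping:

```python
# Sketch only: build_server_cmd is not part of this commit; the argument names
# mirror the argparse options added above.
def build_server_cmd(args, server_path="llama-server"):
    cmd = [
        server_path,
        "--batch-size", str(args.batch_size),    # logical batch size (-b)
        "--ubatch-size", str(args.ubatch_size),  # physical batch size (-ub)
    ]
    if args.threads:
        cmd += ["--threads", str(args.threads)]
    if args.no_mmap:
        cmd.append("--no-mmap")
    return cmd
```

Raising both defaults to 1024 matches the stated goal of the commit: a larger physical batch speeds up prompt processing, which is particularly noticeable on MoE models.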