From 66fb79fe15f38c313ed9a2fbaabfad252c2b121a Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 5 Mar 2026 20:46:08 -0800
Subject: [PATCH] llama.cpp: Add --fit-target param

---
 modules/llama_cpp_server.py | 2 ++
 modules/loaders.py          | 1 +
 modules/shared.py           | 1 +
 modules/ui.py               | 1 +
 modules/ui_model_menu.py    | 1 +
 5 files changed, 6 insertions(+)

diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index eb99f2a8..0e8828d9 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -354,6 +354,8 @@ class LlamaServer:
             cmd += ["--gpu-layers", str(shared.args.gpu_layers), "--fit", "off"]
         else:
             cmd += ["--fit", "on"]
+        if shared.args.fit_target:
+            cmd += ["--fit-target", shared.args.fit_target]
 
         if shared.args.threads > 0:
             cmd += ["--threads", str(shared.args.threads)]
diff --git a/modules/loaders.py b/modules/loaders.py
index 15b8dfeb..42a5ff1c 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -6,6 +6,7 @@ import gradio as gr
 loaders_and_params = OrderedDict({
     'llama.cpp': [
         'gpu_layers',
+        'fit_target',
         'cpu_moe',
         'threads',
         'threads_batch',
diff --git a/modules/shared.py b/modules/shared.py
index e9535aa0..de0820af 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -108,6 +108,7 @@ group.add_argument('--threads', type=int, default=0, help='Number of threads to
 group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
 group.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
 group.add_argument('--parallel', type=int, default=1, help='Number of parallel request slots. The context size is divided equally among slots. For example, to have 4 slots with 8192 context each, set ctx_size to 32768.')
+group.add_argument('--fit-target', type=str, default='1024', help='Target VRAM margin per device for auto GPU layers, comma-separated list of values in MiB. A single value is broadcast across all devices. Default: 1024.')
 group.add_argument('--extra-flags', type=str, default=None, help='Extra flags to pass to llama-server. Format: "flag1=value1,flag2,flag3=value3". Example: "override-tensor=exps=CPU"')
 
 # Transformers/Accelerate
diff --git a/modules/ui.py b/modules/ui.py
index ae998ebb..70e929f2 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -125,6 +125,7 @@ def list_model_elements():
         'loader',
         'cpu_memory',
         'gpu_layers',
+        'fit_target',
         'cpu_moe',
         'threads',
         'threads_batch',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 33b39a25..7e91f1ce 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -46,6 +46,7 @@ def create_ui():
                     shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
                     shared.gradio['attn_implementation'] = gr.Dropdown(label="attn-implementation", choices=['sdpa', 'eager', 'flash_attention_2'], value=shared.args.attn_implementation, info='Attention implementation.')
                     shared.gradio['cache_type'] = gr.Dropdown(label="cache-type", choices=['fp16', 'q8_0', 'q4_0', 'fp8', 'q8', 'q7', 'q6', 'q5', 'q4', 'q3', 'q2'], value=shared.args.cache_type, allow_custom_value=True, info='Valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV3 - fp16, q2 to q8. For ExLlamaV3, you can type custom combinations for separate k/v bits (e.g. q4_q8).')
+                    shared.gradio['fit_target'] = gr.Textbox(label='fit-target', value=shared.args.fit_target, info='Target VRAM margin per device for auto GPU layers (MiB). Comma-separated list for multiple devices. Default: 1024.')
                     shared.gradio['tp_backend'] = gr.Dropdown(label="tp-backend", choices=['native', 'nccl'], value=shared.args.tp_backend, info='The backend for tensor parallelism.')
 
                 with gr.Column():
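
Below is a minimal, illustrative sketch (not part of the patch) of how the new flag ends up on the llama-server command line, following the logic added to modules/llama_cpp_server.py above. The SimpleNamespace stand-in for shared.args, the model path, and the gpu_layers > 0 condition are assumptions made for this example only.

# Sketch only: mirrors the command construction shown in the first hunk above.
from types import SimpleNamespace

# Placeholder for shared.args; values chosen for illustration.
args = SimpleNamespace(gpu_layers=0, fit_target="1024,512", threads=8)

cmd = ["llama-server", "--model", "model.gguf"]
if args.gpu_layers > 0:  # assumed condition; the real check is outside the hunk
    cmd += ["--gpu-layers", str(args.gpu_layers), "--fit", "off"]
else:
    cmd += ["--fit", "on"]
if args.fit_target:
    # A single value such as "1024" applies to every device; a comma-separated
    # list such as "1024,512" sets a per-device VRAM margin in MiB.
    cmd += ["--fit-target", args.fit_target]
if args.threads > 0:
    cmd += ["--threads", str(args.threads)]

print(" ".join(cmd))
# -> llama-server --model model.gguf --fit on --fit-target 1024,512 --threads 8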