From 4ae2bd86e28fe67755e5329069a0d073d1162bae Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sat, 14 Mar 2026 15:30:01 -0700
Subject: [PATCH] Change the default ctx-size to 0 (auto) for llama.cpp

---
 modules/llama_cpp_server.py | 4 +++-
 modules/models.py           | 3 +++
 modules/shared.py           | 2 +-
 modules/ui_model_menu.py    | 2 +-
 4 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index 1425844d..fc8e9a19 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -371,6 +371,8 @@ class LlamaServer:
 
         if shared.args.ctx_size > 0:
             cmd += ["--ctx-size", str(shared.args.ctx_size)]
+        elif shared.args.gpu_layers >= 0:
+            cmd += ["--ctx-size", "8192"]
 
         if shared.args.gpu_layers >= 0:
             cmd += ["--gpu-layers", str(shared.args.gpu_layers), "--fit", "off"]
@@ -477,7 +479,7 @@ class LlamaServer:
 
         print()
         gpu_layers_str = "auto" if shared.args.gpu_layers < 0 else str(shared.args.gpu_layers)
-        ctx_size_str = "auto" if shared.args.ctx_size == 0 else str(shared.args.ctx_size)
+        ctx_size_str = "auto" if shared.args.ctx_size == 0 and shared.args.gpu_layers < 0 else str(shared.args.ctx_size or 8192)
         logger.info(f"Using gpu_layers={gpu_layers_str} | ctx_size={ctx_size_str} | cache_type={cache_type}")
         # Start the server with pipes for output
         self.process = subprocess.Popen(
diff --git a/modules/models.py b/modules/models.py
index d83b98d7..1d139b89 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -38,6 +38,9 @@ def load_model(model_name, loader=None):
     sampler_hijack.hijack_samplers()
 
     shared.args.loader = loader
+    if loader != 'llama.cpp' and shared.args.ctx_size == 0:
+        shared.args.ctx_size = 8192
+
     output = load_func_map[loader](model_name)
     if type(output) is tuple:
         model, tokenizer = output
diff --git a/modules/shared.py b/modules/shared.py
index 8c0aad9a..1cf365c6 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -76,7 +76,7 @@ group.add_argument('--loader', type=str, help='Choose the model loader manually,
 
 # Cache
 group = parser.add_argument_group('Context and cache')
-group.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int, default=8192, metavar='N', help='Context size in tokens. llama.cpp: 0 = auto if gpu-layers is also -1.')
+group.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int, default=0, metavar='N', help='Context size in tokens. 0 = auto for llama.cpp (requires gpu-layers=-1), 8192 for other loaders.')
 group.add_argument('--cache-type', '--cache_type', type=str, default='fp16', metavar='N', help='KV cache type; valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV3 - fp16, q2 to q8 (can specify k_bits and v_bits separately, e.g. q4_q8).')
 
 # Speculative decoding
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 6ab19b7c..b53bc292 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -42,7 +42,7 @@ def create_ui():
         with gr.Row():
             with gr.Column():
                 shared.gradio['gpu_layers'] = gr.Slider(label="gpu-layers", minimum=-1, maximum=get_initial_gpu_layers_max(), step=1, value=shared.args.gpu_layers, info='Number of layers to offload to the GPU. -1 = auto.')
-                shared.gradio['ctx_size'] = gr.Slider(label='ctx-size', minimum=0, maximum=1048576, step=1024, value=shared.args.ctx_size, info='Context length. llama.cpp: 0 = auto if gpu-layers is also -1. Common values: 4096, 8192, 16384, 32768, 65536, 131072.')
+                shared.gradio['ctx_size'] = gr.Slider(label='ctx-size', minimum=0, maximum=1048576, step=1024, value=shared.args.ctx_size, info='Context length. 0 = auto for llama.cpp (requires gpu-layers=-1), 8192 for other loaders. Common values: 4096, 8192, 16384, 32768, 65536, 131072.')
                 shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
                 shared.gradio['attn_implementation'] = gr.Dropdown(label="attn-implementation", choices=['sdpa', 'eager', 'flash_attention_2'], value=shared.args.attn_implementation, info='Attention implementation.')
                 shared.gradio['cache_type'] = gr.Dropdown(label="cache-type", choices=['fp16', 'q8_0', 'q4_0', 'fp8', 'q8', 'q7', 'q6', 'q5', 'q4', 'q3', 'q2'], value=shared.args.cache_type, allow_custom_value=True, info='Valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV3 - fp16, q2 to q8. For ExLlamaV3, you can type custom combinations for separate k/v bits (e.g. q4_q8).')
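
Note (not part of the patch): a minimal sketch of how the effective ctx-size resolves after this change, combining the three code paths touched above. The helper name resolve_ctx_size and the 'Transformers' loader string are illustrative only; the values mirror the diff: shared.py now defaults --ctx-size to 0, models.py rewrites 0 to 8192 for non-llama.cpp loaders, and llama_cpp_server.py forwards an explicit value, falls back to 8192 when gpu-layers is set manually, and otherwise lets llama.cpp pick the size.

    # Illustrative sketch only -- not code from this patch.
    def resolve_ctx_size(loader: str, ctx_size: int = 0, gpu_layers: int = -1):
        """Return the context size handed to the backend, or 'auto'."""
        if loader != 'llama.cpp':
            # models.py: other loaders fall back to 8192 when left at the 0 default
            return ctx_size or 8192
        if ctx_size > 0:
            # llama_cpp_server.py: an explicit value is forwarded as --ctx-size
            return ctx_size
        if gpu_layers >= 0:
            # manual gpu-layers: a concrete 8192 is passed instead of auto
            return 8192
        # ctx_size == 0 and gpu_layers == -1: llama.cpp chooses the size itself
        return 'auto'

    assert resolve_ctx_size('llama.cpp') == 'auto'
    assert resolve_ctx_size('llama.cpp', gpu_layers=20) == 8192
    assert resolve_ctx_size('llama.cpp', ctx_size=32768) == 32768
    assert resolve_ctx_size('Transformers') == 8192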