From fbfcd59fe01310137dca5001107575237064e422 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 4 Mar 2026 19:21:45 -0800
Subject: [PATCH] llama.cpp: Use -1 instead of 0 for auto gpu_layers

---
 modules/llama_cpp_server.py | 5 +++--
 modules/models_settings.py  | 6 +++---
 modules/shared.py           | 2 +-
 modules/ui_model_menu.py    | 2 +-
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index 2631f253..a5e90c94 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -347,7 +347,7 @@ class LlamaServer:
             "--flash-attn", "on",
         ]
 
-        if shared.args.gpu_layers > 0:
+        if shared.args.gpu_layers >= 0:
             cmd += ["--gpu-layers", str(shared.args.gpu_layers), "--fit", "off"]
         else:
             cmd += ["--fit", "on"]
@@ -448,7 +448,8 @@ class LlamaServer:
             print(' '.join(str(item) for item in cmd[1:]))
             print()
 
-        logger.info(f"Using gpu_layers={shared.args.gpu_layers} | ctx_size={shared.args.ctx_size} | cache_type={cache_type}")
+        gpu_layers_str = "auto" if shared.args.gpu_layers < 0 else str(shared.args.gpu_layers)
+        logger.info(f"Using gpu_layers={gpu_layers_str} | ctx_size={shared.args.ctx_size} | cache_type={cache_type}")
 
         # Start the server with pipes for output
         self.process = subprocess.Popen(
             cmd,
diff --git a/modules/models_settings.py b/modules/models_settings.py
index 3b28a800..1ef436e0 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -77,7 +77,7 @@ def get_model_metadata(model):
         elif k.endswith('rope.scaling.factor'):
             model_settings['compress_pos_emb'] = metadata[k]
         elif k.endswith('.block_count'):
-            model_settings['gpu_layers'] = 0
+            model_settings['gpu_layers'] = -1
             model_settings['max_gpu_layers'] = metadata[k] + 1
 
     if 'tokenizer.chat_template' in metadata:
@@ -264,7 +264,7 @@ def apply_model_settings_to_state(model, state):
 
     # Handle GPU layers and VRAM update for llama.cpp
     if state['loader'] == 'llama.cpp' and 'gpu_layers' in model_settings:
-        gpu_layers = model_settings['gpu_layers']  # 0 (auto) by default, or user-saved value
+        gpu_layers = model_settings['gpu_layers']  # -1 (auto) by default, or user-saved value
         max_layers = model_settings.get('max_gpu_layers', 256)
 
         state['gpu_layers'] = gr.update(value=gpu_layers, maximum=max_layers)
@@ -418,7 +418,7 @@ def update_gpu_layers_and_vram(loader, model, gpu_layers, ctx_size, cache_type):
     Compute the estimated VRAM usage for the given GPU layers
     and return an HTML string for the UI display.
     """
-    if loader != 'llama.cpp' or model in ["None", None] or not model.endswith(".gguf"):
+    if loader != 'llama.cpp' or model in ["None", None] or not model.endswith(".gguf") or gpu_layers < 0:
         return "