diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index 2631f253..a5e90c94 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -347,7 +347,7 @@ class LlamaServer:
"--flash-attn", "on",
]
- if shared.args.gpu_layers > 0:
+ if shared.args.gpu_layers >= 0:
cmd += ["--gpu-layers", str(shared.args.gpu_layers), "--fit", "off"]
else:
cmd += ["--fit", "on"]
@@ -448,7 +448,8 @@ class LlamaServer:
print(' '.join(str(item) for item in cmd[1:]))
print()
- logger.info(f"Using gpu_layers={shared.args.gpu_layers} | ctx_size={shared.args.ctx_size} | cache_type={cache_type}")
+ gpu_layers_str = "auto" if shared.args.gpu_layers < 0 else str(shared.args.gpu_layers)
+ logger.info(f"Using gpu_layers={gpu_layers_str} | ctx_size={shared.args.ctx_size} | cache_type={cache_type}")
# Start the server with pipes for output
self.process = subprocess.Popen(
cmd,
diff --git a/modules/models_settings.py b/modules/models_settings.py
index 3b28a800..1ef436e0 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -77,7 +77,7 @@ def get_model_metadata(model):
elif k.endswith('rope.scaling.factor'):
model_settings['compress_pos_emb'] = metadata[k]
elif k.endswith('.block_count'):
- model_settings['gpu_layers'] = 0
+ model_settings['gpu_layers'] = -1
model_settings['max_gpu_layers'] = metadata[k] + 1
if 'tokenizer.chat_template' in metadata:
@@ -264,7 +264,7 @@ def apply_model_settings_to_state(model, state):
# Handle GPU layers and VRAM update for llama.cpp
if state['loader'] == 'llama.cpp' and 'gpu_layers' in model_settings:
- gpu_layers = model_settings['gpu_layers'] # 0 (auto) by default, or user-saved value
+ gpu_layers = model_settings['gpu_layers'] # -1 (auto) by default, or user-saved value
max_layers = model_settings.get('max_gpu_layers', 256)
state['gpu_layers'] = gr.update(value=gpu_layers, maximum=max_layers)
@@ -418,7 +418,7 @@ def update_gpu_layers_and_vram(loader, model, gpu_layers, ctx_size, cache_type):
Compute the estimated VRAM usage for the given GPU layers and return
an HTML string for the UI display.
"""
- if loader != 'llama.cpp' or model in ["None", None] or not model.endswith(".gguf"):
+ if loader != 'llama.cpp' or model in ["None", None] or not model.endswith(".gguf") or gpu_layers < 0:
    return "Estimated VRAM to load the model:"
vram_usage = estimate_vram(model, gpu_layers, ctx_size, cache_type)
diff --git a/modules/shared.py b/modules/shared.py
index c917377a..787f04d9 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -88,7 +88,7 @@ group.add_argument('--spec-ngram-min-hits', type=int, default=1, help='Minimum n
# llama.cpp
group = parser.add_argument_group('llama.cpp')
-group.add_argument('--gpu-layers', '--n-gpu-layers', type=int, default=0, metavar='N', help='Number of layers to offload to the GPU. 0 means auto (llama.cpp decides via --fit).')
+group.add_argument('--gpu-layers', '--n-gpu-layers', type=int, default=-1, metavar='N', help='Number of layers to offload to the GPU. Set to -1 for auto mode, where llama.cpp decides via --fit.')
group.add_argument('--cpu-moe', action='store_true', help='Move the experts to the CPU (for MoE models).')
group.add_argument('--mmproj', type=str, default=None, help='Path to the mmproj file for vision models.')
group.add_argument('--streaming-llm', action='store_true', help='Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index c65c7a1b..430123eb 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -41,7 +41,7 @@ def create_ui():
gr.Markdown("## Main options")
with gr.Row():
with gr.Column():
- shared.gradio['gpu_layers'] = gr.Slider(label="gpu-layers", minimum=0, maximum=get_initial_gpu_layers_max(), step=1, value=shared.args.gpu_layers, info='0 = auto (llama.cpp decides via --fit). Set manually to override.')
+ shared.gradio['gpu_layers'] = gr.Slider(label="gpu-layers", minimum=-1, maximum=get_initial_gpu_layers_max(), step=1, value=shared.args.gpu_layers, info='Number of layers to offload to the GPU. -1 = auto (llama.cpp decides via --fit).')
shared.gradio['ctx_size'] = gr.Slider(label='ctx-size', minimum=256, maximum=131072, step=256, value=shared.args.ctx_size, info='Context length. Common values: 4096, 8192, 16384, 32768, 65536, 131072.')
shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
shared.gradio['attn_implementation'] = gr.Dropdown(label="attn-implementation", choices=['sdpa', 'eager', 'flash_attention_2'], value=shared.args.attn_implementation, info='Attention implementation.')