From 9ea2a69210ab5658ba8daf6d7d604589de5fc741 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 8 May 2025 10:41:25 -0700
Subject: [PATCH] llama.cpp: Add --no-webui to the llama-server command

---
 modules/llama_cpp_server.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index d8d2f61b..1046969a 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -261,6 +261,7 @@ class LlamaServer:
             "--gpu-layers", str(shared.args.gpu_layers),
             "--batch-size", str(shared.args.batch_size),
             "--port", str(self.port),
+            "--no-webui",
         ]
 
         if shared.args.flash_attn:
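
For context, llama-server's --no-webui flag suppresses the built-in browser UI that the server would otherwise serve on the same port, leaving only its HTTP API, which is what the frontend talks to. The sketch below is illustrative only, not the project's actual code: SERVER_PATH, build_server_cmd, and the default argument values are assumptions; it simply shows the shape of a launch command that includes the new flag.

import subprocess

SERVER_PATH = "llama-server"  # hypothetical path to the llama.cpp server binary


def build_server_cmd(model_path, port, gpu_layers=0, batch_size=2048):
    # Roughly mirrors the command list assembled in modules/llama_cpp_server.py;
    # appending --no-webui keeps only the API endpoint, since the frontend
    # already provides its own interface.
    return [
        SERVER_PATH,
        "--model", model_path,
        "--gpu-layers", str(gpu_layers),
        "--batch-size", str(batch_size),
        "--port", str(port),
        "--no-webui",
    ]


if __name__ == "__main__":
    cmd = build_server_cmd("model.gguf", 8080)
    print(" ".join(cmd))
    # subprocess.Popen(cmd)  # uncomment to launch the server as a subprocess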