From 3ad59703748dcd5685dcbb7368df45914661e8da Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 25 Aug 2025 17:43:21 -0700
Subject: [PATCH] Make the llama.cpp --verbose output less verbose

---
 modules/llama_cpp_server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index e3dd43b4..8579f843 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -193,7 +193,7 @@ class LlamaServer:
 
         if shared.args.verbose:
             logger.info("GENERATE_PARAMS=")
-            printable_payload = {k: (v if k != "prompt" else "[multimodal object]" if pil_images else v) for k, v in payload.items()}
+            printable_payload = {k: v for k, v in payload.items() if k != "prompt"}
             pprint.PrettyPrinter(indent=4, sort_dicts=False).pprint(printable_payload)
             print()
 
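
Note (not part of the patch): a minimal standalone sketch of the change's effect, assuming a payload dict shaped roughly like the one LlamaServer sends. The old comprehension only masked the prompt when pil_images was non-empty, so plain text prompts were still dumped in full under --verbose; the new comprehension drops the "prompt" key unconditionally, leaving only the sampling parameters in the log.

    import pprint

    # Hypothetical payload for illustration; field names other than
    # "prompt" are assumptions, not taken from the repository.
    payload = {
        "prompt": "a very long prompt string or multimodal object ...",
        "temperature": 0.7,
        "top_p": 0.9,
        "n_predict": 512,
    }
    pil_images = []  # no images attached in this example

    # Old behavior: the prompt was replaced with a placeholder only when
    # images were present; otherwise it was printed verbatim.
    old_printable = {
        k: (v if k != "prompt" else "[multimodal object]" if pil_images else v)
        for k, v in payload.items()
    }

    # New behavior: omit the "prompt" key entirely.
    new_printable = {k: v for k, v in payload.items() if k != "prompt"}

    pprint.PrettyPrinter(indent=4, sort_dicts=False).pprint(new_printable)
    # prints: {'temperature': 0.7, 'top_p': 0.9, 'n_predict': 512}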