Make the llama.cpp --verbose output less verbose

This commit is contained in:
oobabooga 2025-08-25 17:43:21 -07:00
parent adeca8a658
commit 3ad5970374

View file

@@ -193,7 +193,7 @@ class LlamaServer:
if shared.args.verbose:
logger.info("GENERATE_PARAMS=")
-printable_payload = {k: (v if k != "prompt" else "[multimodal object]" if pil_images else v) for k, v in payload.items()}
+printable_payload = {k: v for k, v in payload.items() if k != "prompt"}
pprint.PrettyPrinter(indent=4, sort_dicts=False).pprint(printable_payload)
print()