From 8f2493cc60036648aedc38d9d8721993743f86e7 Mon Sep 17 00:00:00 2001
From: Matthew Jenkins <40323108+Matthew-Jenkins@users.noreply.github.com>
Date: Thu, 24 Apr 2025 22:38:57 -0400
Subject: [PATCH] Prevent llamacpp defaults from locking up consumer hardware
 (#6870)

---
 modules/shared.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index eeaeb689..98ec50b2 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -120,7 +120,7 @@ group.add_argument('--flash-attn', action='store_true', help='Use flash-attentio
 group.add_argument('--n_ctx', type=int, default=8192, help='Size of the prompt context.')
 group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
 group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
-group.add_argument('--batch-size', type=int, default=2048, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
+group.add_argument('--batch-size', type=int, default=256, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
 group.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 group.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 group.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
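
A minimal sketch (not the project's actual module layout) of how the patched default behaves once parsed: with no flag on the command line, `args.batch_size` now comes out as 256 instead of 2048, and users who want the old behavior can still pass `--batch-size 2048` explicitly. The assumption here is only standard `argparse` semantics; how the value is then forwarded to the llama.cpp backend is not shown.

```python
import argparse

# Hypothetical standalone reconstruction of the patched argument definition.
parser = argparse.ArgumentParser()
group = parser.add_argument_group('llama.cpp')
group.add_argument('--batch-size', type=int, default=256,
                   help='Maximum number of prompt tokens to batch together when calling llama_eval.')

args = parser.parse_args([])                          # no flag: picks up the new default
print(args.batch_size)                                # -> 256 (was 2048 before this patch)

args = parser.parse_args(['--batch-size', '2048'])    # opting back into the old value
print(args.batch_size)                                # -> 2048
```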