From 5327bc939705c0f65e77f8925ffd248fd3e66b88 Mon Sep 17 00:00:00 2001
From: oobabooga
Date: Fri, 28 Nov 2025 22:48:05 -0300
Subject: [PATCH] Update modules/shared.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 modules/shared.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index 289fda54..134c0cac 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -74,7 +74,7 @@ group.add_argument('--row-split', action='store_true', help='Split the model by
 group.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 group.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
-group.add_argument('--batch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the application level batch size')
+group.add_argument('--batch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the application level batch size.')
 group.add_argument('--ubatch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the max physical batch size for computation (device level).')
 group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
 group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')