diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index 47d9d27c..f83ed663 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -317,6 +317,7 @@ class LlamaServer:
             "--ctx-size", str(shared.args.ctx_size),
             "--gpu-layers", str(shared.args.gpu_layers),
             "--batch-size", str(shared.args.batch_size),
+            "--ubatch-size", str(shared.args.ubatch_size),
             "--port", str(self.port),
             "--no-webui",
             "--flash-attn", "on",
diff --git a/modules/loaders.py b/modules/loaders.py
index 0f0f6d1e..0bf3781b 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -10,6 +10,7 @@ loaders_and_params = OrderedDict({
         'threads',
         'threads_batch',
         'batch_size',
+        'ubatch_size',
         'ctx_size',
         'cache_type',
         'tensor_split',
diff --git a/modules/shared.py b/modules/shared.py
index 1cca1233..289fda54 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -74,7 +74,8 @@ group.add_argument('--row-split', action='store_true', help='Split the model by
 group.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 group.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
-group.add_argument('--batch-size', type=int, default=256, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
+group.add_argument('--batch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the logical, application-level batch size.')
+group.add_argument('--ubatch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the maximum physical, device-level batch size for computation.')
 group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
 group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
 group.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
diff --git a/modules/ui.py b/modules/ui.py
index d8dcedfb..f99e8b6a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -129,6 +129,7 @@ def list_model_elements():
         'threads',
         'threads_batch',
         'batch_size',
+        'ubatch_size',
         'ctx_size',
         'cache_type',
         'tensor_split',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 31ab929f..86adc229 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -84,6 +84,7 @@ def create_ui():
                     shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=256, value=shared.args.threads)
                     shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=256, value=shared.args.threads_batch)
                     shared.gradio['batch_size'] = gr.Slider(label="batch_size", minimum=1, maximum=4096, step=1, value=shared.args.batch_size)
+                    shared.gradio['ubatch_size'] = gr.Slider(label="ubatch_size", minimum=1, maximum=4096, step=1, value=shared.args.ubatch_size)
                     shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='List of proportions to split the model across multiple GPUs. Example: 60,40')
                     shared.gradio['extra_flags'] = gr.Textbox(label='extra-flags', info='Additional flags to pass to llama-server. Format: "flag1=value1,flag2,flag3=value3". Example: "override-tensor=exps=CPU"', value=shared.args.extra_flags)
                     shared.gradio['cpu_memory'] = gr.Number(label="Maximum CPU memory in GiB. Use this for CPU offloading.", value=shared.args.cpu_memory)
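
Minimal standalone sketch (not part of the patch) of the plumbing this change adds: the webui-side --batch-size and --ubatch-size arguments are forwarded verbatim to the llama-server command line. The hard-coded argv and values below are illustrative only.

# Illustration of how batch_size / ubatch_size reach llama-server.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=1024)   # logical (application-level) batch size
parser.add_argument('--ubatch-size', type=int, default=1024)  # physical (device-level) batch size
args = parser.parse_args(['--batch-size', '2048', '--ubatch-size', '512'])  # example values

cmd = [
    'llama-server',
    '--batch-size', str(args.batch_size),
    '--ubatch-size', str(args.ubatch_size),
]
print(' '.join(cmd))  # llama-server --batch-size 2048 --ubatch-size 512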