diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index ecc543f3..7199470d 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -301,6 +301,20 @@ class LlamaServer:
             cmd += ["--device-draft", shared.args.device_draft]
         if shared.args.ctx_size_draft > 0:
             cmd += ["--ctx-size-draft", str(shared.args.ctx_size_draft)]
+        if shared.args.extra_flags:
+            # Clean up the input
+            extra_flags = shared.args.extra_flags.strip()
+            if extra_flags.startswith('"') and extra_flags.endswith('"'):
+                extra_flags = extra_flags[1:-1].strip()
+            elif extra_flags.startswith("'") and extra_flags.endswith("'"):
+                extra_flags = extra_flags[1:-1].strip()
+
+            for flag_item in extra_flags.split(';'):
+                if '=' in flag_item:
+                    flag, value = flag_item.split('=', 1)
+                    cmd += [f"--{flag}", value]
+                else:
+                    cmd.append(f"--{flag_item}")
 
         env = os.environ.copy()
         if os.name == 'posix':
diff --git a/modules/loaders.py b/modules/loaders.py
index d256e1e7..9442a147 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -12,6 +12,7 @@ loaders_and_params = OrderedDict({
         'n_ctx',
         'cache_type',
         'tensor_split',
+        'extra_flags',
         'rope_freq_base',
         'compress_pos_emb',
         'flash_attn',
diff --git a/modules/shared.py b/modules/shared.py
index 98ec50b2..b4f87082 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -128,6 +128,7 @@ group.add_argument('--tensor-split', type=str, default=None, help='Split the mod
 group.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
 group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
 group.add_argument('--row-split', action='store_true', help='Split the model by rows across GPUs. This may improve multi-gpu performance.')
+group.add_argument('--extra-flags', type=str, default=None, help='Extra flags to pass to llama-server. Format: "flag1=value1;flag2;flag3=value3". Example: "override-tensor=exps=CPU"')
 
 # Speculative decoding
 group = parser.add_argument_group('Speculative decoding')
diff --git a/modules/ui.py b/modules/ui.py
index 6fc5e955..19b76cee 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -114,6 +114,7 @@ def list_model_elements():
         'max_seq_len',
         'cache_type',
         'tensor_split',
+        'extra_flags',
         'gpu_split',
         'alpha_value',
         'rope_freq_base',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 56b6903f..f3319cfb 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -56,6 +56,7 @@ def create_ui():
                                     shared.gradio['cache_type'] = gr.Dropdown(label="cache_type", choices=['fp16', 'q8_0', 'q4_0', 'fp8', 'q8', 'q6', 'q4'], value=shared.args.cache_type, info='Valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV2 - fp16, fp8, q8, q6, q4.')
                                     shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='List of proportions to split the model across multiple GPUs. Example: 60,40')
                                     shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
+                                    shared.gradio['extra_flags'] = gr.Textbox(label='extra-flags', info='Additional flags to pass to llama-server. Format: "flag1=value1;flag2;flag3=value3". Example: "override-tensor=exps=CPU"')
                                     shared.gradio['cpu_memory'] = gr.Number(label="Maximum CPU memory in GiB. Use this for CPU offloading.", value=shared.args.cpu_memory)
                                     shared.gradio['alpha_value'] = gr.Number(label='alpha_value', value=shared.args.alpha_value, precision=2, info='Positional embeddings alpha factor for NTK RoPE scaling. Recommended values (NTKv1): 1.75 for 1.5x context, 2.5 for 2x context. Use either this or compress_pos_emb, not both.')
                                     shared.gradio['rope_freq_base'] = gr.Number(label='rope_freq_base', value=shared.args.rope_freq_base, precision=0, info='Positional embeddings frequency base for NTK RoPE scaling. Related to alpha_value by rope_freq_base = 10000 * alpha_value ^ (64 / 63). 0 = from model.')
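
For reference, below is a minimal standalone sketch of the flag-parsing behavior that the llama_cpp_server.py hunk above inlines when building the llama-server command. The parse_extra_flags helper name and the --no-mmap flag in the demo are illustrative only and not part of the patch.

# Sketch of the extra_flags parsing added in llama_cpp_server.py (helper name is hypothetical).
def parse_extra_flags(extra_flags: str) -> list:
    """Expand 'flag1=value1;flag2' into ['--flag1', 'value1', '--flag2']."""
    extra_flags = extra_flags.strip()

    # Strip one pair of surrounding quotes, mirroring the patch
    if extra_flags.startswith('"') and extra_flags.endswith('"'):
        extra_flags = extra_flags[1:-1].strip()
    elif extra_flags.startswith("'") and extra_flags.endswith("'"):
        extra_flags = extra_flags[1:-1].strip()

    args = []
    for flag_item in extra_flags.split(';'):
        if '=' in flag_item:
            # Split only on the first '=', so values that themselves contain '=' stay intact
            flag, value = flag_item.split('=', 1)
            args += [f"--{flag}", value]
        else:
            args.append(f"--{flag_item}")

    return args


print(parse_extra_flags('"override-tensor=exps=CPU;no-mmap"'))
# ['--override-tensor', 'exps=CPU', '--no-mmap']

Because split('=', 1) only splits on the first '=', a value that itself contains '=' (such as override-tensor=exps=CPU, the example given in the help text) is forwarded to llama-server unchanged.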