From a0abf934251e8f376c5809eabaf203f29a79ae5d Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 18 Apr 2025 06:53:51 -0700
Subject: [PATCH] Connect --rope-freq-base to the new llama.cpp loader

---
 modules/llama_cpp_server.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/llama_cpp_server.py b/modules/llama_cpp_server.py
index 983b506f..39aaf421 100644
--- a/modules/llama_cpp_server.py
+++ b/modules/llama_cpp_server.py
@@ -245,6 +245,7 @@ class LlamaServer:
             "--ctx-size", str(shared.args.n_ctx),
             "--n-gpu-layers", str(shared.args.n_gpu_layers),
             "--batch-size", str(shared.args.batch_size),
+            "--rope-freq-base", str(shared.args.rope_freq_base),
             "--port", str(self.port),
         ]
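
For context, a minimal sketch (not part of the patch) of the kind of llama-server command line LlamaServer would assemble once --rope-freq-base is wired through. Only the flags visible in the hunk above come from the source; the binary name, the concrete values, and the port below are hypothetical placeholders.

    # Illustrative only: hypothetical values standing in for shared.args.* and self.port.
    cmd = [
        "llama-server",                     # server binary (placeholder)
        "--ctx-size", str(32768),           # shared.args.n_ctx
        "--n-gpu-layers", str(33),          # shared.args.n_gpu_layers
        "--batch-size", str(512),           # shared.args.batch_size
        "--rope-freq-base", str(1000000),   # shared.args.rope_freq_base (added by this patch)
        "--port", str(8080),                # self.port
    ]
    print(" ".join(cmd))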