From a92758a1444626167468f0b0552a642b1e9245a2 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 27 Aug 2025 16:15:20 -0700
Subject: [PATCH] llama.cpp: Fix obtaining the maximum sequence length for
 GPT-OSS

---
 modules/models_settings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/models_settings.py b/modules/models_settings.py
index 7645880f..6dc000b4 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -68,7 +68,7 @@ def get_model_metadata(model):
         metadata = load_gguf_metadata_with_cache(model_file)
 
         for k in metadata:
-            if k.endswith('context_length'):
+            if k.endswith('.context_length'):
                 model_settings['ctx_size'] = min(metadata[k], 8192)
                 model_settings['truncation_length_info'] = metadata[k]
             elif k.endswith('rope.freq_base'):
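
Reviewer note: a minimal sketch of why the stricter suffix matters. It assumes
a GPT-OSS GGUF carries both an architecture-level context_length key and a
rope.scaling.original_context_length key (llama.cpp emits the latter for
rope-scaled models); the 'gpt-oss' prefix and the 4096/131072 values here are
illustrative, not taken from this patch:

    # Hypothetical GGUF metadata; values are made up for illustration.
    metadata = {
        'gpt-oss.context_length': 131072,                      # true max sequence length
        'gpt-oss.rope.scaling.original_context_length': 4096,  # pre-scaling length
    }

    for k in metadata:
        print(k, k.endswith('context_length'), k.endswith('.context_length'))
    # gpt-oss.context_length                         True  True
    # gpt-oss.rope.scaling.original_context_length   True  False  <- old check wrongly matched

Because the old check matched both keys, whichever key was iterated last could
overwrite ctx_size with the smaller pre-scaling value; requiring the leading
dot restricts the match to the <arch>.context_length key.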