Change the default ctx-size to 0 (auto) for llama.cpp

This commit is contained in:
oobabooga 2026-03-14 15:30:01 -07:00
parent 9f657d3976
commit 4ae2bd86e2
4 changed files with 8 additions and 3 deletions

View file

@@ -38,6 +38,9 @@ def load_model(model_name, loader=None):
sampler_hijack.hijack_samplers()
shared.args.loader = loader
if loader != 'llama.cpp' and shared.args.ctx_size == 0:
shared.args.ctx_size = 8192
output = load_func_map[loader](model_name)
if type(output) is tuple:
model, tokenizer = output