Add guard against training with llama.cpp loader

This commit is contained in:
oobabooga 2026-03-08 10:46:51 -03:00
parent 5a91b8462f
commit f6ffecfff2
2 changed files with 6 additions and 1 deletions

View file

@@ -310,6 +310,11 @@ def do_train(lora_name: str, always_override: bool, all_linear: bool, q_proj_en:
# == Input validation / processing ==
yield "Preparing the input..."
if shared.args.loader == 'llama.cpp':
yield "Error: LoRA training requires a model loaded with the Transformers loader. GGUF models are not supported for training."
return
lora_file_path = clean_path(None, lora_name)
if lora_file_path.strip() == '':
yield "Missing or invalid LoRA file name input."