Add guard against training with llama.cpp loader

This commit is contained in:
oobabooga 2026-03-08 10:46:51 -03:00
parent 5a91b8462f
commit f6ffecfff2
2 changed files with 6 additions and 1 deletion

View file

@@ -4,7 +4,7 @@ A LoRA is tied to a specific model architecture — a LoRA trained on Llama 3 8B
### Quick Start
1. Load your base model (no LoRAs loaded).
1. Load your base model with the **Transformers** loader (no LoRAs loaded).
2. Open the **Training** tab > **Train LoRA**.
3. Pick a dataset and configure parameters (see [below](#parameters)).
4. Click **Start LoRA Training** and monitor the [loss](#loss).

View file

@@ -310,6 +310,11 @@ def do_train(lora_name: str, always_override: bool, all_linear: bool, q_proj_en:
# == Input validation / processing ==
yield "Preparing the input..."
if shared.args.loader == 'llama.cpp':
yield "Error: LoRA training requires a model loaded with the Transformers loader. GGUF models are not supported for training."
return
lora_file_path = clean_path(None, lora_name)
if lora_file_path.strip() == '':
yield "Missing or invalid LoRA file name input."