Several small fixes

- Stop the llama-server subprocess explicitly on model unload instead of relying on GC (see the sketch after this list)
- Fix tool_calls[].index being a string instead of an int in API responses (see the response-shaping sketch below)
- Omit the tool_calls key from API responses when it is empty, per the OpenAI spec (same sketch)
- Prevent division by zero when micro_batch_size > batch_size in training
- Copy the sampler_priority list before mutating it in ExLlamaV3 (see the sampler sketch below)
- Normalize presence_penalty/frequency_penalty names for ExLlamaV3 sampler sorting (same sketch)
- Restore original chat_template after training instead of leaving it mutated
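
The llama-server fix replaces implicit cleanup with an explicit stop. Relying on GC means the `__del__`-driven teardown runs at an unpredictable time, so an unloaded model can keep holding its port and memory until the interpreter gets around to collecting the wrapper. A minimal sketch of the pattern, with illustrative class and method names (the actual module layout is not shown in this diff):

```python
import subprocess

class LlamaServer:
    # Hypothetical wrapper; names are illustrative, not the repo's API.
    def __init__(self, cmd):
        # cmd is illustrative, e.g. ["llama-server", "--model", model_path]
        self.process = subprocess.Popen(cmd)

    def stop(self):
        # Called explicitly on model unload instead of waiting for __del__/GC
        if self.process is not None and self.process.poll() is None:
            self.process.terminate()
            try:
                self.process.wait(timeout=10)
            except subprocess.TimeoutExpired:
                self.process.kill()  # escalate if SIGTERM is ignored
                self.process.wait()
        self.process = None
```

The unload path would then call `stop()` directly, making teardown deterministic.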
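The two tool_calls fixes concern the shape of chat-completion responses. OpenAI-compatible clients expect tool_calls[].index to be an integer (it identifies which call a delta belongs to), and expect the tool_calls key to be absent rather than present-but-empty. A sketch of the intended shaping; the builder function and field assembly here are illustrative:

```python
def build_assistant_message(content, tool_calls):
    # Illustrative response shaping; the real builder lives in the API layer
    message = {"role": "assistant", "content": content}
    if tool_calls:  # omit the key entirely when there are no tool calls
        message["tool_calls"] = [
            {**call, "index": int(call["index"])}  # index was a string before
            for call in tool_calls
        ]
    return message
```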
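The two ExLlamaV3 fixes target the sampler-ordering list: sorting previously mutated the shared sampler_priority list in place, so the reordering leaked across requests, and the OpenAI-style names presence_penalty/frequency_penalty did not match the names the sorter compared against. A sketch under assumed names (the alias table and the internal ExLlamaV3 identifiers here are hypothetical):

```python
# Hypothetical alias table mapping OpenAI-style parameter names to the
# names the sorter expects; the real ExLlamaV3 identifiers may differ.
SAMPLER_ALIASES = {
    "presence_penalty": "pres_penalty",
    "frequency_penalty": "freq_penalty",
}

def normalized_priority(sampler_priority):
    # Copy first so the shared, reused list is never mutated in place
    priority = list(sampler_priority)
    return [SAMPLER_ALIASES.get(name, name) for name in priority]
```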
oobabooga 2026-03-06 16:52:02 -03:00
parent 044566d42d
commit d03923924a
4 changed files with 16 additions and 4 deletions

modules/training.py

@@ -333,7 +333,8 @@ def do_train(lora_name: str, always_override: bool, all_linear: bool, q_proj_en:
         yield "Cannot input zeroes."
         return
 
-    gradient_accumulation_steps = batch_size // micro_batch_size
+    gradient_accumulation_steps = max(1, batch_size // micro_batch_size)
+    original_chat_template = getattr(shared.tokenizer, 'chat_template', None)
     if shared.tokenizer.pad_token_id is None:
         shared.tokenizer.pad_token_id = shared.tokenizer.eos_token_id
         shared.tokenizer.padding_side = "right"
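
The guard in this hunk matters because batch_size // micro_batch_size floors to 0 whenever micro_batch_size exceeds batch_size, and gradient_accumulation_steps is later used as a divisor, which is the division by zero named in the commit message:

```python
batch_size, micro_batch_size = 4, 8

batch_size // micro_batch_size          # 0 -> ZeroDivisionError downstream
max(1, batch_size // micro_batch_size)  # 1 -> no accumulation, but valid
```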
@@ -820,6 +821,10 @@ def do_train(lora_name: str, always_override: bool, all_linear: bool, q_proj_en:
         logger.info("Training complete, saving")
         lora_model.save_pretrained(lora_file_path)
 
+    # Restore the original chat_template if we changed it for training
+    if shared.tokenizer is not None and hasattr(shared.tokenizer, 'chat_template'):
+        shared.tokenizer.chat_template = original_chat_template
+
     if WANT_INTERRUPT:
         logger.info("Training interrupted.")
         yield f"Interrupted. Incomplete LoRA saved to `{lora_file_path}`."