Make --model work with absolute paths, e.g. --model /tmp/gemma-3-270m-it-IQ4_NL.gguf

oobabooga 2025-08-22 11:46:02 -07:00
parent fd41f2fafc
commit f247c2ae62
4 changed files with 35 additions and 19 deletions
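
Before this commit, --model only accepted a model name resolved under --model-dir; afterwards an absolute path to a .gguf file also works. A hedged usage sketch, assuming the repository's standard server.py entry point:

    # Name resolved under --model-dir (previous behavior, still works):
    python server.py --model gemma-3-270m-it-IQ4_NL.gguf

    # Absolute path, enabled by this commit:
    python server.py --model /tmp/gemma-3-270m-it-IQ4_NL.gguf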


@@ -5,6 +5,7 @@ from pathlib import Path
 import modules.shared as shared
 from modules.logging_colors import logger
 from modules.models_settings import get_model_metadata
+from modules.utils import resolve_model_path
 
 last_generation_time = time.time()
@@ -69,17 +70,24 @@ def load_model(model_name, loader=None):
 def llama_cpp_server_loader(model_name):
     from modules.llama_cpp_server import LlamaServer
 
-    path = Path(f'{shared.args.model_dir}/{model_name}')
+    path = resolve_model_path(model_name)
     if path.is_file():
         model_file = path
     else:
-        model_file = sorted(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf'))[0]
+        gguf_files = sorted(path.glob('*.gguf'))
+        if not gguf_files:
+            logger.error(f"No .gguf models found in the directory: {path}")
+            return None, None
+
+        model_file = gguf_files[0]
 
     try:
         model = LlamaServer(model_file)
         return model, model
     except Exception as e:
         logger.error(f"Error loading the model with llama.cpp: {str(e)}")
         return None, None
 
 
 def transformers_loader(model_name):
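
The imported helper resolve_model_path lives in modules/utils.py, one of the other changed files not shown in this excerpt. Its actual definition is not visible here; a minimal sketch of the behavior the diff relies on (absolute paths pass through unchanged, bare names resolve under --model-dir) might look like the following. The body is an assumption for illustration, not the commit's code:

    # Hypothetical sketch of modules/utils.resolve_model_path; the real
    # definition is in a changed file not shown in this excerpt.
    from pathlib import Path

    import modules.shared as shared


    def resolve_model_path(model_name):
        # An absolute path (e.g. /tmp/gemma-3-270m-it-IQ4_NL.gguf) is used
        # as-is; anything else is treated as a name inside --model-dir.
        path = Path(model_name)
        if path.is_absolute():
            return path

        return Path(shared.args.model_dir) / model_name

Either way, llama_cpp_server_loader above only depends on the returned Path: a file is loaded directly, while a directory is searched for .gguf files, with the new guard logging an error instead of raising IndexError when none are found.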