Safer check for is_multimodal when loading models

Avoids an unrelated multimodal error when a model fails to load due to lack of memory.
oobabooga 2025-08-28 11:13:19 -07:00
parent cfc83745ec
commit cb8780a4ce


@@ -57,7 +57,7 @@ def load_model(model_name, loader=None):
     shared.settings['truncation_length'] = shared.args.ctx_size
     shared.is_multimodal = False
-    if loader.lower() in ('exllamav3', 'llama.cpp'):
+    if loader.lower() in ('exllamav3', 'llama.cpp') and hasattr(model, 'is_multimodal'):
         shared.is_multimodal = model.is_multimodal()
     logger.info(f"Loaded \"{model_name}\" in {(time.time()-t0):.2f} seconds.")
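For context, a minimal sketch of the failure mode the hasattr guard addresses. The class and function names below (HealthyModel, PartiallyLoadedModel, probe_multimodal) are illustrative stand-ins, not the project's real API: when loading fails partway (for example out of memory), the object bound to model may not have an is_multimodal() method, so calling it unconditionally raises an AttributeError that obscures the real loading error. Checking hasattr() first lets the original error surface instead.

# Hypothetical sketch; names are stand-ins, not text-generation-webui's actual loader objects.

class HealthyModel:
    """Stands in for a fully loaded llama.cpp / ExLlamaV3 model."""
    def is_multimodal(self) -> bool:
        return True


class PartiallyLoadedModel:
    """Stands in for an object left behind when loading fails (e.g. OOM);
    it never gained an is_multimodal() method."""


def probe_multimodal(model, loader: str) -> bool:
    # Mirrors the patched check: only call is_multimodal() when the attribute
    # actually exists, so a failed load reports its own error rather than an
    # unrelated AttributeError.
    if loader.lower() in ('exllamav3', 'llama.cpp') and hasattr(model, 'is_multimodal'):
        return model.is_multimodal()
    return False


print(probe_multimodal(HealthyModel(), 'llama.cpp'))          # True
print(probe_multimodal(PartiallyLoadedModel(), 'llama.cpp'))  # False, no crash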