diff --git a/modules/models.py b/modules/models.py
index d2b9cc98..9535ea82 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -45,12 +45,13 @@ def load_model(model_name, loader=None):
         model, tokenizer = output
     else:
         model = output
-        if model is None:
-            return None, None
-        else:
+        if model is not None:
             from modules.transformers_loader import load_tokenizer
             tokenizer = load_tokenizer(model_name)
 
+    if model is None:
+        return None, None
+
     shared.settings.update({k: v for k, v in metadata.items() if k in shared.settings})
     if loader.lower().startswith('exllama') or loader.lower().startswith('tensorrt') or loader == 'llama.cpp':
         shared.settings['truncation_length'] = shared.args.ctx_size