From 521ddbb7225e34b3ee28b9d3f97a1b27764c9337 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 6 Mar 2026 01:56:13 -0300
Subject: [PATCH] Security: restrict API model loading args to UI-exposed
 parameters

The /v1/internal/model/load endpoint previously allowed setting any
shared.args attribute, including security-sensitive flags like
trust_remote_code. Now only keys from list_model_elements() are
accepted.
---
 extensions/openai/models.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/extensions/openai/models.py b/extensions/openai/models.py
index 115149b0..74f9dbee 100644
--- a/extensions/openai/models.py
+++ b/extensions/openai/models.py
@@ -1,4 +1,4 @@
-from modules import shared
+from modules import shared, ui
 from modules.logging_colors import logger
 from modules.LoRA import add_lora_to_model
 from modules.models import load_model, unload_model
@@ -46,9 +46,13 @@ def _load_model(data):
     update_model_parameters(model_settings)
 
     # Update shared.args with custom model loading settings
+    # Security: only allow keys that correspond to model loading
+    # parameters exposed in the UI. Never allow security-sensitive
+    # flags like trust_remote_code to be set via the API.
+    allowed_keys = set(ui.list_model_elements())
     if args:
         for k in args:
-            if hasattr(shared.args, k):
+            if k in allowed_keys and hasattr(shared.args, k):
                 setattr(shared.args, k, args[k])
 
     shared.model, shared.tokenizer = load_model(model_name)