diff --git a/extensions/openai/completions.py b/extensions/openai/completions.py
index 56d2059d..5187343f 100644
--- a/extensions/openai/completions.py
+++ b/extensions/openai/completions.py
@@ -134,24 +134,32 @@ def convert_history(history):
user_input_last = True
if current_message:
- chat_dialogue.append([current_message, '', ''])
+ chat_dialogue.append([current_message, '', '', {}])
current_message = ""
current_message = content
elif role == "assistant":
- if "tool_calls" in entry and isinstance(entry["tool_calls"], list) and len(entry["tool_calls"]) > 0 and content.strip() == "":
- continue # skip tool calls
+ meta = {}
+ tool_calls = entry.get("tool_calls")
+ if tool_calls and isinstance(tool_calls, list) and len(tool_calls) > 0:
+ meta["tool_calls"] = tool_calls
+ if content.strip() == "":
+ content = "" # keep empty content, don't skip
+
current_reply = content
user_input_last = False
if current_message:
- chat_dialogue.append([current_message, current_reply, ''])
+ chat_dialogue.append([current_message, current_reply, '', meta])
current_message = ""
current_reply = ""
else:
- chat_dialogue.append(['', current_reply, ''])
+ chat_dialogue.append(['', current_reply, '', meta])
elif role == "tool":
user_input_last = False
- chat_dialogue.append(['', '', content])
+ meta = {}
+ if "tool_call_id" in entry:
+ meta["tool_call_id"] = entry["tool_call_id"]
+ chat_dialogue.append(['', '', content, meta])
elif role == "system":
system_message += f"\n{content}" if system_message else content
diff --git a/extensions/openai/utils.py b/extensions/openai/utils.py
index 9a1de2e7..6937a108 100644
--- a/extensions/openai/utils.py
+++ b/extensions/openai/utils.py
@@ -83,6 +83,104 @@ def checkAndSanitizeToolCallCandidate(candidate_dict: dict, tool_names: list[str]
return None
+def _parseChannelToolCalls(answer: str, tool_names: list[str]):
+    """Parse channel-based tool calls used by GPT-OSS and similar models.
+
+    Format:
+    <|channel|>commentary to=functions.func_name <|constrain|>json<|message|>{"arg": "value"}
+    """
+    matches = []
+    for m in re.finditer(
+        r'<\|channel\|>commentary to=functions\.([^<\s]+)\s*(?:<\|constrain\|>json)?<\|message\|>(\{[^}]*(?:\{[^}]*\}[^}]*)*\})',  # JSON group tolerates one level of nested braces
+        answer
+    ):
+        func_name = m.group(1).strip()
+        if func_name not in tool_names:
+            continue  # not a registered tool: ignore this candidate
+        try:
+            arguments = json.loads(m.group(2))
+            matches.append({
+                "type": "function",
+                "function": {
+                    "name": func_name,
+                    "arguments": arguments
+                }
+            })
+        except json.JSONDecodeError:
+            pass  # arguments were not valid JSON: drop the candidate silently
+    return matches  # list of OpenAI-style tool_call dicts
+
+
+def _parseBareNameToolCalls(answer: str, tool_names: list[str]):
+    """Parse bare function-name style tool calls used by Mistral and similar models.
+
+    Format:
+        functionName{"arg": "value"}
+    Multiple calls are concatenated directly or separated by whitespace.
+    """
+    matches = []
+    if not tool_names:
+        # An empty alternation below would match every bare JSON object.
+        return matches
+    # Capture the matched name in the pattern itself so a tool name that is a
+    # prefix of another (e.g. "get" / "get_weather") cannot shadow the longer one.
+    escaped_names = sorted((re.escape(name) for name in tool_names), key=len, reverse=True)
+    pattern = r'(' + '|'.join(escaped_names) + r')\s*(\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\})'
+    for match in re.finditer(pattern, answer):
+        func_name = match.group(1)
+        json_str = match.group(2)
+        try:
+            arguments = json.loads(json_str)
+        except json.JSONDecodeError:
+            continue  # malformed argument payload: skip this candidate
+        matches.append({
+            "type": "function",
+            "function": {
+                "name": func_name,
+                "arguments": arguments
+            }
+        })
+    return matches
+
+
+def _parseXmlParamToolCalls(answer: str, tool_names: list[str]):
+    """Parse XML-parameter style tool calls used by Qwen3.5 and similar models.
+
+    Format:
+    <tool_call><function=func_name>
+    <parameter=arg>
+    value
+    </parameter>
+    </function></tool_call>
+    """
+    matches = []
+    for tc_match in re.finditer(r'<tool_call>\s*(.*?)\s*</tool_call>', answer, re.DOTALL):
+        tc_content = tc_match.group(1)
+        func_match = re.search(r'<function=([^>]+)>', tc_content)
+        if not func_match:
+            continue
+        func_name = func_match.group(1).strip()
+        if func_name not in tool_names:
+            continue
+        arguments = {}
+        for param_match in re.finditer(r'<parameter=([^>]+)>\s*(.*?)\s*</parameter>', tc_content, re.DOTALL):
+            param_name = param_match.group(1).strip()
+            param_value = param_match.group(2).strip()
+            try:
+                param_value = json.loads(param_value)
+            except (json.JSONDecodeError, ValueError):
+                pass  # non-JSON values stay as plain strings
+            arguments[param_name] = param_value
+        matches.append({
+            "type": "function",
+            "function": {
+                "name": func_name,
+                "arguments": arguments
+            }
+        })
+    return matches
+
+
def parseToolCall(answer: str, tool_names: list[str]):
matches = []
@@ -90,6 +188,21 @@ def parseToolCall(answer: str, tool_names: list[str]):
if len(answer) < 10:
return matches
+ # Check for channel-based tool calls (e.g. GPT-OSS format)
+ matches = _parseChannelToolCalls(answer, tool_names)
+ if matches:
+ return matches
+
+ # Check for XML-parameter style tool calls (e.g. Qwen3.5 format)
+ matches = _parseXmlParamToolCalls(answer, tool_names)
+ if matches:
+ return matches
+
+ # Check for bare function-name style tool calls (e.g. Mistral format)
+ matches = _parseBareNameToolCalls(answer, tool_names)
+ if matches:
+ return matches
+
    # Define the regex pattern to find the JSON content wrapped in <tool_call>, <function_call>, <tools>, and other tags observed from various models
    patterns = [r"(```[^\n]*)\n(.*?)```", r"<([^>]+)>(.*?)</\1>"]
diff --git a/modules/chat.py b/modules/chat.py
index 7c58542f..2beb1543 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -159,13 +159,20 @@ def generate_chat_prompt(user_input, state, **kwargs):
user_msg = entry[0].strip()
assistant_msg = entry[1].strip()
tool_msg = entry[2].strip() if len(entry) > 2 else ''
+ entry_meta = entry[3] if len(entry) > 3 else {}
row_idx = len(history) - i - 1
if tool_msg:
- messages.insert(insert_pos, {"role": "tool", "content": tool_msg})
+ tool_message = {"role": "tool", "content": tool_msg}
+ if "tool_call_id" in entry_meta:
+ tool_message["tool_call_id"] = entry_meta["tool_call_id"]
+ messages.insert(insert_pos, tool_message)
- if assistant_msg:
+ if not assistant_msg and entry_meta.get('tool_calls'):
+ # Assistant message with only tool_calls and no text content
+ messages.insert(insert_pos, {"role": "assistant", "content": "", "tool_calls": entry_meta['tool_calls']})
+ elif assistant_msg:
# Handle GPT-OSS as a special case
if '<|channel|>analysis<|message|>' in assistant_msg or '<|channel|>final<|message|>' in assistant_msg:
thinking_content = ""
@@ -240,6 +247,10 @@ def generate_chat_prompt(user_input, state, **kwargs):
# Default case (used by all other models)
messages.insert(insert_pos, {"role": "assistant", "content": assistant_msg})
+ # Attach tool_calls metadata to the assistant message if present
+ if entry_meta.get('tool_calls') and messages[insert_pos].get('role') == 'assistant':
+ messages[insert_pos]['tool_calls'] = entry_meta['tool_calls']
+
if user_msg not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
# Check for user message attachments in metadata
user_key = f"user_{row_idx}"