diff --git a/modules/chat.py b/modules/chat.py
index daecd50b..10785c19 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -882,6 +882,8 @@ def generate_search_query(user_message, state):
if "</think>" in query:
    query = query.rsplit("</think>", 1)[1]
elif "<|start|>assistant<|channel|>final<|message|>" in query:
query = query.rsplit("<|start|>assistant<|channel|>final<|message|>", 1)[1]
+ elif "<|channel|>final<|message|>" in query:
+ query = query.rsplit("<|channel|>final<|message|>", 1)[1]
elif "</seed:think>" in query:
    query = query.rsplit("</seed:think>", 1)[1]
diff --git a/modules/reasoning.py b/modules/reasoning.py
index 3a9ab546..bc61aab3 100644
--- a/modules/reasoning.py
+++ b/modules/reasoning.py
@@ -4,8 +4,8 @@ import html as html_module
# Use None for start_tag to match from beginning (end-only formats should be listed last)
THINKING_FORMATS = [
('<think>', '</think>', None),
- ('<|channel|>analysis<|message|>', '<|end|>', '<|start|>assistant<|channel|>final<|message|>'),
- ('<|channel|>commentary<|message|>', '<|end|>', '<|start|>assistant<|channel|>final<|message|>'),
+ ('<|channel|>analysis<|message|>', '<|end|>', '<|channel|>final<|message|>'),
+ ('<|channel|>commentary<|message|>', '<|end|>', '<|channel|>final<|message|>'),
('<seed:think>', '</seed:think>', None),
('<|think|>', '<|end|>', '<|content|>'), # Solar Open
# ('Thinking Process:', '', None), # Qwen3.5 verbose thinking outside tags -- removed: too prone to false positives in streaming
@@ -81,4 +81,14 @@ def extract_reasoning(text, html_escaped=False):
return text[thought_start:thought_end], text[content_start:]
+ # Handle standalone GPT-OSS final channel marker without a preceding
+ # analysis/commentary block (the model skipped thinking entirely).
+ for marker in ['<|start|>assistant<|channel|>final<|message|>', '<|channel|>final<|message|>']:
+ marker_esc = esc(marker)
+ pos = text.find(marker_esc)
+ if pos != -1:
+ before = text[:pos].strip()
+ after = text[pos + len(marker_esc):]
+ return (before if before else None), after
+
return None, text
diff --git a/modules/ui_image_generation.py b/modules/ui_image_generation.py
index e9df9bd3..dc108f6d 100644
--- a/modules/ui_image_generation.py
+++ b/modules/ui_image_generation.py
@@ -728,6 +728,8 @@ def generate_prompt_variation(state):
if "</think>" in variation:
    variation = variation.rsplit("</think>", 1)[1]
elif "<|start|>assistant<|channel|>final<|message|>" in variation:
variation = variation.rsplit("<|start|>assistant<|channel|>final<|message|>", 1)[1]
+ elif "<|channel|>final<|message|>" in variation:
+ variation = variation.rsplit("<|channel|>final<|message|>", 1)[1]
elif "</seed:think>" in variation:
    variation = variation.rsplit("</seed:think>", 1)[1]