mirror of
https://github.com/oobabooga/text-generation-webui.git
synced 2026-03-18 03:14:39 +01:00
Fix GPT-OSS channel markup leaking into UI when model skips analysis block
This commit is contained in:
parent
09a6549816
commit
cb08ba63dc
```diff
@@ -882,6 +882,8 @@ def generate_search_query(user_message, state):
         query = query.rsplit("</think>", 1)[1]
     elif "<|start|>assistant<|channel|>final<|message|>" in query:
         query = query.rsplit("<|start|>assistant<|channel|>final<|message|>", 1)[1]
     elif "<|channel|>final<|message|>" in query:
         query = query.rsplit("<|channel|>final<|message|>", 1)[1]
     elif "</seed:think>" in query:
         query = query.rsplit("</seed:think>", 1)[1]
```
```diff
@@ -4,8 +4,8 @@ import html as html_module

 # Use None for start_tag to match from beginning (end-only formats should be listed last)
 THINKING_FORMATS = [
     ('<think>', '</think>', None),
     ('<|channel|>analysis<|message|>', '<|end|>', '<|start|>assistant<|channel|>final<|message|>'),
     ('<|channel|>commentary<|message|>', '<|end|>', '<|start|>assistant<|channel|>final<|message|>'),
     ('<|channel|>analysis<|message|>', '<|end|>', '<|channel|>final<|message|>'),
     ('<|channel|>commentary<|message|>', '<|end|>', '<|channel|>final<|message|>'),
     ('<seed:think>', '</seed:think>', None),
     ('<|think|>', '<|end|>', '<|content|>'),  # Solar Open
     # ('Thinking Process:', '</think>', None),  # Qwen3.5 verbose thinking outside tags -- removed: too prone to false positives in streaming
```
```diff
@@ -81,4 +81,14 @@ def extract_reasoning(text, html_escaped=False):

         return text[thought_start:thought_end], text[content_start:]

     # Handle standalone GPT-OSS final channel marker without a preceding
     # analysis/commentary block (the model skipped thinking entirely).
     for marker in ['<|start|>assistant<|channel|>final<|message|>', '<|channel|>final<|message|>']:
         marker_esc = esc(marker)
         pos = text.find(marker_esc)
         if pos != -1:
             before = text[:pos].strip()
             after = text[pos + len(marker_esc):]
             return (before if before else None), after

     return None, text
```
```diff
@@ -728,6 +728,8 @@ def generate_prompt_variation(state):
         variation = variation.rsplit("</think>", 1)[1]
     elif "<|start|>assistant<|channel|>final<|message|>" in variation:
         variation = variation.rsplit("<|start|>assistant<|channel|>final<|message|>", 1)[1]
     elif "<|channel|>final<|message|>" in variation:
         variation = variation.rsplit("<|channel|>final<|message|>", 1)[1]
     elif "</seed:think>" in variation:
         variation = variation.rsplit("</seed:think>", 1)[1]
```
Loading…
Reference in a new issue