remove think_end_token_id in streaming content (#3327) · InternLM/lmdeploy@448491b · GitHub
[go: up one dir, main page]

Skip to content

Commit 448491b

Browse files
authored
remove think_end_token_id in streaming content (#3327)
1 parent ef32d85 commit 448491b

File tree

1 file changed

+5
-2
lines changed

1 file changed

+5
-2
lines changed

lmdeploy/serve/openai/reasoning_parser/deepseek_r1_reasoning_parser.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,11 @@ def extract_reasoning_content_streaming(
5151
about what has previously been parsed and extracted (see constructor)
5252
"""
5353
# Skip single special tokens
54-
if len(delta_token_ids) == 1 and (delta_token_ids[0] in [self.think_start_token_id, self.think_end_token_id]):
55-
return None
54+
if len(delta_token_ids) == 1:
55+
if delta_token_ids[0] == self.think_end_token_id:
56+
return DeltaMessage(content='')
57+
elif delta_token_ids[0] == self.think_start_token_id:
58+
return None
5659

5760
# Check if <think> is present in previous or delta.
5861
# Keep compatibility with models that don't generate <think> tokens.

0 commit comments

Comments
 (0)
0