
Commit 457e7c5

fix: Remove <think> content in model request (camel-ai#1613)
1 parent f640fb7 commit 457e7c5

9 files changed: +150 −26 lines


.github/ISSUE_TEMPLATE/bug_report.yml

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ body:
     attributes:
       label: What version of camel are you using?
       description: Run command `python3 -c 'print(__import__("camel").__version__)'` in your shell and paste the output here.
-      placeholder: E.g., 0.2.21
+      placeholder: E.g., 0.2.22
     validations:
       required: true

camel/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.21'
+__version__ = '0.2.22'
 
 __all__ = [
     '__version__',

camel/models/base_model.py

Lines changed: 54 additions & 1 deletion
@@ -11,6 +11,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import abc
+import re
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Union
 
@@ -27,7 +29,30 @@
 from camel.utils import BaseTokenCounter
 
 
-class BaseModelBackend(ABC):
+class ModelBackendMeta(abc.ABCMeta):
+    r"""Metaclass that automatically preprocesses messages in run method.
+
+    Automatically wraps the run method of any class inheriting from
+    BaseModelBackend to preprocess messages (remove <think> tags) before they
+    are sent to the model.
+    """
+
+    def __new__(mcs, name, bases, namespace):
+        r"""Wraps run method with preprocessing if it exists in the class."""
+        if 'run' in namespace:
+            original_run = namespace['run']
+
+            def wrapped_run(
+                self, messages: List[OpenAIMessage], *args, **kwargs
+            ):
+                messages = self.preprocess_messages(messages)
+                return original_run(self, messages, *args, **kwargs)
+
+            namespace['run'] = wrapped_run
+        return super().__new__(mcs, name, bases, namespace)
+
+
+class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
     r"""Base class for different model backends.
     It may be OpenAI API, a local LLM, a stub for unit tests, etc.
@@ -73,6 +98,34 @@ def token_counter(self) -> BaseTokenCounter:
         """
         pass
 
+    def preprocess_messages(
+        self, messages: List[OpenAIMessage]
+    ) -> List[OpenAIMessage]:
+        r"""Preprocess messages before sending to model API.
+        Removes thinking content and other model-specific preprocessing.
+
+        Args:
+            messages (List[OpenAIMessage]): Original messages
+
+        Returns:
+            List[OpenAIMessage]: Preprocessed messages
+        """
+        # Remove thinking content from messages before sending to API
+        # This ensures only the final response is sent, excluding
+        # intermediate thought processes
+        return [
+            {  # type: ignore[misc]
+                **msg,
+                'content': re.sub(
+                    r'<think>.*?</think>',
+                    '',
+                    msg['content'],  # type: ignore[arg-type]
+                    flags=re.DOTALL,
+                ).strip(),
+            }
+            for msg in messages
+        ]
+
     @abstractmethod
     def run(
         self,
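Taken together, the metaclass and `preprocess_messages` mean every backend's `run` now receives messages with `<think>…</think>` spans already stripped, instead of each backend doing this itself. Below is a minimal standalone sketch of the same regex behaviour (plain Python, no CAMEL imports; the `strip_think` helper name is illustrative, not part of the library):

```python
import re

def strip_think(content: str) -> str:
    # Drop <think>...</think> spans; DOTALL lets the pattern cross newlines,
    # and strip() trims leftover edge whitespace, mirroring what
    # preprocess_messages does for each message's content.
    return re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()

print(strip_think('<think>Let me reason step by step...</think>The answer is 42.'))
# -> 'The answer is 42.'
```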

camel/models/deepseek_model.py

Lines changed: 0 additions & 18 deletions
@@ -118,8 +118,6 @@ def run(
         if self.model_type in [
             ModelType.DEEPSEEK_REASONER,
         ]:
-            import re
-
             logger.warning(
                 "You are using a DeepSeek Reasoner model, "
                 "which has certain limitations, reference: "
@@ -141,22 +139,6 @@ def run(
                 if key in self.model_config_dict:
                     del self.model_config_dict[key]
 
-        # Remove thinking content from messages before sending to API
-        # This ensures only the final response is sent, excluding
-        # intermediate thought processes
-        messages = [
-            {  # type: ignore[misc]
-                **msg,
-                'content': re.sub(
-                    r'<think>.*?</think>',
-                    '',
-                    msg['content'],  # type: ignore[arg-type]
-                    flags=re.DOTALL,
-                ).strip(),
-            }
-            for msg in messages
-        ]
-
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,

docs/conf.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@
 project = 'CAMEL'
 copyright = '2024, CAMEL-AI.org'
 author = 'CAMEL-AI.org'
-release = '0.2.21'
+release = '0.2.22'
 
 html_favicon = (
     'https://raw.githubusercontent.com/camel-ai/camel/master/misc/favicon.png'

docs/get_started/installation.md

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ conda create --name camel python=3.10
 conda activate camel
 
 # Clone github repo
-git clone -b v0.2.21 https://github.com/camel-ai/camel.git
+git clone -b v0.2.22 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel

docs/key_modules/loaders.md

Lines changed: 2 additions & 2 deletions
@@ -340,14 +340,14 @@ response = jina_reader.read_content("https://docs.camel-ai.org/")
 print(response)
 ```
 ```markdown
->>>Welcome to CAMEL’s documentation! — CAMEL 0.2.21 documentation
+>>>Welcome to CAMEL’s documentation! — CAMEL 0.2.22 documentation
 ===============
 
 [Skip to main content](https://docs.camel-ai.org/#main-content)
 
 Back to top Ctrl+K
 
-[![Image 1](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png) ![Image 2](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png)CAMEL 0.2.21](https://docs.camel-ai.org/#)
+[![Image 1](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png) ![Image 2](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png)CAMEL 0.2.22](https://docs.camel-ai.org/#)
 
 Search Ctrl+K

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "camel-ai"
-version = "0.2.21"
+version = "0.2.22"
 authors = ["CAMEL-AI.org"]
 description = "Communicative Agents for AI Society Study"
 readme = "README.md"

test/models/test_base_model.py

Lines changed: 89 additions & 0 deletions
@@ -0,0 +1,89 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+
+from camel.models import BaseModelBackend
+from camel.types import ModelType
+
+
+class TestBaseModelBackend:
+    r"""Unit tests for the BaseModelBackend class."""
+
+    def test_preprocess_messages(self):
+        r"""Test message preprocessing removes thinking content correctly."""
+
+        class DummyModel(BaseModelBackend):
+            @property
+            def token_counter(self):
+                pass
+
+            def run(self, messages):
+                pass
+
+            def check_model_config(self):
+                pass
+
+        model = DummyModel(ModelType.GPT_4O_MINI)
+
+        # Test basic thinking removal
+        messages = [
+            {
+                'role': 'user',
+                'content': 'Hello <think>thinking about response</think> '
+                'world',
+            },
+            {
+                'role': 'assistant',
+                'content': '<think>Let me think...\nThinking more...</'
+                'think>Response',
+            },
+        ]
+
+        processed = model.preprocess_messages(messages)
+        assert len(processed) == 2
+        assert processed[0]['content'] == 'Hello world'
+        assert processed[1]['content'] == 'Response'
+
+        # Test message without thinking tags
+        messages = [{'role': 'user', 'content': 'plain message'}]
+        processed = model.preprocess_messages(messages)
+        assert processed[0]['content'] == 'plain message'
+
+    def test_metaclass_preprocessing(self):
+        r"""Test that metaclass automatically preprocesses messages in run
+        method."""
+        processed_messages = None
+
+        class TestModel(BaseModelBackend):
+            @property
+            def token_counter(self):
+                pass
+
+            def run(self, messages):
+                nonlocal processed_messages
+                processed_messages = messages
+                return None
+
+            def check_model_config(self):
+                pass
+
+        model = TestModel(ModelType.GPT_4O_MINI)
+        messages = [
+            {'role': 'user', 'content': 'Hello <think>hi</think> world'}
+        ]
+
+        # Call run method and verify messages were preprocessed
+        model.run(messages)
+        assert processed_messages is not None
+        assert processed_messages[0]['content'] == 'Hello world'
