Skip to content

Commit e577058

Browse files
authored
fix: parallel tool call sequence bug, update doc and readme (camel-ai#2148)
1 parent 9a4510d commit e577058

File tree

7 files changed

+455
-30
lines changed

7 files changed

+455
-30
lines changed

README.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -416,6 +416,13 @@ For more information please contact [email protected]
416416

417417
- **Ambassador Project:** Advocate for CAMEL-AI, host events, and contribute content. [Learn more](https://www.camel-ai.org/community)
418418

419+
- **WeChat Community:** Scan the QR code below to join our WeChat community.
420+
421+
<div align="center">
422+
<img src="misc/wechat.jpg" alt="WeChat QR Code" width="200">
423+
</div>
424+
425+
419426
<br>
420427

421428
## Citation

camel/embeddings/azure_embedding.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ class AzureEmbedding(BaseEmbedding[str]):
3131
Args:
3232
model_type (EmbeddingModelType, optional): The model type to be
3333
used for text embeddings.
34-
(default: :obj:`TEXT_EMBEDDING_ADA_2`)
34+
(default: :obj:`TEXT_EMBEDDING_3_SMALL`)
3535
url (Optional[str], optional): The url to the Azure OpenAI service.
3636
(default: :obj:`None`)
3737
api_key (str, optional): The API key for authenticating with the

camel/models/base_model.py

Lines changed: 111 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -108,42 +108,125 @@ def preprocess_messages(
108108
) -> List[OpenAIMessage]:
109109
r"""Preprocess messages before sending to model API.
110110
Removes thinking content from assistant and user messages.
111+
Automatically formats messages for parallel tool calls if tools are
112+
detected.
111113
112114
Args:
113-
messages (List[OpenAIMessage]): Original messages
115+
messages (List[OpenAIMessage]): Original messages.
114116
115117
Returns:
116118
List[OpenAIMessage]: Preprocessed messages
117119
"""
118-
119-
def should_process_thinking(msg: OpenAIMessage) -> bool:
120-
# Only process thinking content for assistant and user messages
121-
return msg['role'] in ['assistant', 'user'] and isinstance(
122-
msg['content'], str
120+
# Process all messages in a single pass
121+
processed_messages = []
122+
tool_calls_buffer: List[OpenAIMessage] = []
123+
tool_responses_buffer: Dict[str, OpenAIMessage] = {}
124+
has_tool_calls = False
125+
126+
for msg in messages:
127+
# Remove thinking content if needed
128+
role = msg.get('role')
129+
content = msg.get('content')
130+
if role in ['assistant', 'user'] and isinstance(content, str):
131+
if '<think>' in content and '</think>' in content:
132+
content = re.sub(
133+
r'<think>.*?</think>', '', content, flags=re.DOTALL
134+
).strip()
135+
processed_msg = dict(msg)
136+
processed_msg['content'] = content
137+
else:
138+
processed_msg = dict(msg)
139+
140+
# Check and track tool calls/responses
141+
is_tool_call = (
142+
processed_msg.get("role") == "assistant"
143+
and "tool_calls" in processed_msg
144+
)
145+
is_tool_response = (
146+
processed_msg.get("role") == "tool"
147+
and "tool_call_id" in processed_msg
123148
)
124149

125-
def remove_thinking(content: str) -> str:
126-
# Only remove thinking content if the tags are present
127-
if '<think>' in content and '</think>' in content:
128-
return re.sub(
129-
r'<think>.*?</think>',
130-
'',
131-
content,
132-
flags=re.DOTALL,
133-
).strip()
134-
return content
135-
136-
return [
137-
{ # type: ignore[misc]
138-
**msg,
139-
'content': (
140-
remove_thinking(msg['content']) # type: ignore[arg-type]
141-
if should_process_thinking(msg)
142-
else msg['content']
143-
),
144-
}
145-
for msg in messages
146-
]
150+
if is_tool_call or is_tool_response:
151+
has_tool_calls = True
152+
153+
# Store the processed message for later formatting if needed
154+
processed_messages.append(processed_msg)
155+
156+
# If no tool calls detected, return the processed messages
157+
if not has_tool_calls:
158+
return processed_messages # type: ignore[return-value]
159+
160+
# Format messages for parallel tool calls
161+
formatted_messages = []
162+
tool_calls_buffer = []
163+
tool_responses_buffer = {}
164+
165+
for msg in processed_messages: # type: ignore[assignment]
166+
# If this is an assistant message with tool calls, add it to the
167+
# buffer
168+
if msg.get("role") == "assistant" and "tool_calls" in msg:
169+
tool_calls_buffer.append(msg)
170+
continue
171+
172+
# If this is a tool response, add it to the responses buffer
173+
if msg.get("role") == "tool" and "tool_call_id" in msg:
174+
tool_call_id = msg.get("tool_call_id")
175+
if isinstance(tool_call_id, str):
176+
tool_responses_buffer[tool_call_id] = msg
177+
continue
178+
179+
# Process any complete tool call + responses before adding regular
180+
# messages
181+
if tool_calls_buffer and tool_responses_buffer:
182+
# Add the assistant message with tool calls
183+
assistant_msg = tool_calls_buffer[0]
184+
formatted_messages.append(assistant_msg)
185+
186+
# Add all matching tool responses for this assistant message
187+
tool_calls = assistant_msg.get("tool_calls", [])
188+
if isinstance(tool_calls, list):
189+
for tool_call in tool_calls:
190+
tool_call_id = tool_call.get("id")
191+
if (
192+
isinstance(tool_call_id, str)
193+
and tool_call_id in tool_responses_buffer
194+
):
195+
formatted_messages.append(
196+
tool_responses_buffer[tool_call_id]
197+
)
198+
del tool_responses_buffer[tool_call_id]
199+
200+
tool_calls_buffer.pop(0)
201+
202+
# Add the current regular message
203+
formatted_messages.append(msg)
204+
205+
# Process any remaining buffered tool calls and responses
206+
while tool_calls_buffer:
207+
assistant_msg = tool_calls_buffer[0]
208+
formatted_messages.append(assistant_msg)
209+
210+
tool_calls = assistant_msg.get("tool_calls", [])
211+
if isinstance(tool_calls, list):
212+
for tool_call in tool_calls:
213+
tool_call_id = tool_call.get("id")
214+
if (
215+
isinstance(tool_call_id, str)
216+
and tool_call_id in tool_responses_buffer
217+
):
218+
formatted_messages.append(
219+
tool_responses_buffer[tool_call_id]
220+
)
221+
del tool_responses_buffer[tool_call_id]
222+
223+
tool_calls_buffer.pop(0)
224+
225+
# Add any remaining tool responses
226+
for response in tool_responses_buffer.values():
227+
formatted_messages.append(response)
228+
229+
return formatted_messages
147230

148231
@abstractmethod
149232
def _run(

camel/toolkits/google_calendar_toolkit.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
from camel.logger import get_logger
2121
from camel.toolkits import FunctionTool
2222
from camel.toolkits.base import BaseToolkit
23-
from camel.utils.commons import MCPServer, api_keys_required
23+
from camel.utils import MCPServer, api_keys_required
2424

2525
logger = get_logger(__name__)
2626

docs/key_modules/embeddings.md

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,9 @@ Utilizes OpenAI's models for generating image embeddings. This will requires Ope
3737
### 2.5. `AzureOpenAI`
3838
Utilizes OpenAI's models for generating text embeddings. This requires an Azure OpenAI API key.
3939

40+
### 2.6. `TogetherEmbedding`
41+
Utilizes Together AI's models for generating text embeddings. This requires a Together AI API key.
42+
4043

4144
## 3. Get Started
4245
To use the embedding functionalities, you need to import the necessary classes.
@@ -100,3 +103,14 @@ azure_openai_embedding = AzureEmbedding(model_type=EmbeddingModelType.TEXT_EMBED
100103
# Generate embeddings for a list of texts
101104
embeddings = azure_openai_embedding.embed_list(["Hello, world!", "Another example"])
102105
```
106+
107+
### 3.6. Using `TogetherEmbedding`
108+
```python
109+
from camel.embeddings import TogetherEmbedding
110+
111+
# Initialize the Together AI embedding with a specific model
112+
together_embedding = TogetherEmbedding(model_name="togethercomputer/m2-bert-80M-8k-retrieval")
113+
114+
# Generate embeddings for a list of texts
115+
embeddings = together_embedding.embed_list(["Hello, world!", "Another example"])
116+
```

misc/wechat.jpg

195 KB
Loading

0 commit comments

Comments
 (0)