# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========


from camel.models import BaseModelBackend
from camel.types import ModelType

| 19 | + |
class TestBaseModelBackend:
    r"""Unit tests for the BaseModelBackend class."""

    def test_preprocess_messages(self):
        r"""Preprocessing should strip <think>...</think> spans from
        message content while leaving everything else untouched."""

        class StubModel(BaseModelBackend):
            # Minimal concrete subclass: the abstract API is stubbed out
            # so the shared preprocessing logic can be exercised directly.
            @property
            def token_counter(self):
                pass

            def run(self, messages):
                pass

            def check_model_config(self):
                pass

        backend = StubModel(ModelType.GPT_4O_MINI)

        # Thinking spans — including multi-line ones — must be removed,
        # with the surrounding text preserved.
        history = [
            {
                'role': 'user',
                'content': 'Hello <think>thinking about response</think> '
                'world',
            },
            {
                'role': 'assistant',
                'content': '<think>Let me think...\nThinking more...</'
                'think>Response',
            },
        ]
        cleaned = backend.preprocess_messages(history)
        assert len(cleaned) == 2
        assert cleaned[0]['content'] == 'Hello world'
        assert cleaned[1]['content'] == 'Response'

        # A message with no thinking tags passes through unchanged.
        cleaned = backend.preprocess_messages(
            [{'role': 'user', 'content': 'plain message'}]
        )
        assert cleaned[0]['content'] == 'plain message'

    def test_metaclass_preprocessing(self):
        r"""The metaclass should preprocess messages automatically before
        a subclass's run method body ever sees them."""
        seen = None

        class CapturingModel(BaseModelBackend):
            @property
            def token_counter(self):
                pass

            def run(self, messages):
                # Record exactly what the (wrapped) run method receives.
                nonlocal seen
                seen = messages
                return None

            def check_model_config(self):
                pass

        backend = CapturingModel(ModelType.GPT_4O_MINI)
        backend.run(
            [{'role': 'user', 'content': 'Hello <think>hi</think> world'}]
        )

        # The thinking span must already be stripped by the time the
        # subclass implementation runs.
        assert seen is not None
        assert seen[0]['content'] == 'Hello world'
0 commit comments