import os
import sys
import unittest
from unittest.mock import patch, MagicMock

# Add the project root directory to the Python path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))

from src.llm.llm__client import LLMClient, create_llm_client


class TestLLMClient(unittest.TestCase):
    """Unit tests for LLMClient: configuration precedence, message/tool
    formatting, the tool-calling request flow (mocked HTTP), and lifecycle
    helpers (set_api_key / close / create_llm_client)."""

    def setUp(self):
        """Snapshot the environment and strip LLM variables so each test starts clean."""
        # Save the original environment variables so tearDown can restore them.
        self.original_env = os.environ.copy()
        # Remove variables that would otherwise leak into client construction.
        for var in ('LLM_TOKEN', 'LLM_BASE_URL', 'LLM_MODEL_NAME'):
            # pop() with a default is the idiomatic check-free deletion.
            os.environ.pop(var, None)

    def tearDown(self):
        """Restore the environment exactly as it was before the test."""
        os.environ.clear()
        os.environ.update(self.original_env)

    def test_initialization_default_config(self):
        """Default construction should fall back to the built-in config values."""
        client = LLMClient()

        # Verify the default configuration.
        self.assertEqual(client.config['api_key'], 'your_llm_token')
        self.assertEqual(client.config['api_base'], 'https://api-inference.modelscope.cn/v1/')
        self.assertEqual(client.config['model'], 'Qwen/Qwen3-Coder-480B-A35B-Instruct')
        self.assertEqual(client.config['temperature'], 0.7)
        self.assertEqual(client.config['max_tokens'], 2000)

        # Verify the session request headers derived from the config.
        self.assertEqual(client.session.headers['Authorization'], 'Bearer your_llm_token')
        self.assertEqual(client.session.headers['Content-Type'], 'application/json')

    def test_initialization_with_env_vars(self):
        """Environment variables should override the built-in defaults."""
        os.environ['LLM_TOKEN'] = 'test_env_token'
        os.environ['LLM_BASE_URL'] = 'https://test-api.example.com/v1/'
        os.environ['LLM_MODEL_NAME'] = 'test-model-name'

        client = LLMClient()

        # Verify the environment-variable configuration was applied.
        self.assertEqual(client.config['api_key'], 'test_env_token')
        self.assertEqual(client.config['api_base'], 'https://test-api.example.com/v1/')
        self.assertEqual(client.config['model'], 'test-model-name')

    def test_initialization_with_custom_config(self):
        """An explicit config dict should override defaults; unset keys keep defaults."""
        custom_config = {
            'api_key': 'custom_api_key',
            'model': 'custom-model',
            'temperature': 0.5,
            'max_tokens': 1000,
        }

        client = LLMClient(custom_config)

        # Verify the custom configuration was applied.
        self.assertEqual(client.config['api_key'], 'custom_api_key')
        self.assertEqual(client.config['model'], 'custom-model')
        self.assertEqual(client.config['temperature'], 0.5)
        self.assertEqual(client.config['max_tokens'], 1000)
        # Keys not present in the custom config keep their default values.
        self.assertEqual(client.config['api_base'], 'https://api-inference.modelscope.cn/v1/')

    def test_prepare_messages(self):
        """_prepare_messages should produce a [system, user] message list."""
        client = LLMClient()
        user_input = "测试输入"
        system_prompt = "测试系统提示"

        messages = client._prepare_messages(user_input, system_prompt)

        self.assertEqual(len(messages), 2)
        self.assertEqual(messages[0]['role'], 'system')
        self.assertEqual(messages[0]['content'], system_prompt)
        self.assertEqual(messages[1]['role'], 'user')
        self.assertEqual(messages[1]['content'], user_input)

    def test_prepare_tool_params(self):
        """_prepare_tool_params should wrap tools in the OpenAI function-call schema."""
        client = LLMClient()
        tools = [
            {
                'name': 'test_tool',
                'description': '测试工具',
                'parameters': {'type': 'object', 'properties': {}},
            }
        ]

        formatted_tools = client._prepare_tool_params(tools)

        self.assertEqual(len(formatted_tools), 1)
        self.assertEqual(formatted_tools[0]['type'], 'function')
        self.assertEqual(formatted_tools[0]['function']['name'], 'test_tool')
        self.assertEqual(formatted_tools[0]['function']['description'], '测试工具')

    @patch('src.llm.llm__client.requests.Session')
    def test_call_with_tools_success_direct_answer(self, mock_session):
        """A response with plain content should yield status=success and that content."""
        # Build a mock HTTP response carrying a direct text answer.
        mock_response = MagicMock()
        mock_response.raise_for_status.return_value = None
        mock_response.json.return_value = {
            'choices': [
                {
                    'message': {
                        'content': '这是大模型的回答'
                    }
                }
            ]
        }
        mock_session_instance = MagicMock()
        mock_session_instance.post.return_value = mock_response
        mock_session.return_value = mock_session_instance

        client = LLMClient()
        # The Session class is patched, so rebind the instance attribute explicitly.
        client.session = mock_session_instance

        tools = [{'name': 'test_tool', 'description': '测试工具', 'parameters': {}}]
        user_input = '用户输入'
        system_prompt = '系统提示'

        # Log the test inputs (no placeholders, so plain strings — not f-strings).
        print("\n=== 测试场景: 大模型直接回答 ===")
        print("输入信息:")
        print(f"  系统提示: {system_prompt}")
        print(f"  用户输入: {user_input}")
        print(f"  工具信息: {tools}")

        result = client.call_with_tools(tools, user_input, system_prompt)

        # Log the test outputs.
        print("输出信息:")
        print(f"  状态: {result['status']}")
        print(f"  回答内容: {result['content']}")
        print("==============================\n")

        # Verify the result and that exactly one HTTP POST was made.
        self.assertEqual(result['status'], 'success')
        self.assertEqual(result['content'], '这是大模型的回答')
        mock_session_instance.post.assert_called_once()

    @patch('src.llm.llm__client.requests.Session')
    def test_call_with_tools_success_tool_call(self, mock_session):
        """A response with tool_calls should yield a parsed tool_call dict."""
        # Build a mock HTTP response carrying a function/tool call.
        mock_response = MagicMock()
        mock_response.raise_for_status.return_value = None
        mock_response.json.return_value = {
            'choices': [
                {
                    'message': {
                        'tool_calls': [
                            {
                                'type': 'function',
                                'function': {
                                    'name': 'test_tool',
                                    'arguments': '{"param1": "value1"}'
                                }
                            }
                        ]
                    }
                }
            ]
        }
        mock_session_instance = MagicMock()
        mock_session_instance.post.return_value = mock_response
        mock_session.return_value = mock_session_instance

        client = LLMClient()
        client.session = mock_session_instance

        tools = [{'name': 'test_tool', 'description': '测试工具', 'parameters': {}}]
        user_input = '用户输入'
        system_prompt = '系统提示'

        # Log the test inputs.
        print("\n=== 测试场景: 大模型调用工具 ===")
        print("输入信息:")
        print(f"  系统提示: {system_prompt}")
        print(f"  用户输入: {user_input}")
        print(f"  工具信息: {tools}")

        result = client.call_with_tools(tools, user_input, system_prompt)

        # Log the test outputs.
        print("输出信息:")
        print(f"  状态: {result['status']}")
        print(f"  工具调用名称: {result['tool_call']['name']}")
        print(f"  工具调用参数: {result['tool_call']['parameters']}")
        print("==============================\n")

        # Verify the tool call was parsed (JSON arguments decoded into a dict).
        self.assertEqual(result['status'], 'success')
        self.assertEqual(result['tool_call']['name'], 'test_tool')
        self.assertEqual(result['tool_call']['parameters']['param1'], 'value1')
        mock_session_instance.post.assert_called_once()

    @patch('src.llm.llm__client.requests.Session')
    def test_call_with_tools_api_error(self, mock_session):
        """A transport-level exception should surface as a structured error result."""
        # Make the mocked POST raise to simulate a failed API request.
        mock_session_instance = MagicMock()
        mock_session_instance.post.side_effect = Exception("API请求失败")
        mock_session.return_value = mock_session_instance

        client = LLMClient()
        client.session = mock_session_instance

        tools = [{'name': 'test_tool', 'description': '测试工具', 'parameters': {}}]
        user_input = '用户输入'
        system_prompt = '系统提示'

        # Log the test inputs.
        print("\n=== 测试场景: API请求失败 ===")
        print("输入信息:")
        print(f"  系统提示: {system_prompt}")
        print(f"  用户输入: {user_input}")
        print(f"  工具信息: {tools}")

        result = client.call_with_tools(tools, user_input, system_prompt)

        # Log the test outputs.
        print("输出信息:")
        print(f"  状态: {result['status']}")
        print(f"  错误类型: {result['error_type']}")
        print(f"  错误信息: {result['error_message']}")
        print("==============================\n")

        # Verify the error is reported rather than raised.
        self.assertEqual(result['status'], 'error')
        self.assertEqual(result['error_type'], 'unknown_error')
        self.assertIn('API请求失败', result['error_message'])
        mock_session_instance.post.assert_called_once()

    def test_set_api_key(self):
        """set_api_key should update both the config and the Authorization header."""
        client = LLMClient()
        new_api_key = 'new_test_api_key'

        client.set_api_key(new_api_key)

        self.assertEqual(client.config['api_key'], new_api_key)
        self.assertEqual(client.session.headers['Authorization'], f'Bearer {new_api_key}')

    def test_close(self):
        """close() should delegate to the underlying session's close()."""
        client = LLMClient()
        # Swap in a mock so the delegation can be asserted, then restore.
        original_close = client.session.close
        mock_close = MagicMock()
        client.session.close = mock_close

        client.close()

        mock_close.assert_called_once()
        client.session.close = original_close

    def test_create_llm_client(self):
        """create_llm_client should return an LLMClient built from the given config."""
        config = {'api_key': 'test_key'}
        client = create_llm_client(config)

        self.assertIsInstance(client, LLMClient)
        self.assertEqual(client.config['api_key'], 'test_key')


# Allow running this test module directly (e.g. `python test_llm_client.py`).
if __name__ == '__main__':
    unittest.main()