import os
import json
import logging
from dotenv import load_dotenv
from openai import OpenAI

# Pull variables from a local .env file into the process environment.
load_dotenv()

# Logging: mirror every record to a UTF-8 log file and to the console.
_log_handlers = [
    logging.FileHandler("deepseek_api.log", encoding="utf-8"),
    logging.StreamHandler(),
]
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=_log_handlers,
)
logger = logging.getLogger(__name__)

# DeepSeek credentials and endpoint, sourced from the environment.
DEEPSEEK_API_KEY = os.getenv('DEEPSEEK_API_KEY')
DEEPSEEK_API_URL = os.getenv('DEEPSEEK_API_URL', 'https://api.deepseek.com')

class DeepseekAPI:
    """Thin wrapper around the DeepSeek chat-completion API.

    Uses the OpenAI-compatible client pointed at DeepSeek's base URL. When
    the API key is missing or still set to the placeholder value, requests
    fall back to locally generated mock responses so the rest of the
    application keeps working during development.
    """

    def __init__(self):
        # Warn (but do not fail) when the key is absent; chat_completion
        # will serve mock responses instead of calling the real API.
        if not DEEPSEEK_API_KEY:
            logger.warning('未找到Deepseek API密钥，请在.env文件中设置DEEPSEEK_API_KEY')

        self.client = OpenAI(
            api_key=DEEPSEEK_API_KEY,
            base_url=DEEPSEEK_API_URL  # base URL recommended by the official docs
        )

    def chat_completion(self, messages, model="deepseek-chat", max_tokens=2048, stream=False):
        """Run a chat-completion request.

        Args:
            messages: Conversation history, a list of
                ``{"role": ..., "content": ...}`` dicts.
            model: ``'deepseek-chat'`` (V3) or ``'deepseek-reasoner'`` (R1
                reasoning model); any other value is reset to
                ``'deepseek-chat'`` with a warning.
            max_tokens: Maximum output tokens; values outside 1-8192 are
                reset to 2048 with a warning.
            stream: Whether to stream the response.

        Returns:
            The full response object when ``stream=False``; a generator of
            streamed chunks when ``stream=True``.

        Raises:
            Exception: Any error from the underlying API call is logged
                and re-raised.
        """
        try:
            # No usable key: return a mock so callers still get a response.
            if not DEEPSEEK_API_KEY or DEEPSEEK_API_KEY == 'your_deepseek_key_here':
                logger.warning('Deepseek API密钥未正确配置，返回模拟响应')
                if stream:
                    return self._mock_stream_completion(messages)
                return self._mock_completion(messages)

            # Validate the model name.
            if model not in ("deepseek-chat", "deepseek-reasoner"):
                logger.warning(f'不支持的模型: {model}，将使用默认模型 deepseek-chat')
                model = "deepseek-chat"

            # Validate the max_tokens range.
            if not 1 <= max_tokens <= 8192:
                logger.warning(f'max_tokens超出范围: {max_tokens}，将使用默认值2048')
                max_tokens = 2048

            logger.info(f'调用DeepSeek API，模型: {model}, 流式输出: {stream}')

            # Call the API through the OpenAI-compatible client.
            response = self.client.chat.completions.create(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                stream=stream
                # NOTE: per the docs, temperature/top_p etc. would not take
                # effect, so they are deliberately not exposed here.
            )

            logger.info('API调用成功')
            return response

        except Exception as e:
            logger.error(f'调用DeepSeek API失败: {str(e)}')
            raise

    def review_document(self, content, prompt_template=None):
        """Review a document with the deepseek-reasoner (R1) model.

        Args:
            content: Document text to review.
            prompt_template: Optional template containing a ``{content}``
                placeholder; a default Chinese review prompt is used when
                None or empty.

        Returns:
            dict with ``"result"`` (the review text) and ``"thinking"``
            (a note about where the reasoning appears). On failure the
            dict carries the error description instead of raising.
        """
        try:
            if not prompt_template:
                prompt_template = """请审核以下文档内容是否符合要求，并给出详细分析：

{content}

分析结果请包含以下部分：
1. 主要内容摘要
2. 是否符合规范要求
3. 改进建议（如有）
"""

            prompt = prompt_template.format(content=content)

            messages = [
                {"role": "user", "content": prompt}
            ]

            # Use the R1 reasoning model for document review.
            response = self.chat_completion(
                messages=messages,
                model="deepseek-reasoner",
                max_tokens=4096,
                stream=False
            )

            # Fix: use a distinct name instead of shadowing the `content`
            # parameter with the response text.
            if hasattr(response, 'choices') and len(response.choices) > 0:
                result_text = response.choices[0].message.content
                return {
                    "result": result_text,
                    "thinking": "分析过程已包含在结果中"
                }
            else:
                return {
                    "result": "审核失败，无法获取结果",
                    "thinking": "API响应格式异常"
                }

        except Exception as e:
            logger.error(f'文档审核失败: {str(e)}')
            return {
                "result": f"审核过程发生错误: {str(e)}",
                "thinking": "API调用失败"
            }

    def _mock_completion(self, messages):
        """Return a canned non-streaming response (API key not configured)."""
        user_message = messages[-1]['content'] if messages else "空消息"
        logger.warning(f'使用模拟响应代替实际API调用，用户消息: {user_message[:30]}...')

        # Minimal stand-ins mirroring the shape of the real response object.
        class MockMessage:
            def __init__(self, content):
                self.content = content
                # Fix: expose reasoning_content (None for the mock) so
                # callers that read it — e.g. the __main__ demo — don't
                # crash with AttributeError.
                self.reasoning_content = None

        class MockChoice:
            def __init__(self, content):
                self.message = MockMessage(content)

        class MockResponse:
            def __init__(self, content):
                self.choices = [MockChoice(content)]

        return MockResponse(f"这是一个模拟响应。\n您的消息是：{user_message}\n模拟回答：这是一个模拟回答。")

    def _mock_stream_completion(self, messages):
        """Yield mock streaming chunks: reasoning pieces first, then content."""
        user_message = messages[-1]['content'] if messages and messages[-1]['role'] == 'user' else ''

        # Simulated chain-of-thought text.
        mock_reasoning = f"思考分析问题：'{user_message}'\n\n我需要理解用户的问题并给出合适的回答。让我分析一下。\n\n此问题涉及到...\n\n经过分析我认为..."

        # Simulated final answer.
        mock_content = f"您好！关于'{user_message}'的问题，我的回答是...请问还有其他问题吗？"

        # Split both texts into 5-character pieces to mimic streaming.
        reasoning_chunks = [mock_reasoning[i:i + 5] for i in range(0, len(mock_reasoning), 5)]
        content_chunks = [mock_content[i:i + 5] for i in range(0, len(mock_content), 5)]

        # Minimal stand-ins mirroring the shape of a real stream chunk.
        class MockDelta:
            def __init__(self, content=None, reasoning_content=None):
                self.content = content
                self.reasoning_content = reasoning_content

        class MockChoice:
            def __init__(self, delta):
                self.delta = delta

        class MockChunk:
            def __init__(self, choices):
                self.choices = choices

        # Stream the reasoning pieces first...
        for piece in reasoning_chunks:
            yield MockChunk([MockChoice(MockDelta(reasoning_content=piece))])

        # ...then the final-answer pieces.
        for piece in content_chunks:
            yield MockChunk([MockChoice(MockDelta(content=piece))])

# Smoke test when the module is executed directly.
if __name__ == "__main__":
    api = DeepseekAPI()
    messages = [{"role": "user", "content": "9.11和9.8哪个更大？"}]

    # Non-streaming call.
    response = api.chat_completion(messages)
    print("=== 非流式响应 ===")
    message = response.choices[0].message
    # Fix: reasoning_content is only present on deepseek-reasoner
    # responses; guard the access so the default deepseek-chat model
    # doesn't crash with AttributeError here.
    print(f"思维链: {getattr(message, 'reasoning_content', None)}")
    print(f"最终回答: {message.content}")

    # Streaming call.
    print("\n=== 流式响应 ===")
    stream_response = api.chat_completion(messages, stream=True)
    reasoning_content = ""
    content = ""

    for chunk in stream_response:
        delta = chunk.choices[0].delta
        # Same guard for stream deltas, which may carry only one of the
        # two fields (or neither) per chunk.
        reasoning_piece = getattr(delta, 'reasoning_content', None)
        if reasoning_piece:
            reasoning_content += reasoning_piece
            print("思维链更新: " + reasoning_piece, end="")
        elif getattr(delta, 'content', None):
            content += delta.content
            print("\n内容更新: " + delta.content, end="")

    print("\n\n最终思维链:", reasoning_content)
    print("最终内容:", content)
