import os
import requests
import certifi
import datetime
import logging
import json
from abc import ABC, abstractmethod
from dotenv import load_dotenv

logger = logging.getLogger(__name__)

class BaseLLMInterface(ABC):
    """Abstract base class for LLM interfaces.

    Holds the shared generation configuration (temperature, thinking depth,
    reasoning steps, chain-of-thought flag) plus validation logic for it.
    Concrete backends implement ``generate_text``.
    """

    def __init__(self):
        self.thinking_depth = "balanced"  # one of: balanced / deep / reasoning
        self.reasoning_steps = 5  # default to 5 steps for better reasoning
        self.enable_cot = True  # enable chain-of-thought by default
        # BUG FIX: the original assigned 0.7 and then immediately overwrote it
        # with the *string* "1.0". Keep a single numeric default instead.
        self.temperature = 1.0
        self.top_p = 0.95  # slightly higher top_p for better diversity
        # Suggested temperatures per usage scenario (reference values; not
        # applied automatically anywhere in this class).
        self.scenario_temps = {
            "code": 0.0,        # code generation / math problem solving
            "analysis": 1.0,    # data extraction / analysis
            "general": 1.3,     # general conversation
            "translation": 1.3, # translation
            "creative": 1.5     # creative writing / poetry
        }

    @abstractmethod
    def generate_text(self, prompt):
        """Generate text for the given prompt (implemented by subclasses)."""
        pass

    def set_temperature(self, temperature):
        """Set the sampling temperature.

        Args:
            temperature: numeric value (or numeric string) in [0.0, 1.5].

        Returns:
            True on success.

        Raises:
            ValueError: if the value is outside [0.0, 1.5] or not numeric.
        """
        value = float(temperature)
        if value < 0.0 or value > 1.5:
            raise ValueError(f"温度值必须在0.0-1.5之间: {value}")
        # BUG FIX: the original validated the value but never stored it.
        self.temperature = value
        return True

    def set_thinking_depth(self, depth):
        """Set the thinking depth and auto-adjust the reasoning step count.

        Args:
            depth (str): one of:
                - 'balanced': balanced mode, for general tasks
                - 'deep': deep mode, for complex analysis
                - 'reasoning': reasoning mode, for logical inference tasks

        Returns:
            True if the depth was accepted, False otherwise.
        """
        if depth in ["balanced", "deep", "reasoning"]:
            self.thinking_depth = depth
            # Each depth implies a matching number of reasoning steps.
            self.reasoning_steps = {
                "balanced": 5,
                "deep": 7,
                "reasoning": 10
            }[depth]
            logger.info(f"思考深度设置为: {depth}, 自动调整推理步数为: {self.reasoning_steps}")
            return True
        logger.warning(f"无效的思考深度设置: {depth}")
        return False

    def set_reasoning_steps(self, steps):
        """Set the number of reasoning steps.

        Args:
            steps (int): reasoning step count; must be an int in [1, 10].

        Returns:
            True if accepted, False otherwise.
        """
        if isinstance(steps, int) and 1 <= steps <= 10:
            self.reasoning_steps = steps
            logger.info(f"推理步数设置为: {steps}")
            return True
        logger.warning(f"无效的推理步数设置: {steps} (必须在1-10之间)")
        return False

    def set_enable_cot(self, enable):
        """Enable or disable chain-of-thought prompting; always returns True."""
        self.enable_cot = bool(enable)
        return True

class DeepseekInterface(BaseLLMInterface):
    """Deepseek API interface implementation.

    Talks to the Deepseek chat-completions endpoint over HTTPS and dumps
    every full response to a timestamped file under a sibling ``debug/`` dir.
    """

    def __init__(self):
        super().__init__()
        load_dotenv()  # pull DEEPSEEK_API_KEY from a .env file if present
        self.api_key = os.getenv("DEEPSEEK_API_KEY")
        self.api_base = "https://api.deepseek.com/v1"
        self.current_model = "deepseek-chat"
        # BUG FIX: was the string "1.0"; keep temperature numeric.
        self.temperature = 1.0
        # NOTE(review): max_tokens values 5096/9192 look like typos of the
        # usual 4096/8192 — left unchanged; confirm against the API docs.
        self.available_models = {
            "deepseek-chat": {
                "description": "通用聊天模型",
                "max_tokens": 5096,
                "supports_cot": True,
                "default_temperature": 0.7
            },
            "deepseek-reasoner": {
                "description": "推理专用模型",
                "max_tokens": 9192,
                "supports_cot": True,
                "default_temperature": 0.5
            }
        }

    def set_model(self, model_name):
        """Switch the active model and auto-tune the reasoning configuration.

        Args:
            model_name (str): "deepseek-chat" or "deepseek-reasoner".

        Returns:
            True if the model exists, False otherwise.
        """
        if model_name in self.available_models:
            self.current_model = model_name
            # Auto-adjust configuration by model type (the reasoner gets a
            # deeper profile with chain-of-thought enabled).
            if model_name == "deepseek-reasoner":
                self.thinking_depth = "reasoning"
                self.reasoning_steps = 7
                self.enable_cot = True
                logger.info(f"切换到推理模型，自动调整配置: 思考深度=reasoning, 推理步数=7, 启用思维链")
            else:
                self.thinking_depth = "balanced"
                self.reasoning_steps = 5
                self.enable_cot = False
                logger.info(f"切换到聊天模型，自动调整配置: 思考深度=balanced, 推理步数=5, 禁用思维链")
            return True
        logger.warning(f"无效的模型名称: {model_name}")
        return False

    def get_available_models(self):
        """Return the catalog of available models and their metadata."""
        return self.available_models

    def _save_debug_response(self, prompt, result):
        """Persist the full prompt/response pair to a timestamped debug file."""
        current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        debug_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "debug")
        os.makedirs(debug_dir, exist_ok=True)
        debug_file = os.path.join(debug_dir, f"deepseek_response_{current_time}.txt")
        with open(debug_file, "w", encoding="utf-8") as f:
            f.write(f"提示词:\n{prompt}\n\n响应:\n{result}")
        logger.info(f"已保存完整的DeepSeek响应到: {debug_file}")

    def generate_text(self, prompt, **kwargs):
        """Generate text via the Deepseek chat-completions API.

        Args:
            prompt: user prompt string.
            **kwargs: raw request-body overrides (e.g. temperature, max_tokens)
                applied last, so they win over the computed defaults.

        Returns:
            The generated message content string.

        Raises:
            ValueError: on invalid configuration or over-long prompt.
            Exception: when the API responds with a non-200 status.
        """
        logger.debug(f"生成文本 - 提示词长度: {len(prompt)}")
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.api_key}"
            }

            # Validate configuration before building the request.
            if self.thinking_depth not in ["balanced", "deep", "reasoning"]:
                raise ValueError(f"无效的思考深度: {self.thinking_depth}")
            if self.reasoning_steps < 1 or self.reasoning_steps > 10:
                raise ValueError(f"推理步数必须在1-10之间: {self.reasoning_steps}")

            # Check the model supports the current configuration.
            model_info = self.available_models[self.current_model]
            if self.enable_cot and not model_info["supports_cot"]:
                raise ValueError(f"当前模型不支持思维链: {self.current_model}")

            # NOTE(review): this compares *characters* against a token budget,
            # so it only approximates the real token count.
            if len(prompt) > model_info["max_tokens"]:
                raise ValueError(f"提示词长度超过模型限制: {len(prompt)} > {model_info['max_tokens']}")

            # Deep reasoning mode needs at least 5 steps.
            if self.thinking_depth == "reasoning" and self.reasoning_steps < 5:
                self.reasoning_steps = 5
                logger.info(f"深度推理模式自动调整推理步数为: {self.reasoning_steps}")

            data = {
                "model": self.current_model,
                "messages": [{"role": "user", "content": prompt}],
                "temperature": float(self.temperature),
                "top_p": 0.9,
                "frequency_penalty": 0.0,
                "presence_penalty": 0.0,
                "stream": False,
                # BUG FIX: clamp to at least 1 — a prompt exactly at the limit
                # previously produced max_tokens=0, which is an invalid request.
                "max_tokens": max(1, model_info["max_tokens"] - len(prompt))
            }

            # The reasoner model prefers a lower temperature for stable output.
            if self.current_model == "deepseek-reasoner":
                data["temperature"] = 0.5
                logger.info(f"推理模型自动调整温度为: 0.5")

            # BUG FIX: apply caller overrides *before* logging so the debug
            # output reflects the request actually sent.
            for key, value in kwargs.items():
                data[key] = value

            logger.debug(f"请求参数: {json.dumps(data, indent=2, ensure_ascii=False)}")

            response = requests.post(
                f"{self.api_base}/chat/completions",
                headers=headers,
                json=data,
                timeout=60,
                verify=certifi.where()
            )

            if response.status_code != 200:
                raise Exception(f"API请求失败: {response.status_code} - {response.text}")

            result = response.json()["choices"][0]["message"]["content"]
            logger.debug(f"DeepSeek响应前100字符: {result[:100]}...")
            self._save_debug_response(prompt, result)
            return result

        except Exception as e:
            logger.error(f"生成文本时出错: {str(e)}", exc_info=True)
            raise

class LLMFactory:
    """Factory for constructing LLM interface instances."""

    @staticmethod
    def create_llm(llm_type="deepseek"):
        """Build and return an LLM backend for the given type name.

        The type name is matched case-insensitively; unknown types raise
        ValueError. Additional backends can be registered here.
        """
        requested = llm_type.lower()
        if requested == "deepseek":
            return DeepseekInterface()
        raise ValueError(f"不支持的LLM类型: {llm_type}")

# 默认使用Deepseek接口
class LLMInterface:
    """Default facade that delegates to a factory-created LLM backend.

    Thin wrapper so callers don't depend on a concrete interface class;
    every method forwards to the underlying backend instance.
    """

    def __init__(self):
        self.llm = LLMFactory.create_llm()  # factory default is Deepseek

    def set_model(self, model_name):
        """Delegate model selection to the backend."""
        return self.llm.set_model(model_name)

    def set_temperature(self, temperature):
        """Delegate temperature setting to the backend."""
        return self.llm.set_temperature(temperature)

    def get_available_models(self):
        """Return the backend's model catalog."""
        return self.llm.get_available_models()

    def generate_text(self, prompt, **kwargs):
        """Generate text for the prompt.

        BUG FIX: the original dropped keyword arguments even though the
        Deepseek backend accepts request-body overrides via **kwargs; they
        are now forwarded (calls without kwargs behave exactly as before).
        """
        return self.llm.generate_text(prompt, **kwargs)

    def set_thinking_depth(self, depth):
        """Delegate thinking-depth setting to the backend."""
        return self.llm.set_thinking_depth(depth)

    def set_reasoning_steps(self, steps):
        """Delegate reasoning-step count to the backend."""
        return self.llm.set_reasoning_steps(steps)

    def set_enable_cot(self, enable):
        """Delegate chain-of-thought toggle to the backend."""
        return self.llm.set_enable_cot(enable)
