from typing import Dict, Optional, List
import os
import json
from openai import OpenAI
from ollama import AsyncClient as OllamaClient
from config import ROOT_DIR
from dataclasses import dataclass
from dotenv import load_dotenv, set_key
from logger import logger

# Constants: default provider/model, API endpoints, and config file names.
DEFAULT_PROVIDER = "openai"
DEFAULT_MODEL = "gpt-3.5-turbo"
DEFAULT_API_BASE = "https://api.openai.com/v1"
DEFAULT_OLLAMA_API_BASE = "http://localhost:11434"
CONFIG_FILE = "config.json"
ENV_FILE = ".env"

@dataclass
class AIConfig:
    """Runtime AI connection settings loaded from config.json and .env."""
    provider: str  # provider name, e.g. "openai" or "ollama"
    api_key: str  # secret key, sourced from the .env file (never config.json)
    api_base: str  # API base URL for the provider
    model: str = ""  # model name; empty string means "use the provider default"

class AIService:
    """Thin wrapper around OpenAI / Ollama chat clients.

    Configuration is split across two files under the project root:
      * ``config.json`` — non-secret settings (provider, model)
      * ``.env``        — secrets (API key) and the API base URL
    """

    def __init__(self):
        # Paths of the config files currently in use; resolved by
        # _find_config_files() (called from _load_config), None until then.
        self.active_config_path: Optional[str] = None
        self.active_env_path: Optional[str] = None

        # Load persisted settings, then build the backing client.
        self.config = self._load_config()
        self.client = None
        self._initialize_client()

    def _find_config_files(self):
        """Resolve the config/env file locations under the project root."""
        self.active_config_path = os.path.join(ROOT_DIR, CONFIG_FILE)
        self.active_env_path = os.path.join(ROOT_DIR, ENV_FILE)

    @staticmethod
    def _ensure_parent_dir(path: str) -> None:
        """Create *path*'s parent directory if it has one.

        os.path.dirname() returns "" for a bare filename, and
        os.makedirs("") raises FileNotFoundError — so skip that case.
        """
        parent = os.path.dirname(path)
        if parent:
            os.makedirs(parent, exist_ok=True)

    def _load_config(self) -> AIConfig:
        """Load AI settings from config.json and the .env file.

        A missing config file is created with built-in defaults; an
        unreadable one is logged and replaced by defaults in memory.

        Returns:
            The effective AIConfig. Secrets (api_key/api_base) always come
            from environment variables, never from config.json.
        """
        logger.info("开始加载AI配置")

        self._find_config_files()

        # Pull secrets (API key / base URL) into the environment first.
        if os.path.exists(self.active_env_path):
            load_dotenv(self.active_env_path)

        default_config = {
            "provider": DEFAULT_PROVIDER,
            "model": DEFAULT_MODEL,
        }

        try:
            # First run: materialize the defaults on disk.
            if not os.path.exists(self.active_config_path):
                self._ensure_parent_dir(self.active_config_path)
                with open(self.active_config_path, 'w', encoding='utf-8') as f:
                    json.dump(default_config, f, indent=2)

            with open(self.active_config_path, 'r', encoding='utf-8') as f:
                config_data = json.load(f)
        except Exception as e:
            # Best effort: log and run with defaults rather than crash.
            logger.error(f"加载配置失败: {str(e)}")
            config_data = default_config

        return AIConfig(
            provider=config_data.get("provider", DEFAULT_PROVIDER),
            api_key=os.getenv("OPENAI_API_KEY", ""),
            api_base=os.getenv("OPENAI_API_BASE", DEFAULT_API_BASE),
            model=config_data.get("model", DEFAULT_MODEL),
        )

    def _save_config(self, config_path=None, env_path=None):
        """Persist the current config to config.json and the .env file.

        Args:
            config_path: Target config file; None uses the active path.
            env_path: Target .env file; None uses the active path.
        """
        try:
            config_file = config_path or self.active_config_path
            env_file = env_path or self.active_env_path

            # Non-secret settings go to config.json ...
            config_data = {
                "provider": self.config.provider,
                "model": self.config.model,
            }
            self._ensure_parent_dir(config_file)
            with open(config_file, 'w', encoding='utf-8') as f:
                json.dump(config_data, f, indent=2)

            # ... while secrets go to the .env file only.
            self._ensure_parent_dir(env_file)
            set_key(env_file, "OPENAI_API_KEY", self.config.api_key)
            set_key(env_file, "OPENAI_API_BASE", self.config.api_base)

        except Exception as e:
            logger.error(f"保存配置失败: {str(e)}")

    def _initialize_client(self):
        """(Re)build the backing client from the current config.

        On failure (or missing API key) the service is left unconfigured
        (self.client is None) and the error is logged.
        """
        logger.info("初始化AI客户端")
        # Drop any stale client so a failed init leaves us unconfigured.
        self.client = None

        try:
            if self.config.provider == "ollama":
                base_url = self.config.api_base or DEFAULT_OLLAMA_API_BASE
                self.client = OllamaClient(host=base_url)
            else:
                if not self.config.api_key:
                    logger.error("缺少API密钥")
                    return
                self.client = OpenAI(
                    api_key=self.config.api_key,
                    base_url=self.config.api_base
                )
        except Exception as e:
            logger.error(f"初始化AI客户端失败: {str(e)}")

    def update_config(self, provider: str, api_key: str, api_base: str, model: str, config_path: str = None, env_path: str = None):
        """Update the AI configuration, persist it, and rebuild the client.

        Args:
            provider: AI provider name ("openai" or "ollama")
            api_key: API key
            api_base: API base URL
            model: Model name
            config_path: Config file path; None uses the default path
            env_path: Env file path; None uses the default path
        """
        self.config = AIConfig(
            provider=provider,
            api_key=api_key,
            api_base=api_base,
            model=model
        )
        self._save_config(config_path, env_path)
        self.reload_env()
        # BUG FIX: previously this called self.__init__(), which re-read the
        # config from the *active* path — when a custom config_path was given,
        # the freshly saved settings were silently discarded in favor of the
        # stale file. Rebuilding only the client keeps the new config.
        self._initialize_client()

    async def send_message(self,
                          message: str,
                          conversation_history: List[Dict],
                          on_reasoning=None) -> Optional[str]:
        """Send a message to the configured AI provider.

        Args:
            message: The user message to send.
            conversation_history: Prior messages as role/content dicts.
            on_reasoning: Optional callback receiving any reasoning text.

        Returns:
            The assistant's reply, or a human-readable error string
            (errors are reported as strings, not raised).
        """
        if not self.client:
            return "请先配置AI服务"

        try:
            logger.info(conversation_history)

            # Dispatch to the provider-specific implementation.
            if self.config.provider == "openai":
                return await self._call_openai(
                    message,
                    conversation_history,
                    on_reasoning
                )
            if self.config.provider == "ollama":
                return await self._call_ollama(
                    message,
                    conversation_history,
                    on_reasoning
                )
            return f"未知的AI供应商: {self.config.provider}"

        except Exception as e:
            return f"AI服务错误: {str(e)}"

    async def _call_openai(self,
                          message: str,
                          conversation_history: List[Dict],
                          on_reasoning=None) -> str:
        """Call the OpenAI chat-completions API (streaming), collect the reply.

        NOTE(review): this uses the synchronous OpenAI client inside an async
        method, so the event loop blocks while streaming — consider migrating
        to AsyncOpenAI.
        """
        try:
            stream = self.client.chat.completions.create(
                model=self.config.model,
                messages=conversation_history + [{"role": "user", "content": message}],
                stream=True
            )

            reasoning_content = ""
            final_content = ""
            for chunk in stream:
                # Some providers emit keep-alive chunks with no choices.
                if not chunk.choices:
                    continue
                delta = chunk.choices[0].delta
                # BUG FIX: `reasoning_content` is a non-standard delta field
                # (DeepSeek-style reasoners); plain OpenAI deltas don't define
                # it, and attribute access raised AttributeError. getattr
                # degrades gracefully for standard responses.
                reasoning = getattr(delta, "reasoning_content", None)
                if reasoning:
                    reasoning_content += reasoning
                elif getattr(delta, "content", None):
                    final_content += delta.content

            # Deliver the full reasoning trace once streaming is complete.
            if on_reasoning and reasoning_content:
                on_reasoning(reasoning_content)

            return final_content

        except Exception as e:
            error_msg = f"OpenAI API错误: {str(e)}"
            logger.error(error_msg)
            return error_msg

    async def _call_ollama(self, message: str, conversation_history: List[Dict], on_reasoning=None) -> str:
        """Call the Ollama chat API (streaming) and collect the reply.

        Reasoning models emit their thinking wrapped in <think>...</think>
        tags inside the normal content stream; that span is routed to
        `on_reasoning` instead of the final reply.
        """
        try:
            if not isinstance(self.client, OllamaClient):
                return "Ollama客户端未正确初始化"

            formatted_messages = conversation_history + [{"role": "user", "content": message}]

            in_reasoning = False
            reasoning_content = ""
            final_content = ""

            # Native streaming API of the ollama client.
            async for chunk in await self.client.chat(
                model=self.config.model or "llama2",
                messages=formatted_messages,
                stream=True,
            ):
                content = chunk.message.content
                if not content:
                    continue

                if content.startswith("<think>"):  # reasoning span opens
                    in_reasoning = True

                if in_reasoning:
                    reasoning_content += content
                else:
                    final_content += content

                if content.endswith("</think>"):  # reasoning span closes
                    in_reasoning = False
                    # Flush the complete reasoning span to the callback.
                    if on_reasoning and reasoning_content:
                        on_reasoning(reasoning_content)

            return final_content

        except Exception as e:
            error_msg = f"Ollama API错误: {str(e)}"
            logger.error(error_msg)
            return error_msg

    def reload_env(self):
        """Reload environment variables from the active .env file."""
        # Guard: active_env_path is None until _find_config_files() runs.
        if self.active_env_path and os.path.exists(self.active_env_path):
            # Drop only our own variables so unrelated env state is untouched.
            for key in list(os.environ.keys()):
                if key.startswith('OPENAI_API'):
                    del os.environ[key]

            # Re-read the .env file, overriding current values.
            load_dotenv(self.active_env_path, override=True)