import asyncio
import json
from collections import defaultdict
from typing import Dict, List
from tanxi.client.Handler.Logger import log_info, log_error
from openai import AsyncOpenAI, OpenAIError
# Base configuration (API keys etc.)
from demoPython.config.BaseConfig import *


class AsyncChatManager:
    """Manage per-user chat histories and streaming LLM calls.

    Concurrency model:
      - each user has a dedicated :class:`asyncio.Lock` guarding the
        history list, so concurrent appends/reads do not interleave;
      - at most one streaming response is active per user at a time —
        a new request cancels the previous in-flight one.
    """

    # Maximum number of messages retained per user (oldest are dropped).
    MAX_HISTORY = 20

    def __init__(self):
        # User chat history {user_id: List[dict]}
        self.user_chat_histories: Dict[str, List[dict]] = defaultdict(list)

        # Per-user lock {user_id: asyncio.Lock}; defaultdict creates one lazily.
        self.user_locks: Dict[str, asyncio.Lock] = defaultdict(asyncio.Lock)

        # Active streaming task per user {user_id: asyncio.Task}
        self.active_tasks: Dict[str, asyncio.Task] = {}

    async def add_message(self, user_id: str, role: str, content: str):
        """Append one message to *user_id*'s history, trimming to MAX_HISTORY.

        Args:
            user_id: key identifying the conversation.
            role: "user", "assistant", or "system".
            content: message text.
        """
        async with self.user_locks[user_id]:
            self.user_chat_histories[user_id].append({
                "role": role,
                "content": content,
                # BUGFIX: get_event_loop() is deprecated inside a running
                # coroutine; get_running_loop() is the supported call.
                "timestamp": asyncio.get_running_loop().time(),
            })

            # Keep only the most recent MAX_HISTORY messages.
            if len(self.user_chat_histories[user_id]) > self.MAX_HISTORY:
                self.user_chat_histories[user_id] = (
                    self.user_chat_histories[user_id][-self.MAX_HISTORY:]
                )

    async def get_chat_history(self, user_id: str) -> List[dict]:
        """Return a snapshot copy of *user_id*'s history.

        A copy is returned so callers cannot mutate the internal list
        outside the per-user lock.
        """
        async with self.user_locks[user_id]:
            return list(self.user_chat_histories.get(user_id, []))

    async def clear_history(self, user_id: str):
        """Discard all stored messages for *user_id*."""
        async with self.user_locks[user_id]:
            self.user_chat_histories[user_id] = []

    async def stream_response(self, user_id: str, model_name: str, user_input: str):
        """Stream an LLM reply for *user_id*, cancelling any in-flight stream.

        Yields:
            dicts of the form {"user_id", "content", "complete"} per chunk,
            or {"error": str} if streaming fails.
        """
        # Cancel a still-running stream for this user so replies don't interleave.
        previous = self.active_tasks.get(user_id)
        if previous is not None and not previous.done():
            previous.cancel()
            try:
                await previous
            except asyncio.CancelledError:
                log_info(f"Cancelled previous task for user {user_id}")

        # BUGFIX: the original never registered the current task, so the
        # cancellation bookkeeping above could never trigger. Register it here.
        current = asyncio.current_task()
        if current is not None:
            self.active_tasks[user_id] = current

        await self.add_message(user_id, "user", user_input)
        history = await self.get_chat_history(user_id)
        log_info(f"获取到的历史: {history}  调用模型{model_name}")

        try:
            async for chunk in self._stream_llm_response(user_id, model_name, history):
                yield chunk
        except Exception as e:
            log_error(f"Error in streaming for user {user_id}: {e}")
            yield {"error": str(e)}
        finally:
            # Deregister only if the slot still belongs to this task —
            # a newer request may already have replaced it.
            if self.active_tasks.get(user_id) is current:
                self.active_tasks.pop(user_id, None)

    async def _stream_llm_response(self, user_id: str, model_name: str, history: List[dict]):
        """Perform the streaming API call and persist the assistant reply.

        Yields incremental chunks, then a final chunk with
        ``complete=True`` carrying ``full_response`` (or ``error``).
        """
        try:
            # Resolve backend; raises on unknown model names.
            client, model = await self.model_choose(model_name)
            if not client or not model:
                raise ValueError(f"Invalid client or model for {model_name}")

            # Strip internal fields (e.g. timestamp) down to the API schema.
            messages = [{"role": msg["role"], "content": msg["content"]}
                        for msg in history]

            full_response = ""
            try:
                stream = await client.chat.completions.create(
                    model=model,
                    messages=messages,
                    stream=True,
                )
            except Exception as e:
                raise ConnectionError(f"Failed to connect to {model_name} service: {str(e)}")

            async for chunk in stream:
                # BUGFIX: some providers emit keep-alive chunks with an empty
                # choices list; indexing [0] unguarded would raise IndexError.
                if not chunk.choices:
                    continue
                content = chunk.choices[0].delta.content
                if content:
                    full_response += content
                    yield {
                        "user_id": user_id,
                        "content": content,
                        "complete": False,
                    }

            # Persist the assembled assistant reply.
            await self.add_message(user_id, "assistant", full_response)

            yield {
                "user_id": user_id,
                "content": "",
                "complete": True,
                "full_response": full_response,
            }

        except Exception as e:
            # The original had two byte-identical handlers for ConnectionError
            # and Exception; a single broad handler is equivalent.
            log_error(f"LLM streaming error for user {user_id}: {e}")
            await self.add_message(user_id, "system", f"Error: {str(e)}")
            yield {
                "user_id": user_id,
                "content": "",
                "complete": True,
                "error": str(e),
            }

    async def model_choose(self, model_name: str):
        """Resolve *model_name* to an ``(AsyncOpenAI client, model id)`` pair.

        Raises:
            ValueError: for an unsupported model name. (The original printed
                a message and then crashed with UnboundLocalError because
                neither ``client`` nor ``model`` was assigned in the else
                branch; raising makes the failure explicit and catchable.)
        """
        log_info(f"选择模型 {model_name}")
        model_name = str(model_name).lower()

        # Backend registry: name -> (api_key, base_url, model id).
        # API keys come from demoPython.config.BaseConfig; hard-coded keys
        # previously left in comments have been removed for security.
        backends = {
            "deepseek": (deepseek_api_key,
                         "https://api.deepseek.com",
                         "deepseek-chat"),
            "ollama": ("ollama",
                       "http://192.168.0.106:11434/v1",
                       "deepseek-r1:7b"),
            "doubao": (doubao_api_key,
                       "https://ark.cn-beijing.volces.com/api/v3",
                       "ep-20241120160619-qjb5v"),  # model endpoint id
            "hunyuan": (hunyuan_api_key,
                        "https://api.hunyuan.cloud.tencent.com/v1",
                        "hunyuan-turbo"),
            "tongyi": (tongyi_api_key,
                       "https://dashscope.aliyuncs.com/compatible-mode/v1",
                       "qwen-turbo"),
        }

        try:
            api_key, base_url, model = backends[model_name]
        except KeyError:
            raise ValueError(f"暂不支持此模型: {model_name}") from None

        client = AsyncOpenAI(api_key=api_key, base_url=base_url)
        log_info(f"传回模型 {client} {model}")
        return client, model