import asyncio
import json
import uuid
from typing import AsyncGenerator, Dict, Any, List
import httpx
from openai import AsyncOpenAI


class OllamaChatClient:
    """Streaming chat client for a local Ollama server (`/api/chat` endpoint)."""

    # Sampling parameters accepted as top-level kwargs, mirroring
    # VLLMChatClient's interface; they are merged into Ollama's "options".
    # (The original only honored kwargs["options"], silently ignoring a
    # top-level temperature/top_p/top_k — fixed here, backward-compatibly.)
    _OPTION_KEYS = ("temperature", "top_p", "top_k", "num_predict", "stop")

    def __init__(self, model: str = "deepseek-r1:7b", base_url: str = "http://localhost:11434"):
        self.model = model        # Ollama model tag
        self.base_url = base_url  # base URL of the Ollama HTTP API

    async def chat_stream(self, messages: List[Dict[str, str]], **kwargs) -> AsyncGenerator[str, None]:
        """
        Stream a chat completion from Ollama.

        Args:
            messages: list of {"role": ..., "content": ...} dicts.
            **kwargs: either an "options" dict passed straight through to
                Ollama, and/or individual sampling parameters
                (temperature, top_p, top_k, num_predict, stop).

        Yields:
            Text chunks as the model produces them. Connection/timeout
            failures are reported as yielded error strings, not raised.
        """
        api_url = f"{self.base_url}/api/chat"

        payload = {
            "model": self.model,
            "messages": messages,
            "stream": True,
            "options": {
                "temperature": 0.7,
                "top_p": 0.9,
                "top_k": 40,
            },
        }

        # Merge an explicit options dict first, then any recognized
        # top-level sampling kwargs (so both calling styles work, keeping
        # this client interchangeable with VLLMChatClient).
        payload["options"].update(kwargs.get("options", {}))
        for key in self._OPTION_KEYS:
            if key in kwargs:
                payload["options"][key] = kwargs[key]

        try:
            async with httpx.AsyncClient() as client:
                async with client.stream('POST', api_url, json=payload, timeout=60.0) as response:
                    response.raise_for_status()

                    # Ollama streams one JSON object per line (NDJSON).
                    async for line in response.aiter_lines():
                        if not line.strip():
                            continue
                        try:
                            data = json.loads(line)
                        except json.JSONDecodeError:
                            continue  # skip malformed/partial lines
                        if "message" in data:
                            content = data["message"].get("content", "")
                            if content:
                                yield content
                        if data.get("done", False):  # final-chunk marker
                            break
        except httpx.ConnectError:
            yield "错误：无法连接到Ollama服务，请确保Ollama正在运行"
        except httpx.TimeoutException:
            yield "错误：请求超时，请稍后重试"
        except Exception as e:
            yield f"请求错误: {str(e)}"


class VLLMChatClient:
    """Streaming chat client backed by vLLM's OpenAI-compatible API."""

    def __init__(self, model: str = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
                 base_url: str = "http://localhost:8000/v1"):
        self.model = model
        self.base_url = base_url
        self.api_key = "EMPTY"  # vLLM ignores the key, but the client requires one

        # One shared AsyncOpenAI client pointed at the vLLM server.
        self.client = AsyncOpenAI(
            base_url=base_url,
            api_key=self.api_key
        )

    async def chat_stream(self, messages: List[Dict[str, str]], **kwargs) -> AsyncGenerator[str, None]:
        """
        Stream a chat completion via the OpenAI-compatible endpoint.
        Keeps the same interface as OllamaChatClient.chat_stream.
        """
        try:
            # Core request parameters with the same defaults as before.
            params: Dict[str, Any] = {
                "model": self.model,
                "messages": messages,
                "stream": True,
                "temperature": kwargs.get("temperature", 0.7),
                "max_tokens": kwargs.get("max_tokens", 512),
                "top_p": kwargs.get("top_p", 0.9),
            }

            # Forward optional penalties / stop sequences only when supplied.
            optional = ("frequency_penalty", "presence_penalty", "stop")
            params.update({key: kwargs[key] for key in optional if key in kwargs})

            stream = await self.client.chat.completions.create(**params)

            # Relay each non-empty delta straight to the caller.
            async for chunk in stream:
                piece = chunk.choices[0].delta.content
                if piece is not None:
                    yield piece

        except Exception as e:
            yield f"请求错误: {str(e)}"


# Global chat client: Ollama backend (default)
chat_client = OllamaChatClient()
# Global chat client: vLLM backend (swap the comment below to switch)
# chat_client = VLLMChatClient()

async def generate_chat_stream(messages: List[Dict[str, str]], **kwargs) -> AsyncGenerator[str, None]:
    """
    Stream a chat response through the globally configured client.

    Args:
        messages: list of {"role": ..., "content": ...} dicts.
        **kwargs: extra generation parameters forwarded to the client.

    Yields:
        Text chunks produced by the model.
    """
    stream = chat_client.chat_stream(messages, **kwargs)
    async for piece in stream:
        yield piece


async def generate_chat_single(messages: List[Dict[str, str]], **kwargs) -> str:
    """
    Generate a complete (non-streaming) chat response.

    Args:
        messages: list of chat messages.
        **kwargs: extra generation parameters.

    Returns:
        The full response text, assembled from all streamed chunks.
    """
    parts: List[str] = []
    async for piece in generate_chat_stream(messages, **kwargs):
        parts.append(piece)
    return "".join(parts)


# 会话管理类（使用消息列表格式）
class SessionManager:
    """In-memory store of chat histories, keyed by session id."""

    def __init__(self):
        # session_id -> ordered list of {"role": ..., "content": ...} messages
        self.sessions: Dict[str, List[Dict[str, str]]] = {}

    def create_session(self, session_id: str = None) -> str:
        """Create (or reset) a session; generates a UUID when no id is given."""
        sid = str(uuid.uuid4()) if session_id is None else session_id
        self.sessions[sid] = []
        return sid

    def add_message(self, session_id: str, content: str, role: str):
        """Append one message, auto-creating the session on first use."""
        if session_id not in self.sessions:
            self.create_session(session_id)
        entry = {"role": role, "content": content}
        self.sessions[session_id].append(entry)

    def get_session_messages(self, session_id: str) -> List[Dict[str, str]]:
        """Return the session's message list ([] for unknown sessions)."""
        return self.sessions.get(session_id, [])

    def clear_session(self, session_id: str):
        """Drop all messages but keep the session registered."""
        if session_id in self.sessions:
            self.sessions[session_id] = []

    def delete_session(self, session_id: str):
        """Remove the session entirely; unknown ids are ignored."""
        self.sessions.pop(session_id, None)


# Global session manager shared by all session-aware helpers
session_manager = SessionManager()


async def generate_with_session(session_id: str, user_message: str, system_prompt: str = None, **kwargs) -> \
        AsyncGenerator[str, None]:
    """
    Stream a response using the stored session history (chat mode).

    Args:
        session_id: session identifier.
        user_message: the new user message.
        system_prompt: optional system prompt, used only when the session
            has no prior history.
        **kwargs: extra generation parameters.

    Yields:
        Text chunks produced by the model.
    """
    # Copy the history: get_session_messages returns the LIVE stored list
    # for existing sessions, so appending to it directly would mutate the
    # history and the add_message() calls below would then duplicate the
    # user message (aliasing bug in the original implementation).
    messages = list(session_manager.get_session_messages(session_id))

    # Prepend the system prompt on the first turn only.
    use_system = bool(system_prompt) and not messages
    if use_system:
        messages.append({"role": "system", "content": system_prompt})

    messages.append({"role": "user", "content": user_message})

    # Stream the response while accumulating the full text for the history.
    full_response = ""
    async for chunk in generate_chat_stream(messages, **kwargs):
        full_response += chunk
        yield chunk

    # Persist the turn explicitly (the original only kept the system prompt
    # alive by accident, via the aliasing noted above).
    if use_system:
        session_manager.add_message(session_id, system_prompt, role="system")
    session_manager.add_message(session_id, user_message, role="user")
    session_manager.add_message(session_id, full_response, role="assistant")


# 测试函数
async def test_chat_client():
    """Smoke-test the chat client: plain streaming, then session-based chat with context."""
    # Plain (stateless) streaming chat.
    print("测试聊天模式...")
    messages = [
        {"role": "user", "content": "你好，请介绍一下Python"}
    ]
    async for chunk in generate_chat_stream(messages):
        print(chunk, end="", flush=True)
    print("\n")

    # Session-based chat: second question should see the first turn's context.
    print("测试会话管理...")
    session_id = session_manager.create_session()
    print(f"创建会话: {session_id}")

    print("第一次提问:")
    async for chunk in generate_with_session(session_id, "你好，我叫小明"):
        print(chunk, end="", flush=True)
    print("\n")

    print("第二次提问（应该有上下文）:")
    async for chunk in generate_with_session(session_id, "我刚才说我叫什么名字？"):
        print(chunk, end="", flush=True)
    print("\n")

    # Dump the accumulated history for manual inspection.
    print("会话历史:")
    for msg in session_manager.get_session_messages(session_id):
        print(f"{msg['role']}: {msg['content']}")

if __name__ == "__main__":
    # Run the smoke test when executed as a script.
    asyncio.run(test_chat_client())