import asyncio
from typing import AsyncGenerator, Dict, List, Optional

from fastapi.responses import StreamingResponse
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.memory import ChatMemoryBuffer
from sqlalchemy.orm import Session

from models import Conversation, Message, MessageRole, User
from rag_pro11.rag.llms import deepseek_llm, moonshot_llm


class ChatService:
    """Conversation management and LLM chat backed by a SQL database.

    Persists user/assistant turns per conversation and replays the stored
    history into a llama-index ``SimpleChatEngine`` on every chat call,
    either as a single response (:meth:`chat`) or as an SSE token stream
    (:meth:`chat_stream`).
    """

    # History replayed into the engine is truncated to this many tokens.
    _MEMORY_TOKEN_LIMIT = 1024

    def __init__(self):
        # Default LLM backend; switchable at runtime via set_model().
        self.llm = deepseek_llm()

    def set_model(self, model_name: str) -> None:
        """Switch the active LLM backend.

        Args:
            model_name: ``"Moonshot"`` selects the Moonshot backend; any
                other value falls back to DeepSeek.
        """
        self.llm = moonshot_llm() if model_name == "Moonshot" else deepseek_llm()

    def create_conversation(self, db: Session, user_id: int, title: Optional[str] = None) -> Conversation:
        """Create and persist a new conversation for *user_id*.

        Args:
            db: Active SQLAlchemy session.
            user_id: Owner of the conversation.
            title: Optional title; falls back to the default "新会话".

        Returns:
            The refreshed ``Conversation`` row (primary key populated).
        """
        conversation = Conversation(user_id=user_id, title=title or "新会话")
        db.add(conversation)
        db.commit()
        db.refresh(conversation)
        return conversation

    def get_conversations(self, db: Session, user_id: int) -> List[Conversation]:
        """Return the user's conversations, most recently updated first."""
        return (
            db.query(Conversation)
            .filter(Conversation.user_id == user_id)
            .order_by(Conversation.updated_at.desc())
            .all()
        )

    def get_conversation_messages(self, db: Session, conversation_id: int) -> List[Message]:
        """Return all messages of a conversation in chronological order."""
        return (
            db.query(Message)
            .filter(Message.conversation_id == conversation_id)
            .order_by(Message.created_at)
            .all()
        )

    # ---- internal helpers -------------------------------------------------

    def _get_conversation_or_raise(self, db: Session, conversation_id: int) -> Conversation:
        """Fetch a conversation row or raise ``ValueError`` if missing."""
        conversation = db.query(Conversation).filter(Conversation.id == conversation_id).first()
        if not conversation:
            raise ValueError("会话不存在")
        return conversation

    def _save_message(self, db: Session, conversation_id: int, content: str, role: MessageRole) -> Message:
        """Persist a single message row and commit."""
        message = Message(conversation_id=conversation_id, content=content, role=role)
        db.add(message)
        db.commit()
        return message

    def _load_chat_history(self, db: Session, conversation_id: int) -> List[ChatMessage]:
        """Convert the stored conversation into llama-index ``ChatMessage``s."""
        return [
            # NOTE(review): msg.role is the models.MessageRole enum; assumed
            # value-compatible with llama-index roles ("user"/"assistant") —
            # confirm against the models module.
            ChatMessage(content=msg.content, role=msg.role)
            for msg in self.get_conversation_messages(db, conversation_id)
        ]

    def _build_engine(self, chat_history: List[ChatMessage], streaming: bool = False) -> SimpleChatEngine:
        """Build a chat engine over the current LLM with bounded memory."""
        memory = ChatMemoryBuffer.from_defaults(
            chat_history=chat_history,
            token_limit=self._MEMORY_TOKEN_LIMIT,
        )
        if streaming:
            return SimpleChatEngine.from_defaults(llm=self.llm, memory=memory, streaming=True)
        return SimpleChatEngine.from_defaults(llm=self.llm, memory=memory)

    # ---- chat entry points ------------------------------------------------

    async def chat(self, db: Session, conversation_id: int, user_message: str) -> Dict:
        """Run one chat turn and persist both sides of the exchange.

        Args:
            db: Active SQLAlchemy session.
            conversation_id: Target conversation.
            user_message: The user's new message.

        Returns:
            Dict with ``user_message``, ``assistant_message`` and
            ``conversation_id``.

        Raises:
            ValueError: If the conversation does not exist.
        """
        self._get_conversation_or_raise(db, conversation_id)

        # Load history BEFORE persisting the new user turn: the engine adds
        # user_message to memory itself, so including it in the replayed
        # history would send the user's turn to the model twice (the
        # original save-then-load ordering had this duplication bug).
        chat_history = self._load_chat_history(db, conversation_id)
        self._save_message(db, conversation_id, user_message, MessageRole.user)

        engine = self._build_engine(chat_history)
        response = await engine.astream_chat(user_message)

        # Drain the token stream into the full reply text.
        assistant_reply = "".join([t async for t in response.async_response_gen()])

        self._save_message(db, conversation_id, assistant_reply, MessageRole.assistant)

        return {
            "user_message": user_message,
            "assistant_message": assistant_reply,
            "conversation_id": conversation_id,
        }

    async def chat_stream(self, db: Session, conversation_id: int, user_message: str) -> AsyncGenerator[str, None]:
        """Stream the assistant reply as Server-Sent Events (SSE).

        Yields one ``data:``-framed SSE event per generated token; once the
        stream completes, the full reply is persisted to the database.

        Raises:
            ValueError: If the conversation does not exist.
        """
        self._get_conversation_or_raise(db, conversation_id)

        # Same ordering fix as chat(): the replayed history must not already
        # contain the new user turn.
        chat_history = self._load_chat_history(db, conversation_id)
        self._save_message(db, conversation_id, user_message, MessageRole.user)

        engine = self._build_engine(chat_history, streaming=True)
        response = await engine.astream_chat(user_message)

        assistant_reply = ""
        # TODO(review): a failure mid-stream leaves the user turn saved with
        # no assistant row; consider try/finally persistence if that matters.
        async for token in response.async_response_gen():
            assistant_reply += token
            # SSE framing: a newline inside the payload must become an extra
            # "data:" line — a raw newline would terminate the field and the
            # client would drop the remainder of the token (original bug for
            # multi-line Markdown tokens). Clients rejoin data lines with \n.
            for line in token.split("\n"):
                yield f"data: {line}\n"
            yield "\n"  # blank line terminates the SSE event

        self._save_message(db, conversation_id, assistant_reply, MessageRole.assistant)