import asyncio
import time
from typing import AsyncGenerator

from dotenv import load_dotenv
from fastapi import HTTPException
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory

from llm import llmConfig
from llm.llmHistory import get_session_history

# 加载环境变量
load_dotenv()


def get_chat_chain(streaming: bool = False, callbacks=None):
    """Build a conversational chain that tracks per-session message history.

    Args:
        streaming: Whether the underlying LLM should stream tokens.
        callbacks: Optional callback handlers forwarded to the LLM.

    Returns:
        A RunnableWithMessageHistory wrapping ``prompt | llm``.
    """
    # Prompt layout: fixed system instruction, prior turns, then the new input.
    system_message = ("system", "你是一个有用的AI助手，根据用户的问题给出简洁明了的回答。")
    history_slot = MessagesPlaceholder(variable_name="chat_history")
    human_message = ("human", "{input}")
    chat_prompt = ChatPromptTemplate.from_messages(
        [system_message, history_slot, human_message]
    )

    # Compose the prompt with the shared LLM configuration.
    base_chain = chat_prompt | llmConfig.getCommonLLM(streaming, callbacks)

    # History is resolved per session via get_session_history; the keys tell
    # the wrapper where the new input goes and where past turns are injected.
    return RunnableWithMessageHistory(
        base_chain,
        get_session_history=get_session_history,
        input_messages_key="input",
        history_messages_key="chat_history",
    )


def commonAnswer(user_input: str, session_id: str = "default") -> str:
    """Produce a complete (non-streaming) AI answer for the given input.

    Args:
        user_input: The user's question.
        session_id: Conversation session identifier for history lookup.

    Returns:
        The full answer text.

    Raises:
        HTTPException: With status 500 when the LLM call fails.
    """
    try:
        # Non-streaming chain; the chain carries its own message history.
        chain = get_chat_chain(streaming=False)
        answer = chain.invoke(
            {"input": user_input},
            config={"configurable": {"session_id": session_id}},
        )
        print("response:", answer.content)
        return answer.content
    except Exception as err:
        # Surface any failure to the API layer as a 500.
        print(f"AI回答出错: {str(err)}")
        raise HTTPException(status_code=500, detail=f"AI请求失败: {str(err)}")


async def streamingAnswer(user_input: str, session_id: str = "default") -> AsyncGenerator[str, None]:
    """Stream an AI answer, yielding small text chunks as they are generated.

    Fixes over the previous version:
    - The retry loop never retried: ``retry_count`` was never incremented, no
      exception was caught, and the loop always broke on the first pass. Retries
      now actually happen (only when nothing has been sent yet, so the client
      never receives duplicated content).
    - The timeout check compared against a timestamp refreshed in the same
      iteration, so a stalled stream hung forever. The wait for each chunk is
      now bounded with ``asyncio.wait_for``.

    Args:
        user_input: The user's question.
        session_id: Conversation session identifier for history lookup.

    Yields:
        Chunks of the generated answer (about ``chunk_size`` characters each).

    Raises:
        HTTPException: With status 500 if generation fails after all retries,
            or if it fails after partial output was already streamed.
    """
    max_retries = 3
    chunk_size = 5  # small chunks improve stream stability on the client side
    timeout_seconds = 10  # max wait for each chunk from the model
    sent_any = False  # once True, retrying would duplicate streamed content

    for attempt in range(1, max_retries + 1):
        try:
            # Streaming callback echoes tokens to stdout for debugging.
            callback_handler = StreamingStdOutCallbackHandler()
            chain_with_history = get_chat_chain(streaming=True, callbacks=[callback_handler])

            stream = chain_with_history.astream(
                {"input": user_input},
                config={"configurable": {"session_id": session_id}},
            ).__aiter__()

            buffer = ""
            while True:
                # Bound the wait for the next chunk so a stalled stream
                # cannot hang the generator indefinitely.
                try:
                    chunk = await asyncio.wait_for(stream.__anext__(), timeout=timeout_seconds)
                except StopAsyncIteration:
                    break
                except asyncio.TimeoutError:
                    print("流式回答生成超时，中断生成")
                    yield "\n[回答生成超时，返回已生成内容]"
                    sent_any = True
                    break

                # AIMessage chunks carry .content; fall back to str() otherwise.
                content = chunk.content if hasattr(chunk, "content") else str(chunk)
                buffer += content

                # Flush the buffer in fixed-size pieces.
                while len(buffer) >= chunk_size:
                    yield buffer[:chunk_size]
                    sent_any = True
                    buffer = buffer[chunk_size:]
                    # Brief pause to keep the outgoing stream steady.
                    await asyncio.sleep(0.01)

            # Flush whatever remains, then finish — success, no retry needed.
            if buffer:
                yield buffer
            return
        except Exception as e:
            print(f"流式回答第{attempt}次尝试出错: {str(e)}")
            # Never retry after partial output (would duplicate content);
            # otherwise retry until attempts are exhausted.
            if sent_any or attempt == max_retries:
                raise HTTPException(status_code=500, detail=f"AI请求失败: {str(e)}")
