# app/services/ai_service.py
import json
import os

from fastapi import HTTPException
from fastapi.responses import StreamingResponse
from openai import OpenAI

from app.model.schemas import ModelRequest

# Fallback error text / retry budget for upstream API calls.
# NOTE(review): neither constant is referenced anywhere in this file —
# presumably imported by other modules; verify before removing.
API_ERROR_OUTPUT = "API 调用失败"
API_MAX_RETRY = 3


class AIService:
    """Service layer that proxies chat requests to Qwen through DashScope's
    OpenAI-compatible endpoint, with optional SSE streaming.

    The service is stateless: every entry point builds its own client from
    the ``QWEN_API_KEY`` environment variable via :meth:`_build_client`.
    """

    def __init__(self):
        # No per-instance state; kept so existing `AIService()` callers work.
        pass

    @staticmethod
    def _build_client():
        """Build an OpenAI-compatible client bound to DashScope.

        Centralizes the env-var check + client construction that was
        previously duplicated in every entry point.

        Returns:
            OpenAI: client pointed at the DashScope compatible-mode endpoint.

        Raises:
            HTTPException: 500 when the ``QWEN_API_KEY`` env var is not set.
        """
        api_key = os.getenv("QWEN_API_KEY")
        if not api_key:
            raise HTTPException(status_code=500, detail="QWEN_API_KEY 环境变量未设置")
        return OpenAI(
            api_key=api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )

    @staticmethod
    async def qwen3_32b(request: ModelRequest):
        """Run a single-turn chat against Qwen and stream the reply as SSE.

        Args:
            request: carries the user prompt and the ``thinking`` flag that
                is forwarded as DashScope's ``enable_thinking`` extra.

        Returns:
            StreamingResponse emitting OpenAI-style chunk JSON events.

        Raises:
            HTTPException: 500 when the API key is missing.
        """
        client = AIService._build_client()

        # NOTE(review): this is a blocking SDK call inside `async def` — it
        # holds the event loop until the stream opens; consider a thread
        # pool (e.g. run_in_executor) if request concurrency matters.
        completion = client.chat.completions.create(
            model="qwen-plus",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": request.prompt}
            ],
            stream=True,
            extra_body={"enable_thinking": request.thinking}
        )

        return AIService.create_streaming_response(completion)

    @staticmethod
    async def generate_text(query: str, hybrid_search_results):
        """Answer *query* grounded in retrieval results (non-streaming).

        Args:
            query: the user's question.
            hybrid_search_results: iterable of ``(document_text, score)``
                pairs, e.g. from a hybrid BM25 + vector search.

        Returns:
            dict with the serialized completion under ``"answer"`` plus the
            context documents, their scores, and the context count.

        Raises:
            HTTPException: 500 when the API key is missing.
        """
        context_docs = [doc for doc, score in hybrid_search_results]
        context_scores = [score for doc, score in hybrid_search_results]
        context_str = "".join(context_docs) if context_docs else "无相关参考资料"

        # Build the grounded prompt. The leading whitespace inside the
        # f-string is part of the prompt text sent to the model.
        prompt = f"""
            ###参考资料###
            {context_str}
            ###用户问题###
            {query}
            """

        client = AIService._build_client()

        # NOTE(review): `enable_thinking=True` combined with `stream=False`
        # may be rejected by DashScope for some Qwen models — verify
        # against the API before relying on this path.
        completion = client.chat.completions.create(
            model="qwen-plus",
            messages=[
                {"role": "system", "content": """
                    【角色定义】
                    你是一位基于给定参考资料的精准回答助手，必须严格遵循以下规则：
                    【回答规则】
                    **唯一依据**：仅使用<参考资料>中明确提及的信息
                    **边界处理**：若参考资料未提及相关内容，必须回复："根据现有资料无法回答该问题"
                    **格式要求**：段落间用空行分隔"""},
                {"role": "user", "content": prompt},
            ],
            stream=False,
            extra_body={"enable_thinking": True}
        )

        return {
            # NOTE(review): this serializes the ENTIRE completion object
            # (id, usage, choices, ...), not just the assistant text —
            # callers likely want completion.choices[0].message.content.
            # Kept as-is because downstream consumers may parse this shape;
            # confirm before changing.
            "answer": completion.model_dump_json(),
            "contexts": context_docs,
            "context_scores": context_scores,
            "context_count": len(context_docs)
        }

    @staticmethod
    async def stream_generate(query: str, hybrid_search_results, thinking: bool = True):
        """Answer *query* grounded in retrieval results, streamed as SSE.

        Args:
            query: the user's question.
            hybrid_search_results: iterable of ``(document_text, score)``
                pairs.
            thinking: forwarded as DashScope's ``enable_thinking`` extra.

        Returns:
            StreamingResponse emitting OpenAI-style chunk JSON events.

        Raises:
            HTTPException: 500 when the API key is missing.
        """
        context_docs = [doc for doc, score in hybrid_search_results]
        context_str = "".join(context_docs) if context_docs else "无相关参考资料"

        # Build the grounded prompt (whitespace is part of the prompt text).
        prompt = f"""
        ###参考资料###
        {context_str}
        ###用户问题###
        {query}
        """

        client = AIService._build_client()

        # NOTE(review): blocking SDK call inside `async def` — see
        # qwen3_32b for the same caveat.
        completion = client.chat.completions.create(
            model="qwen-plus",
            messages=[
                {"role": "system", "content": """
                【角色定义】
                你是一位基于给定参考资料的精准回答助手，必须严格遵循以下规则：
                【回答规则】
                **唯一依据**：仅使用<参考资料>中明确提及的信息
                **边界处理**：若参考资料未提及相关内容，必须回复："根据现有资料无法回答该问题"
                **格式要求**：段落间用空行分隔"""},
                {"role": "user", "content": prompt},
            ],
            stream=True,
            extra_body={"enable_thinking": thinking}
        )

        return AIService.create_streaming_response(completion)

    @staticmethod
    def create_streaming_response(completion):
        """Wrap an OpenAI streaming completion in an SSE StreamingResponse.

        Each upstream chunk is re-emitted as a minimal OpenAI-style
        ``chat.completion.chunk`` JSON event; a final ``[DONE]`` sentinel
        closes the stream. Client disconnects are swallowed silently; any
        other failure is reported to the client as a ``stream_error`` event.

        Args:
            completion: a (sync) iterable of OpenAI chunk objects.

        Returns:
            StreamingResponse with ``text/event-stream`` media type.
        """

        async def generate_stream():
            try:
                # NOTE(review): `completion` is a synchronous iterator, so
                # this loop blocks the event loop between chunks — TODO
                # confirm acceptable, or bridge via a thread.
                for chunk in completion:
                    if chunk.choices and len(chunk.choices) > 0:
                        delta = chunk.choices[0].delta
                        # Forward only the fields this chunk actually carries.
                        delta_data = {}
                        if hasattr(delta, 'content') and delta.content:
                            delta_data["content"] = delta.content
                        if hasattr(delta, 'reasoning_content') and delta.reasoning_content:
                            delta_data["reasoning_content"] = delta.reasoning_content

                        # Skip empty/keep-alive deltas entirely.
                        if delta_data:
                            data = {
                                "id": chunk.id,
                                "object": "chat.completion.chunk",
                                "created": chunk.created,
                                "model": chunk.model,
                                "choices": [{
                                    "index": 0,
                                    "delta": delta_data,
                                    "finish_reason": None
                                }]
                            }
                            yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
                # End-of-stream sentinel expected by SSE consumers.
                yield "data: [DONE]\n\n"
            except BrokenPipeError:
                # Client went away mid-stream; benign, nothing to clean up.
                print("客户端断开连接 (BrokenPipeError)")
                return
            except ConnectionResetError:
                # Client reset the connection; also benign.
                print("客户端重置连接 (ConnectionResetError)")
                return
            except Exception as e:
                # Any other failure: surface it to the client as one final
                # SSE event so the front end can show an error.
                error_data = {
                    "error": {
                        "message": str(e),
                        "type": "stream_error"
                    }
                }
                try:
                    yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
                except Exception:
                    # Fixed: was a bare `except:` that would also trap
                    # SystemExit/KeyboardInterrupt. If even the error event
                    # cannot be delivered, give up quietly.
                    pass

        # SSE response; no-cache so proxies don't buffer the stream.
        return StreamingResponse(
            generate_stream(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Access-Control-Allow-Origin": "*",
            }
        )


# Module-level singleton so routers can simply `from ... import ai_service`.
ai_service = AIService()
