# deepseek.py
from fastapi import FastAPI
from pydantic import BaseModel
from openai import AsyncOpenAI
import yaml
import os
from fastapi.responses import StreamingResponse
import json
from typing import AsyncGenerator

# FastAPI application instance; the /chat and /health routes below attach to it.
app = FastAPI()

# Configuration loader.
class ChatConfig:
    """Load the YAML application config and expose its sections.

    Attributes:
        config: full parsed YAML document.
        openai_config: the "openai" section (api_key, base_url, model).
        prompt_templates: the "prompts" section (prompt templates).
    """

    def __init__(self, config_path=None):
        """Load configuration on construction.

        Args:
            config_path: optional explicit path to the YAML file. When omitted,
                falls back to ``../config.yaml`` relative to this module
                (the original hard-coded behavior).
        """
        self._load_config(config_path)

    def _load_config(self, config_path=None):
        """Read and parse the YAML config file, then cache its sections.

        Raises:
            FileNotFoundError: if the config file does not exist.
            KeyError: if the "openai" or "prompts" section is missing.
        """
        if config_path is None:
            # Default location: config.yaml one directory above this module.
            config_path = os.path.join(os.path.dirname(__file__), "../config.yaml")
        with open(config_path, "r", encoding="utf-8") as f:
            # safe_load avoids arbitrary object construction from the YAML.
            self.config = yaml.safe_load(f)

        self.openai_config = self.config["openai"]
        self.prompt_templates = self.config["prompts"]

# Chat service: wraps the async OpenAI client and streams completions.
class ChatService:
    """Streams chat completions (including reasoning traces) from the API."""

    def __init__(self, config: ChatConfig):
        self.config = config
        # One shared async client for every request served by this instance.
        self.client = AsyncOpenAI(
            api_key=config.openai_config["api_key"],
            base_url=config.openai_config["base_url"]
        )

    async def stream_completion(self, query: str) -> AsyncGenerator[dict, None]:
        """Yield incremental response events for *query*.

        Each event is a dict with a "type" key:
        - "reasoning": a chunk of the model's intermediate reasoning text
        - "answer": a chunk of the final answer
        - "complete": emitted once at the end with both accumulated texts
        - "error": emitted instead of raising if the API call fails
        """
        try:
            # Build the prompt from the "default" template pair.
            templates = self.config.prompt_templates["default"]
            messages = [
                {"role": "system", "content": templates["system"]},
                {"role": "user", "content": templates["template"].format(query=query)},
            ]

            # Open the streaming completion.
            stream = await self.client.chat.completions.create(
                model=self.config.openai_config["model"],
                messages=messages,
                stream=True
            )

            reasoning_parts = []
            answer_parts = []

            # Fan the stream out as typed events while accumulating full texts.
            async for chunk in stream:
                if not chunk.choices:
                    continue

                delta = chunk.choices[0].delta

                # Some models expose a separate "reasoning" channel on the delta.
                reasoning_text = getattr(delta, 'reasoning_content', None)
                if reasoning_text:
                    reasoning_parts.append(reasoning_text)
                    yield {"type": "reasoning", "content": reasoning_text}

                # Regular answer tokens.
                if delta.content:
                    answer_parts.append(delta.content)
                    yield {"type": "answer", "content": delta.content}

            # Final event carries the fully assembled reasoning and answer.
            yield {
                "type": "complete",
                "reasoning": "".join(reasoning_parts),
                "answer": "".join(answer_parts),
            }

        except Exception as e:
            # Surface failures as an in-band error event instead of raising.
            yield {"type": "error", "content": f"API请求失败: {str(e)}"}

# Wire up module-level singletons: load config once and share one service/client.
config = ChatConfig()
chat_service = ChatService(config)

# Request body schema for the /chat endpoint.
class ChatRequest(BaseModel):
    # The user's chat question; injected into the prompt template by ChatService.
    query: str

# Chat endpoint: streams model output to the client as Server-Sent Events.
@app.post("/chat")
async def chat_endpoint(request: ChatRequest):
    """Proxy the query to ChatService and relay its events as SSE frames."""

    async def sse_events():
        # Each service event becomes one "data: <json>\n\n" SSE frame.
        try:
            async for event in chat_service.stream_completion(request.query):
                payload = json.dumps(event, ensure_ascii=False)
                yield f"data: {payload}\n\n"
        except Exception as e:
            # Last-resort guard: report unexpected failures in-band.
            fallback = json.dumps({
                "type": "error",
                "content": f"服务器错误: {str(e)}"
            }, ensure_ascii=False)
            yield f"data: {fallback}\n\n"

    sse_headers = {
        "Cache-Control": "no-cache",
        "X-Accel-Buffering": "no",  # disable Nginx response buffering
    }
    return StreamingResponse(
        sse_events(),
        media_type="text/event-stream",
        headers=sse_headers
    )

# Health-check endpoint.
@app.get("/health")
async def health_check():
    """Report liveness, the configured model name, and supported features."""
    status = {"status": "running"}
    status["model"] = config.openai_config["model"]
    status["support_features"] = ["streaming", "reasoning_process"]
    return status

if __name__ == "__main__":
    import uvicorn

    # Serve on all interfaces, port 8000. Auto-reload is deliberately left
    # off; enable it only for local development, never in production.
    uvicorn.run(app, host="0.0.0.0", port=8000)