import json
import logging
import os
from typing import Any, AsyncGenerator, Dict, List, Optional

from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel

# Logging configuration. DEBUG is very verbose (full request payloads are
# logged below) — consider INFO or higher in production.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Load environment variables from a local .env file, if present.
load_dotenv()

app = FastAPI(title="大模型对话服务")


# CORS configuration for the browser front end.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:5173"],  # allowed front-end origin (Vite dev-server default)
    allow_credentials=True,
    allow_methods=["*"],  # allow every HTTP method
    allow_headers=["*"],  # allow every HTTP header
)

# Upstream provider configuration (SiliconFlow, OpenAI-compatible API).
# API_TOKEN may be None when the env var is unset; the /chat handler checks.
API_TOKEN = os.getenv("SILICONFLOW_API_TOKEN")
API_URL = "https://api.siliconflow.cn/v1/"

class Message(BaseModel):
    """A single chat turn in the OpenAI wire format."""

    # Speaker role: "system", "user" or "assistant".
    role: str
    # Plain-text message content.
    content: str

class ChatRequest(BaseModel):
    """Request body for POST /chat."""

    # Conversation history, oldest first.
    messages: List[Message]
    # Upstream model identifier.
    model: str = "Qwen/QwQ-32B"
    # Stream the reply as server-sent events (on by default).
    stream: bool = True
    # Upper bound on generated tokens.
    max_tokens: int = 512
    # Sampling temperature.
    temperature: float = 0.7
    # Number of completions to request.
    n: int = 1

def create_chat_model(request: ChatRequest) -> ChatOpenAI:
    """Build a ``ChatOpenAI`` client configured from *request*.

    Streaming requests additionally get a ``StreamingStdOutCallbackHandler``,
    which echoes tokens to the server's stdout — a debugging aid only.

    Cleanup: the previous version passed an always-empty ``model_kwargs``
    dict containing only commented-out options; both the dead code and the
    no-op argument are removed (an empty dict is the library default).
    """
    callbacks = [StreamingStdOutCallbackHandler()] if request.stream else None
    return ChatOpenAI(
        model=request.model,
        temperature=request.temperature,
        max_tokens=request.max_tokens,
        streaming=request.stream,
        callbacks=callbacks,
        openai_api_key=API_TOKEN,
        openai_api_base=API_URL,
    )

async def stream_response(chat_model, messages) -> AsyncGenerator[str, None]:
    """Yield the model's output as server-sent-event data frames.

    Every non-empty chunk produced by ``chat_model.astream`` becomes one
    ``data: {"content": ...}`` SSE event; empty chunks are skipped.
    """
    async for piece in chat_model.astream(messages):
        text = piece.content
        if not text:
            continue
        payload = json.dumps({"content": text})
        yield f"data: {payload}\n\n"

@app.post("/chat")
async def chat(request: ChatRequest):
    if not API_TOKEN:
        raise HTTPException(status_code=500, detail="API token not configured")
    
    try:
        logger.debug(f"API Token: {API_TOKEN[:5]}...")
        logger.debug(f"Request: {request.dict()}")
        
        # 创建聊天模型
        chat_model = create_chat_model(request)
        
        # 转换消息格式
        messages = []
        for msg in request.messages:
            if msg.role == "system":
                messages.append(SystemMessage(content=msg.content))
            else:
                messages.append(HumanMessage(content=msg.content))
        
        logger.debug(f"Messages: {messages}")
        
        if request.stream:
            # 流式响应
            return StreamingResponse(
                stream_response(chat_model, messages),
                media_type="text/event-stream"
            )
        else:
            # 非流式响应
            response = await chat_model.ainvoke(messages)
            logger.debug(f"Response: {response}")
            
            return {
                "choices": [{
                    "message": {
                        "role": "assistant",
                        "content": response.content
                    }
                }]
            }
        
    except Exception as e:
        logger.error(f"Error: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/")
async def root():
    return {"message": "欢迎使用大模型对话服务"}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000, reload=True) 