import logging
import os
from typing import Any, Dict, List, Optional

import requests
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel

# Logging configuration — DEBUG level gives full request/response tracing.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Load environment variables from a local .env file, if present.
load_dotenv()

app = FastAPI(title="大模型对话服务")

# Service configuration.
API_TOKEN = os.getenv("SILICONFLOW_API_TOKEN")  # may be None; checked in /chat
API_URL = "https://api.siliconflow.cn/v1/"  # OpenAI-compatible SiliconFlow endpoint

class Message(BaseModel):
    """A single chat message in OpenAI wire format."""

    role: str  # "system" gets special handling in /chat; any other role is treated as a user turn
    content: str  # message text

class ChatRequest(BaseModel):
    """Request body for the /chat endpoint (OpenAI-style parameters)."""

    messages: List[Message]  # conversation history, oldest first
    model: str = "Qwen/QwQ-32B"  # model id on the SiliconFlow API
    stream: bool = False  # when True, tokens are echoed to stdout via a callback
    max_tokens: int = 512
    temperature: float = 0.7
    n: int = 1  # NOTE(review): accepted but never forwarded to the model — confirm intent

def create_chat_model(request: ChatRequest) -> ChatOpenAI:
    """Build a ChatOpenAI client configured from *request*.

    When ``request.stream`` is set, generated tokens are echoed to stdout
    through ``StreamingStdOutCallbackHandler``.

    Args:
        request: Validated chat request carrying model name and sampling
            parameters.

    Returns:
        A configured ``ChatOpenAI`` instance pointed at the SiliconFlow
        OpenAI-compatible endpoint.
    """
    # Dead commented-out model_kwargs (top_p / frequency_penalty) and the
    # resulting empty dict were removed: passing model_kwargs={} is a no-op.
    callbacks = [StreamingStdOutCallbackHandler()] if request.stream else None
    return ChatOpenAI(
        model=request.model,
        temperature=request.temperature,
        max_tokens=request.max_tokens,
        streaming=request.stream,
        callbacks=callbacks,
        openai_api_key=API_TOKEN,
        openai_api_base=API_URL,
    )

@app.post("/chat")
async def chat(request: ChatRequest):
    """Run a chat completion and return it in OpenAI-compatible shape.

    Converts the incoming messages to LangChain message objects, invokes
    the model asynchronously, and wraps the reply in a ``choices`` list
    so existing OpenAI-style clients keep working.

    Raises:
        HTTPException: 500 when the API token is missing or the model
            call fails.
    """
    if not API_TOKEN:
        raise HTTPException(status_code=500, detail="API token not configured")

    try:
        # Lazy %-style logging: args are rendered only when DEBUG is on.
        # The API token itself is deliberately no longer logged.
        logger.debug("Request: %s", request.dict())

        chat_model = create_chat_model(request)

        # Map OpenAI roles onto LangChain message classes.  The previous
        # version collapsed "assistant" turns into HumanMessage, which
        # corrupts multi-turn conversation history.
        role_map = {"system": SystemMessage, "assistant": AIMessage}
        messages = [
            role_map.get(msg.role, HumanMessage)(content=msg.content)
            for msg in request.messages
        ]
        logger.debug("Messages: %s", messages)

        response = await chat_model.ainvoke(messages)
        logger.debug("Response: %s", response)

        # OpenAI-compatible response envelope expected by callers.
        return {
            "choices": [{
                "message": {
                    "role": "assistant",
                    "content": response.content
                }
            }]
        }

    except Exception as e:
        # Log the full traceback, surface only the message to the caller.
        logger.error("Error: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=str(e)) from e

@app.get("/")
async def root():
    """Landing endpoint returning a static welcome payload."""
    payload = {"message": "欢迎使用大模型对话服务"}
    return payload

if __name__ == "__main__":
    import uvicorn

    # reload=True is honored only when the app is passed as an import
    # string ("module:app"); with an app *object* uvicorn logs a warning
    # and silently disables reload, so the flag was dropped here.
    uvicorn.run(app, host="0.0.0.0", port=8000)