import asyncio
import json
import os
import time

import httpx
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from openai import OpenAI
from pydantic import BaseModel

from prompt import SYS_PROMPT

# Load environment variables from .env (NVIDIA_API_KEY, NVIDIA_MODEL, NVIDIA_URL).
load_dotenv()

app = FastAPI()

@app.get("/")
async def root():
    """Serve the single-page frontend (index.html from the working directory)."""
    index_page = FileResponse("index.html")
    return index_page

# Static file serving.
# NOTE(review): mounting directory="." serves the ENTIRE project root, so
# /static/.env, /static/key.pem and /static/cert.pem are downloadable —
# this exposes the API key and TLS private key. Serve a dedicated assets
# directory instead; confirm and fix.
app.mount("/static", StaticFiles(directory="."), name="static")
app.mount("/dish", StaticFiles(directory="dish"), name="dish")

# CORS configuration.
# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True for credentialed requests; if cookies/credentials
# are actually needed, list explicit origins instead of "*".
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins
    allow_credentials=True,
    allow_methods=["GET", "POST", "OPTIONS"],  # explicitly allow POST
    allow_headers=["*"],
)

# NVIDIA API configuration, read from the environment (populated by load_dotenv).
NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")
NVIDIA_MODEL =  os.getenv("NVIDIA_MODEL")

# Synchronous OpenAI-compatible client pointed at the NVIDIA endpoint.
# NOTE: this client is blocking; async handlers must not call it directly
# on the event loop.
client = OpenAI(
    base_url= os.getenv("NVIDIA_URL"),
    api_key=NVIDIA_API_KEY,
    http_client=httpx.Client()
)

class ChatRequest(BaseModel):
    """Request body for POST /api/chat."""

    # The user's new message.
    message: str
    # Prior conversation turns; each entry is expected to be a dict with
    # "role" and "content" keys. NOTE: pydantic deep-copies field defaults
    # per instance, so the mutable [] default is safe here (unlike plain Python).
    history: list = []

@app.post("/api/chat")
async def chat(request: ChatRequest):
    """Stream a chat completion for the user's message as newline-delimited JSON.

    Each line of the streamed body is a JSON object: {"response": "<delta text>"}.

    Raises:
        HTTPException(500): if building the messages or starting the upstream
            call fails. Errors raised after streaming has begun terminate the
            stream instead (the status line has already been sent).
    """
    try:
        # Prepare the message list: system prompt first, then prior turns,
        # then the new user message.
        messages = [{"role": "system", "content": SYS_PROMPT}]

        # Append history; require both keys so a malformed entry with "role"
        # but no "content" cannot 500 the whole request with a KeyError.
        for msg in request.history:
            if 'role' in msg and 'content' in msg:
                messages.append({"role": msg['role'], "content": msg['content']})

        # Append the current message (skip blank/whitespace-only input).
        if request.message and request.message.strip():
            messages.append({"role": "user", "content": request.message})

        start_time = time.time()
        print(f"[{time.strftime('%H:%M:%S')}] 开始调用大模型API...")

        # The OpenAI client is synchronous; run the blocking call in a worker
        # thread so the event loop stays free for other requests.
        completion = await asyncio.to_thread(
            client.chat.completions.create,
            model=NVIDIA_MODEL,
            messages=messages,
            temperature=0.2,
            top_p=0.7,
            max_tokens=1024,
            stream=True,
        )

        # Stream the upstream chunks to the client as NDJSON.
        async def generate():
            chunk_count = 0
            first_chunk_time = None
            stream_iter = iter(completion)
            done = object()  # sentinel marking stream exhaustion
            while True:
                # next() on the sync stream blocks on network I/O between
                # chunks; run it off the event loop.
                chunk = await asyncio.to_thread(next, stream_iter, done)
                if chunk is done:
                    break

                chunk_time = time.time()
                if first_chunk_time is None:
                    first_chunk_time = chunk_time
                    print(f"[{time.strftime('%H:%M:%S')}] 收到第一个chunk，耗时: {first_chunk_time - start_time:.2f}秒")

                # Forward only non-empty text deltas (role-only / empty chunks
                # carry no content). Single truthiness check replaces the
                # original's redundant "is not None" + truthiness pair.
                delta = chunk.choices[0].delta.content
                if delta:
                    chunk_count += 1
                    yield json.dumps({"response": delta}) + "\n"
                    print(f"[{time.strftime('%H:%M:%S')}] 发送第{chunk_count}个chunk，内容: {delta}")

            end_time = time.time()
            print(f"[{time.strftime('%H:%M:%S')}] 完成响应，总耗时: {end_time - start_time:.2f}秒")
            print(f"[{time.strftime('%H:%M:%S')}] 总共收到{chunk_count}个chunk")

        return StreamingResponse(generate(), media_type="application/json")

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    # Run the ASGI app over HTTPS; expects key.pem / cert.pem in the working dir.
    import uvicorn

    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8000,
        ssl_keyfile="key.pem",
        ssl_certfile="cert.pem",
    )
