import json
import os
from typing import Optional

from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from openai import OpenAI
from pydantic import BaseModel


# Load environment variables from a local .env file, if present.
load_dotenv()

# Initialize the FastAPI application.
app = FastAPI(title="ChatGPT API")

# Configure CORS middleware so browser front-ends can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # NOTE(security): restrict to specific domains in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Read API credentials from the environment. The previous version shipped a
# hard-coded fallback API key in source; secrets must never be committed, so
# we now fail fast with a clear error when the key is missing.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")
api_base = os.getenv("OPENAI_API_BASE", "https://api.deepseek.com")

# Create the OpenAI-compatible client (DeepSeek endpoint by default).
client = OpenAI(api_key=api_key, base_url=api_base)

# Request model for the /chat endpoint.
class ChatRequest(BaseModel):
    """Payload accepted by POST /chat.

    Attributes:
        message: The user's latest message.
        conversation_history: Optional prior messages, each shaped like
            ``{"role": ..., "content": ...}``; prepended before ``message``.
        stream: When True, the endpoint streams the reply as JSON lines.
    """
    message: str
    # Fixed annotation: the original ``list = None`` was wrong (None is not a
    # list), which trips strict validation modes and type checkers.
    conversation_history: Optional[list] = None
    stream: bool = False

# Response model for non-streaming replies.
class ChatResponse(BaseModel):
    """Shape of a non-streaming chat reply: a single ``response`` string."""
    response: str

@app.get("/")
def read_root():
    """Health/landing endpoint returning a static welcome payload."""
    return {"message": "Welcome to ChatGPT API"}

@app.post("/chat")
async def chat_with_gpt(request: ChatRequest):
    """Chat completion endpoint.

    Builds the message list from the optional conversation history plus the
    user's latest message, then either streams the model's reply as
    newline-delimited JSON chunks (when ``request.stream`` is True) or returns
    the full reply in a single JSON object.

    Raises:
        HTTPException: 500 with the upstream error message on failure
            (non-streaming path; streaming errors are emitted in-band).
    """
    # Assemble the conversation: prior history (if any) followed by the newest message.
    messages = list(request.conversation_history or [])
    messages.append({"role": "user", "content": request.message})

    # Shared completion parameters, so the streaming and non-streaming calls
    # can never drift apart (previously duplicated in both branches).
    completion_kwargs = dict(
        model="deepseek-chat",  # change here to target a different model
        messages=messages,
        max_tokens=1000,
        n=1,
        stop=None,
        temperature=0.7,
    )

    try:
        if request.stream:
            # Streaming mode: emit one JSON object per content delta, one per line.
            # NOTE: Starlette runs this sync generator in a threadpool, so the
            # blocking OpenAI client does not stall the event loop.
            def generate():
                try:
                    response = client.chat.completions.create(
                        stream=True, **completion_kwargs
                    )
                    for chunk in response:
                        # Skip keep-alive/empty chunks that carry no content delta.
                        if chunk.choices and chunk.choices[0].delta.content:
                            content = chunk.choices[0].delta.content
                            yield f"{json.dumps({'response': content})}\n"
                except Exception as e:
                    # Surface upstream failures to the client in-band instead of
                    # silently dropping the stream.
                    yield f"{json.dumps({'error': str(e)})}\n"

            return StreamingResponse(generate(), media_type="application/jsonlines")

        # Non-streaming mode: wait for the complete reply.
        response = client.chat.completions.create(**completion_kwargs)

        # New-style OpenAI SDK response parsing: first choice's message content.
        reply = response.choices[0].message.content.strip()
        return {"response": reply}
    except Exception as e:
        # Map any upstream/client error to an HTTP 500 with the error text.
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    # Launch the development server when this module is executed directly.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=5011)