import uvicorn
from fastapi import FastAPI, Query, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
import requests
import json
import time
import os
from dotenv import load_dotenv
from loguru import logger

# 创建FastAPI应用
# Create the FastAPI application.
app = FastAPI(title="流式聊天API服务", version="1.0")

# Configure CORS so browser clients on other origins can reach the API.
# NOTE(review): wildcard origins combined with allow_credentials=True is
# disallowed by the fetch spec; recent Starlette mitigates by echoing the
# request origin, but listing explicit origins is safer — TODO confirm.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow every origin
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load environment variables from a local .env file (no-op if absent);
# must run before the os.getenv() reads below.
load_dotenv()

# Endpoint and API key of the OpenAI-compatible backend; defaults target
# a local server on port 8001.
OPENAI_BASE = os.getenv('OPENAI_BASE', 'http://127.0.0.1:8001/v1')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY', 'sk-local')

# File logging via loguru: rotate once the file reaches 10 MB.
logger.add("stream_server.log", rotation="10 MB")

@app.get("/")
def read_root():
    """Root path: return basic service metadata and the endpoint catalog."""
    endpoint_catalog = [
        {
            "method": "GET",
            "path": "/chat/stream",
            "description": "流式聊天接口，支持GET方法",
        }
    ]
    return {
        "service": "流式聊天API服务",
        "version": "1.0",
        "status": "running",
        "endpoints": endpoint_catalog,
    }

@app.get("/api/health")
def health_check():
    """Liveness probe: report service health plus the current UNIX timestamp."""
    now = time.time()
    return {"status": "healthy", "timestamp": now}

def openai_stream(prompt: str):
    """Call the OpenAI-compatible chat endpoint and yield its stream lines.

    Args:
        prompt: The user's question, sent as a single "user" message.

    Yields:
        str: Each non-empty line of the upstream streaming response,
        newline-terminated (raw SSE "data: ..." framing is forwarded
        untouched); on failure, a human-readable error line instead.
    """
    logger.info(f"准备发送请求到OpenAI兼容接口: {OPENAI_BASE}/chat/completions")
    logger.debug(f"请求提示词: {prompt}")

    try:
        # Request body for the OpenAI-compatible /chat/completions API.
        request_body = {
            "model": "Qwen2.5-3B-Instruct",
            "messages": [{"role": "user", "content": prompt}],
            "stream": True,
            "temperature": 0.7,
            "max_tokens": 2048
        }

        # Bearer auth against the backend configured at module level.
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {OPENAI_API_KEY}"
        }

        logger.info(f"发送POST请求，超时设置: 60秒")

        # FIX: use a context manager so the streamed connection is always
        # released — the original never closed the response, leaking the
        # connection when the client disconnected mid-stream and this
        # generator was closed early.
        with requests.post(
            f"{OPENAI_BASE}/chat/completions",
            json=request_body,
            headers=headers,
            stream=True,
            timeout=60
        ) as response:
            # Raise for 4xx/5xx so the HTTPError handler below reports it.
            response.raise_for_status()
            logger.info(f"请求成功，状态码: {response.status_code}")

            # Process the streaming response line by line.
            logger.info("开始处理流式响应...")
            for line in response.iter_lines(decode_unicode=True):
                # Skip SSE keep-alive blank lines.
                if not line:
                    continue

                logger.debug(f"接收到原始行: {line}")

                # Forward the raw line unchanged, re-adding the newline
                # that iter_lines() strips.
                yield f"{line}\n"
    except requests.exceptions.ConnectionError as e:
        logger.error(f"连接错误: {str(e)}")
        # Backend unreachable — surface a readable error to the client.
        yield f"错误: 无法连接到OpenAI兼容服务。请检查服务是否在 {OPENAI_BASE} 运行。\n"
    except requests.exceptions.Timeout as e:
        logger.error(f"请求超时: {str(e)}")
        yield f"错误: 请求超时。\n"
    except requests.exceptions.HTTPError as e:
        logger.error(f"HTTP错误: {str(e)}")
        # e.response is set here because the error came from raise_for_status().
        yield f"错误: HTTP错误 {e.response.status_code}。\n"
    except Exception as e:
        # Last-resort guard: report rather than crash the stream.
        logger.error(f"OpenAI请求失败: {str(e)}")
        yield f"错误: 处理请求时发生未知错误: {str(e)}\n"

    logger.info("流式响应处理完成")

@app.get("/chat/stream")
def chat_stream(
    request: Request,
    q: str = Query(..., description="用户提问内容"),
):
    """Streaming chat endpoint (GET).

    Reads the user's question from query parameter ``q`` and streams the
    upstream model output back to the client as plain text.
    """
    # Log who is asking and what.
    if request.client:
        client_ip = request.client.host
    else:
        client_ip = "unknown"
    logger.info(f"接收到流式请求: IP={client_ip}, 查询={q}")

    # Headers that disable caching and reverse-proxy buffering so chunks
    # reach the client as soon as they are produced.
    no_buffer_headers = {
        "Cache-Control": "no-cache",
        "X-Accel-Buffering": "no"
    }

    return StreamingResponse(
        openai_stream(q),
        media_type="text/plain",
        headers=no_buffer_headers
    )

if __name__ == "__main__":
    # Start the server.
    logger.info("===== 启动独立流式聊天API服务 ====")
    logger.info(f"OpenAI兼容服务地址: {OPENAI_BASE}")

    # Serve on all interfaces, port 5002.
    # FIX: dropped reload=True — uvicorn requires the app as an import
    # string ("module:app") for reload/workers to take effect; with an app
    # object it only logs a warning and ignores the flag, so removing it
    # matches the actual behavior and silences the warning.
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=5002,
        workers=1,
        log_level="info"
    )