import uvicorn
from fastapi import FastAPI, Query, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
import json
import time
import os
import asyncio
import random
from loguru import logger
import uvicorn

# Create the FastAPI application
app = FastAPI(title="独立流式聊天API服务", version="1.0")

# Configure CORS so browser clients from any origin can call this API
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize logging: append to a file, rotating once it reaches 10 MB
logger.add("standalone_stream_server.log", rotation="10 MB")

# Canned reply library - used to generate simulated (mock) responses;
# one entry is picked at random per request by generate_responses()
RESPONSES = [
    "你好！我是一个AI助手，很高兴为你提供帮助。",
    "感谢你的提问！我会尽力回答你的问题。",
    "这是一个很好的问题。让我为你详细解答。",
    "我理解你的需求，让我思考一下如何最好地帮助你。",
    "根据我的理解，你想了解关于这个话题的信息。以下是我的回答。",
    "谢谢你的耐心等待。我已经准备好回答你的问题了。"
]

@app.get("/")
def read_root():
    """Root endpoint: describe the service and list its available endpoints."""
    endpoint_catalog = [
        {"method": "GET", "path": "/chat/stream", "description": "流式聊天接口，支持GET方法"},
    ]
    return {
        "service": "独立流式聊天API服务",
        "version": "1.0",
        "status": "running",
        "endpoints": endpoint_catalog,
    }

@app.get("/api/health")
def health_check():
    """Liveness probe: report service health with the current Unix timestamp."""
    now = time.time()
    return {"status": "healthy", "timestamp": now}

async def generate_responses(prompt: str):
    """Yield a simulated chat reply as OpenAI-style SSE chunks.

    Args:
        prompt: The user's question; echoed back inside the reply text.

    Yields:
        ``data: <json>\\n\\n`` strings in the OpenAI ``chat.completion.chunk``
        format, then a terminal chunk with ``finish_reason: "stop"``, then the
        ``data: [DONE]\\n\\n`` end-of-stream marker. On failure, a single error
        chunk followed by ``[DONE]``.
    """
    try:
        # Log the incoming request
        logger.info(f"生成响应: 查询='{prompt}'")

        # Pick one canned reply and personalize it with the user's prompt
        response_text = random.choice(RESPONSES)
        full_response = f"你问: '{prompt}'\n我的回答: {response_text}\n这是一个独立运行的流式API服务，不依赖任何外部模型。"

        # One stable id for the whole stream (previously the id was re-derived
        # from time.time() per chunk, so it could change mid-stream)
        completion_id = f"chatcmpl-{time.time():.0f}"

        # Split the reply into small random-sized chunks so the stream feels natural
        chunk_size = random.randint(1, 3)
        for i in range(0, len(full_response), chunk_size):
            chunk = full_response[i:i + chunk_size]
            # Chunk shaped like an OpenAI chat.completion.chunk object
            chunk_data = {
                "id": completion_id,
                "object": "chat.completion.chunk",
                "created": int(time.time()),
                "model": "standalone-chat-model",
                "choices": [{
                    "index": 0,
                    "delta": {"content": chunk},
                    "finish_reason": None
                }]
            }

            # ensure_ascii=False keeps the Chinese text readable in the SSE payload
            # instead of \uXXXX escapes
            yield f"data: {json.dumps(chunk_data, ensure_ascii=False)}\n\n"

            # Throttle to simulate token-by-token generation
            await asyncio.sleep(random.uniform(0.05, 0.15))

        # Terminal chunk: empty delta with finish_reason="stop", per the
        # OpenAI streaming format, so clients can detect normal completion
        final_data = {
            "id": completion_id,
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": "standalone-chat-model",
            "choices": [{
                "index": 0,
                "delta": {},
                "finish_reason": "stop"
            }]
        }
        yield f"data: {json.dumps(final_data, ensure_ascii=False)}\n\n"

        # End-of-stream sentinel
        yield "data: [DONE]\n\n"
        logger.info("流式响应生成完成")
    except Exception as e:
        logger.error(f"生成响应失败: {str(e)}")
        # Surface the error to the client as one final chunk, then terminate
        error_data = {
            "id": f"chatcmpl-error-{time.time():.0f}",
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": "standalone-chat-model",
            "choices": [{
                "index": 0,
                "delta": {"content": f"错误: 生成响应时出错: {str(e)}"},
                "finish_reason": "error"
            }]
        }
        yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
        yield "data: [DONE]\n\n"

@app.get("/chat/stream")
async def chat_stream(
    request: Request,
    q: str = Query(..., description="用户提问内容"),
):
    """Streaming chat endpoint (GET).

    Reads the user's question from query parameter ``q`` and returns the
    simulated answer as a server-sent-events stream.
    """
    # Resolve the caller's address for the request log
    if request.client:
        client_ip = request.client.host
    else:
        client_ip = "unknown"
    logger.info(f"接收到流式请求: IP={client_ip}, 查询={q}")

    # no-cache + X-Accel-Buffering: no → proxies flush each chunk immediately
    sse_headers = {
        "Cache-Control": "no-cache",
        "X-Accel-Buffering": "no",
    }
    return StreamingResponse(
        generate_responses(q),
        media_type="text/event-stream",
        headers=sse_headers,
    )

if __name__ == "__main__":
    # Start the server
    logger.info("===== 启动独立流式聊天API服务 ====")
    logger.info("服务完全独立运行，不依赖任何外部模型或服务")
    
    # Serve on port 5003. The import string ("module:app") is required because
    # reload=True makes uvicorn re-import the application on file changes.
    # NOTE(review): assumes this file is named standalone_stream_server.py —
    # confirm, otherwise reload mode will fail to import the app.
    uvicorn.run(
        "standalone_stream_server:app",
        host="0.0.0.0",
        port=5003,
        reload=True,
        log_level="info"
    )