import asyncio
import json
import os
import sys
import uuid
from datetime import datetime

import uvicorn
import yaml
from fastapi import FastAPI, Request, Path, Query
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger

# 添加项目根目录到Python路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 加载配置
def load_config():
    """加载配置文件"""
    config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'configs', 'config.yaml')
    if not os.path.exists(config_path):
        logger.error(f"配置文件不存在: {config_path}")
        sys.exit(1)
    
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    
    return config

# 创建简化版的FastAPI应用
def create_simple_app():
    """创建一个简化版的FastAPI应用，不依赖数据库和其他复杂组件"""
    app = FastAPI(
        title="AI助手API(简化版)",
        description="用于测试的简化版API，不依赖数据库",
        version="1.0.0",
        docs_url="/docs",
        redoc_url="/redoc"
    )
    
    # 添加CORS中间件
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # 允许所有来源
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    
    # 模拟数据
    mock_sessions = [
        {
            "sessionId": "1",
            "title": "测试会话1",
            "createdAt": "2023-07-01T10:00:00Z",
            "updatedAt": "2023-07-01T10:30:00Z",
            "messageCount": 5
        },
        {
            "sessionId": "2",
            "title": "测试会话2",
            "createdAt": "2023-07-02T14:00:00Z",
            "updatedAt": "2023-07-02T15:00:00Z",
            "messageCount": 3
        }
    ]
    
    mock_messages = [
        {
            "messageId": "msg1",
            "content": "你好，AI助手！",
            "role": "user",
            "timestamp": "2023-07-01T10:00:00Z",
            "sessionId": "1"
        },
        {
            "messageId": "msg2",
            "content": "你好！我是AI助手。有什么可以帮助你的吗？",
            "role": "assistant",
            "timestamp": "2023-07-01T10:01:00Z",
            "sessionId": "1"
        }
    ]
    
    @app.get("/")
    def root():
        """根路径，返回API信息"""
        return {
            "name": "AI助手API(简化版)",
            "version": "1.0.0",
            "description": "用于测试的简化版API，不依赖数据库",
            "status": "running"
        }
    
    @app.get("/api")
    def api_root():
        """API根路径"""
        return {
            "name": "AI助手API",
            "version": "1.0.0",
            "status": "running"
        }
    
    @app.get("/api/health")
    def health_check():
        """健康检查接口"""
        return {
            "status": "healthy",
            "time": str(os.path.getmtime(__file__))
        }
    
    # 发送消息
    @app.post("/api/chat/message")
    async def send_message(request: Request):
        """发送消息接口"""
        data = await request.json()
        logger.info(f"收到消息: {data}")
        
        # 生成模拟响应
        response_data = {
            "messageId": str(uuid.uuid4()),
            "content": "这是一个简化版的AI助手响应。由于数据库服务未启动，无法提供完整功能。",
            "role": "assistant",
            "timestamp": datetime.now().isoformat(),
            "sessionId": data.get("sessionId", ""),
            "references": []
        }
        
        return JSONResponse(content=response_data)
    
    # 创建会话
    @app.post("/api/chat/session")
    async def create_session(request: Request):
        """创建会话接口"""
        data = await request.json()
        logger.info(f"创建会话: {data}")
        
        # 生成模拟会话
        session_id = str(uuid.uuid4())
        new_session = {
            "sessionId": session_id,
            "title": data.get("title", f"新会话 {len(mock_sessions) + 1}"),
            "createdAt": datetime.now().isoformat(),
            "updatedAt": datetime.now().isoformat(),
            "messageCount": 0
        }
        
        # 添加到模拟数据
        mock_sessions.append(new_session)
        
        return JSONResponse(content=new_session)
    
    # 获取会话列表
    @app.get("/api/chat/sessions")
    def get_sessions():
        """获取会话列表接口"""
        return JSONResponse(content=mock_sessions)
    
    # 获取会话详情
    @app.get("/api/chat/session/{sessionId}")
    def get_session_detail(sessionId: str = Path(...)):
        """获取会话详情接口"""
        session = next((s for s in mock_sessions if s["sessionId"] == sessionId), None)
        if not session:
            return JSONResponse(status_code=404, content={"detail": "会话不存在"})
        return JSONResponse(content=session)
    
    # 更新会话
    @app.put("/api/chat/session/{sessionId}")
    async def update_session(sessionId: str = Path(...), request: Request = None):
        """更新会话接口"""
        session = next((s for s in mock_sessions if s["sessionId"] == sessionId), None)
        if not session:
            return JSONResponse(status_code=404, content={"detail": "会话不存在"})
        
        data = await request.json()
        if "title" in data:
            session["title"] = data["title"]
        if "isFavorite" in data:
            session["isFavorite"] = data["isFavorite"]
        session["updatedAt"] = datetime.now().isoformat()
        
        return JSONResponse(content=session)
    
    # 删除会话
    @app.delete("/api/chat/session/{sessionId}")
    def delete_session(sessionId: str = Path(...)):
        """删除会话接口"""
        global mock_sessions
        session = next((s for s in mock_sessions if s["sessionId"] == sessionId), None)
        if not session:
            return JSONResponse(status_code=404, content={"detail": "会话不存在"})
        
        # 从模拟数据中删除
        mock_sessions = [s for s in mock_sessions if s["sessionId"] != sessionId]
        
        return JSONResponse(content={"success": True})
    
    # 导出会话
    @app.get("/api/chat/session/{sessionId}/export")
    def export_session(sessionId: str = Path(...), format: str = Query(default="json")):
        """导出会话接口"""
        session = next((s for s in mock_sessions if s["sessionId"] == sessionId), None)
        if not session:
            return JSONResponse(status_code=404, content={"detail": "会话不存在"})
        
        # 获取该会话的消息
        session_messages = [m for m in mock_messages if m["sessionId"] == sessionId]
        
        # 生成模拟导出数据
        export_data = {
            "session": session,
            "messages": session_messages,
            "exportTime": datetime.now().isoformat(),
            "format": format
        }
        
        return JSONResponse(content=export_data)
    
    # 获取消息列表
    @app.get("/api/chat/session/{sessionId}/messages")
    def get_messages(sessionId: str = Path(...), page: int = Query(default=1), pageSize: int = Query(default=50)):
        """获取消息列表接口"""
        # 获取该会话的消息
        session_messages = [m for m in mock_messages if m["sessionId"] == sessionId]
        
        # 简单分页
        start = (page - 1) * pageSize
        end = start + pageSize
        paginated_messages = session_messages[start:end]
        
        return JSONResponse(content={
            "items": paginated_messages,
            "total": len(session_messages),
            "page": page,
            "pageSize": pageSize
        })
    
    # 兼容前端的/api/chat/stream路径
    @app.post("/api/chat/stream")
    async def chat_stream_v1(request: Request):
        """处理前端的流式聊天请求(版本1)"""
        data = await request.json()
        logger.info(f"收到流式聊天请求(v1): {data}")
        
        # 返回模拟的流式响应
        async def stream_generator():
            messages = [
                '{"type":"start","messageId":"' + str(uuid.uuid4()) + '"}\n',
                '{"type":"content","content":"这是"}\n',
                '{"type":"content","content":"一个"}\n',
                '{"type":"content","content":"简化版的"}\n',
                '{"type":"content","content":"AI助手"}\n',
                '{"type":"content","content":"响应。"}\n',
                '{"type":"end"}\n'
            ]
            
            for message in messages:
                yield message.encode('utf-8')
                import asyncio
                await asyncio.sleep(0.2)
        
        return StreamingResponse(stream_generator(), media_type="text/event-stream")
    
    # OpenAI兼容的流式接口
    @app.post("/v1/chat/completions")
    async def openai_chat_completions(request: Request):
        """OpenAI兼容的聊天完成接口"""
        data = await request.json()
        logger.info(f"收到OpenAI兼容的聊天请求: {data}")
        
        # 提取用户消息
        user_message = ""
        if "messages" in data:
            for msg in data["messages"]:
                if msg["role"] == "user":
                    user_message = msg["content"]
                    break
        
        # 根据用户消息生成回复
        response_text = f"这是对您的问题 '{user_message}' 的回答。我是一个模拟的OpenAI兼容接口。"
        
        # 返回模拟的流式响应
        async def stream_generator():
            # 切分响应文本
            words = response_text.split(" ")
            for i, word in enumerate(words):
                # 构建符合OpenAI格式的响应
                delta = {"content": word + (" " if i < len(words) - 1 else "")}
                chunk = {
                    "id": f"chatcmpl-{str(uuid.uuid4())[:8]}",
                    "object": "chat.completion.chunk",
                    "created": int(datetime.now().timestamp()),
                    "model": data.get("model", "qwen2.5-7b-instruct-q4_k_m"),
                    "choices": [{
                        "index": 0,
                        "delta": delta,
                        "finish_reason": None
                    }]
                }
                yield f"data: {json.dumps(chunk)}\n\n"
                import asyncio
                await asyncio.sleep(0.1)  # 控制流速度
            
            # 发送结束标志
            yield "data: [DONE]\n\n"
        
        # 检查是否需要流式响应
        if data.get("stream", False):
            return StreamingResponse(stream_generator(), media_type="text/event-stream")
        else:
            # 非流式响应
            response = {
                "id": f"chatcmpl-{str(uuid.uuid4())[:8]}",
                "object": "chat.completion",
                "created": int(datetime.now().timestamp()),
                "model": data.get("model", "qwen2.5-7b-instruct-q4_k_m"),
                "choices": [{
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response_text
                    },
                    "finish_reason": "stop"
                }],
                "usage": {
                    "prompt_tokens": len(user_message),
                    "completion_tokens": len(response_text),
                    "total_tokens": len(user_message) + len(response_text)
                }
            }
            return JSONResponse(content=response)
        
    # 兼容前端的/api/api/chat/stream路径
    @app.post("/api/api/chat/stream")
    async def chat_stream_v2(request: Request):
        """处理前端的流式聊天请求(版本2)"""
        data = await request.json()
        logger.info(f"收到流式聊天请求(v2): {data}")
        
        # 返回模拟的流式响应
        async def stream_generator():
            messages = [
                '{"type":"start","messageId":"' + str(uuid.uuid4()) + '"}\n',
                '{"type":"content","content":"这是"}\n',
                '{"type":"content","content":"一个"}\n',
                '{"type":"content","content":"简化版的"}\n',
                '{"type":"content","content":"AI助手"}\n',
                '{"type":"content","content":"响应。"}\n',
                '{"type":"end"}\n'
            ]
            
            for message in messages:
                yield message.encode('utf-8')
                import asyncio
                await asyncio.sleep(0.2)
        
        return StreamingResponse(stream_generator(), media_type="text/event-stream")
    
    return app

# 启动服务器
def main():
    """主函数"""
    try:
        # 加载配置
        config = load_config()
        
        # 创建简化版应用
        app = create_simple_app()
        
        logger.info("===== 启动简化版AI助手API ====")
        logger.info(f"启动FastAPI服务器: {config['api']['host']}:{config['api']['port']}")
        logger.info(f"文档访问地址: http://{config['api']['host']}:{config['api']['port']}/docs")
        
        # 启动服务器
        uvicorn.run(
            app,
            host=config['api']['host'],
            port=8001,  # 使用8001端口避免冲突
            workers=1,  # 简化版只使用1个工作进程
            reload=config['api']['debug'],
            log_level="info"
        )
        
    except KeyboardInterrupt:
        logger.info("用户中断，正在关闭服务器...")
    except Exception as e:
        logger.error(f"启动失败: {str(e)}", exc_info=True)
        sys.exit(1)

if __name__ == "__main__":
    main()