#!/usr/bin/env python3
"""
FastAPI服务器 - 水库调度智能问答API
用于与Vue前端集成的后端服务
"""

import asyncio
import json
import os
import sys
from datetime import datetime
from pathlib import Path
from typing import Optional, List, Dict, Any

import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

# Add the project root to sys.path so the D_db package resolves no matter
# where the script is launched from (this file lives one level below root).
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

try:
    from D_db.tools.llm_query_tester import LLMQueryTester
    print("✅ 成功导入LLMQueryTester")
except ImportError as e:
    # The import is required for the QA feature; bail out early with a hint
    # rather than failing later at request time.
    print(f"❌ 导入LLMQueryTester失败: {e}")
    print("请确保在项目根目录下运行此脚本")
    sys.exit(1)

# FastAPI application instance; docs are served under /api/* to keep all
# routes of this service behind a single path prefix.
app = FastAPI(
    title="水库调度智能问答API",
    description="基于Vue.js前端的水库调度知识问答系统",
    version="1.0.0",
    docs_url="/api/docs",
    redoc_url="/api/redoc"
)

# CORS configuration — allow the Vue dev-server origins to call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "http://localhost:3000",
        "http://127.0.0.1:3000",
        "http://localhost:5173",  # Vite default dev-server port
        "http://127.0.0.1:5173"
    ],
    allow_credentials=True,
    allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
    allow_headers=["*"],
    expose_headers=["*"]
)

# Request / response models

class QuestionRequest(BaseModel):
    """Body of a QA request: the user's question plus a retrieval limit."""
    question: str
    # Max number of knowledge-base entries to retrieve for context.
    limit: int = 20

class QAItem(BaseModel):
    """One retrieved knowledge-base entry (schema mirrors LLMQueryTester output)."""
    question: str
    answer: str
    category_id: Optional[str] = None
    # NOTE(review): typed str although the name suggests a count — confirm
    # against the upstream query result before tightening to int.
    match_count: Optional[str] = None
    context: Optional[str] = None

class AnswerResponse(BaseModel):
    """Response of /api/qa: the answer plus the raw retrieval results."""
    question: str
    answer: str
    # Raw KB hits as returned by the query helper; kept untyped dicts on purpose.
    qa_results: List[Dict[str, Any]] = []
    success: bool = True
    # ISO-8601 timestamp of when the answer was produced.
    timestamp: Optional[str] = None

class HealthResponse(BaseModel):
    """Response of /api/health."""
    # "ok" when the LLM helper is ready, "degraded" otherwise.
    status: str
    service: str
    llm_ready: bool
    timestamp: str

# Global LLM query helper shared by all endpoints; stays None in degraded mode.
tester = None

@app.on_event("startup")
async def startup_event():
    """Initialize the global LLM query helper when the app boots.

    On any failure the service keeps running in degraded mode (``tester``
    stays ``None``) and the QA endpoints respond with 503.
    """
    global tester
    try:
        print("🚀 正在初始化LLM查询器...")
        tester = LLMQueryTester()
        print("✅ LLM查询器初始化成功")
        
        # Smoke-test the retrieval pipeline with a minimal query so config
        # problems surface at startup instead of on the first user request.
        print("🔍 进行测试查询...")
        test_result = tester.test_query("测试水库调度", limit=1)
        if test_result and "answer" in test_result:
            print("✅ 测试查询成功")
        else:
            print("⚠️ 测试查询返回异常结果")
            
    except Exception as e:
        print(f"❌ LLM查询器初始化失败: {e}")
        tester = None
        print("⚠️ 服务将以降级模式运行，问答功能不可用")

@app.on_event("shutdown")
async def shutdown_event():
    """Drop the global LLM query helper when the application shuts down."""
    global tester
    print("🛑 应用正在关闭...")
    tester = None

@app.get("/api/health", response_model=HealthResponse)
async def health_check():
    """Health probe for the frontend / orchestration.

    Returns:
        HealthResponse with status "ok" when the LLM helper initialized,
        "degraded" otherwise (the service still serves static endpoints).
    """
    return HealthResponse(
        status="ok" if tester else "degraded",
        service="reservoir-qa-api",
        llm_ready=bool(tester),
        timestamp=datetime.now().isoformat()
    )

@app.post("/api/qa", response_model=AnswerResponse)
async def process_question(request: QuestionRequest):
    """Main QA endpoint: run the question through the LLM query pipeline.

    Args:
        request: question text plus the number of KB entries to retrieve.

    Returns:
        AnswerResponse with the generated answer and the supporting KB hits.

    Raises:
        HTTPException 503: LLM helper failed to initialize at startup.
        HTTPException 500: the query returned nothing or raised.
    """
    if not tester:
        raise HTTPException(
            status_code=503, 
            detail="服务未就绪，LLM查询器初始化失败。请检查环境配置和API密钥。"
        )
    
    try:
        print(f"📝 收到问题: {request.question}")
        
        # Delegate retrieval + answer generation to the LLM query helper.
        result = tester.test_query(request.question, limit=request.limit)
        
        if not result:
            raise HTTPException(
                status_code=500, 
                detail="查询返回空结果"
            )
        
        print(f"✅ 查询成功，返回结果长度: {len(str(result))}")
        
        # Fall back to the request's own question / a canned apology when
        # the helper omits those keys.
        return AnswerResponse(
            question=result.get("question", request.question),
            answer=result.get("answer", "抱歉，无法生成回答。请尝试重新提问。"),
            qa_results=result.get("qa_results", []),
            success=True,
            timestamp=datetime.now().isoformat()
        )
        
    except HTTPException:
        # Re-raise our own HTTP errors untouched instead of wrapping them as 500s.
        raise
    except Exception as e:
        print(f"❌ 处理问题时出错: {e}")
        raise HTTPException(
            status_code=500, 
            detail=f"处理问题时出错: {str(e)}"
        )

@app.get("/api/sample-questions")
async def get_sample_questions():
    """Return the static list of example questions shown by the frontend."""
    sample_questions = [
        "水库调度的一般原则是什么？",
        "如何制定水库的防洪调度方案？",
        "水库兴利调度的主要内容有哪些？",
        "水库调度中如何平衡防洪与兴利的关系？",
        "什么是水库调度图？如何绘制？",
        "水库调度自动化系统包括哪些功能？",
        "如何评估水库调度方案的合理性？",
        "水库调度中需要考虑哪些约束条件？"
    ]
    return {"questions": sample_questions, "success": True}

@app.get("/api/system-status")
async def get_system_status():
    """Detailed service status: version, LLM readiness and feature flags."""
    return {
        "service": "reservoir-qa-api",
        "version": "1.0.0",
        "status": "running",
        "llm_ready": bool(tester),
        "timestamp": datetime.now().isoformat(),
        # Feature flags for the frontend; QA is disabled in degraded mode.
        "features": {
            "qa_enabled": bool(tester),
            "health_check": True,
            "sample_questions": True,
            "system_status": True
        }
    }

# Streaming (SSE) endpoint: push the KB references first, then stream the answer
@app.post("/api/qa/stream")
async def process_question_stream(request: QuestionRequest):
    """Stream the answer over Server-Sent Events.

    Event sequence: ``refs`` (retrieved KB entries) → many ``delta`` events
    (one character each, throttled for a typewriter effect) → ``done``.
    Errors inside the generator are reported as an ``error`` event because
    the HTTP status line has already been sent by then.
    """
    if not tester:
        raise HTTPException(
            status_code=503,
            detail="服务未就绪，LLM查询器初始化失败。"
        )

    async def event_generator():
        try:
            result = tester.test_query(request.question, limit=request.limit)
            qa_results = result.get("qa_results", [])
            answer = result.get("answer", "")

            # Push the reference material before the answer starts streaming.
            refs_payload = json.dumps({"qa_results": qa_results}, ensure_ascii=False)
            yield f"event: refs\ndata: {refs_payload}\n\n"

            # Stream the answer in tiny chunks with a short pause per chunk
            # to produce a "typewriter" effect on the frontend.
            chunk_size = 1
            i = 0
            while i < len(answer):
                end = min(i + chunk_size, len(answer))
                delta = answer[i:end]
                payload = json.dumps({"delta": delta}, ensure_ascii=False)
                yield f"event: delta\ndata: {payload}\n\n"
                i = end
                await asyncio.sleep(0.04)

            # Terminal event so the client knows the stream finished cleanly.
            done_payload = json.dumps({
                "success": True,
                "question": request.question,
                "timestamp": datetime.now().isoformat()
            }, ensure_ascii=False)
            yield f"event: done\ndata: {done_payload}\n\n"
        except Exception as e:
            error_payload = json.dumps({"error": str(e)}, ensure_ascii=False)
            yield f"event: error\ndata: {error_payload}\n\n"

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive"
        }
    )

# GET variant of the SSE endpoint so the frontend can use a plain EventSource
@app.get("/api/qa/stream_get")
async def process_question_stream_get(question: str, limit: int = 20):
    """Same SSE protocol as /api/qa/stream, with query-string parameters.

    Raises:
        HTTPException 503: LLM helper failed to initialize at startup.
    """
    if not tester:
        raise HTTPException(status_code=503, detail="服务未就绪，LLM查询器初始化失败。")

    async def event_generator():
        try:
            result = tester.test_query(question, limit=limit)
            qa_results = result.get("qa_results", [])
            answer = result.get("answer", "")

            # References first, so the frontend can render sources immediately.
            refs_payload = json.dumps({"qa_results": qa_results}, ensure_ascii=False)
            yield f"event: refs\ndata: {refs_payload}\n\n"

            # One character per event, throttled — "typewriter" effect.
            chunk_size = 1
            for start in range(0, len(answer), chunk_size):
                delta = answer[start:start + chunk_size]
                payload = json.dumps({"delta": delta}, ensure_ascii=False)
                yield f"event: delta\ndata: {payload}\n\n"
                await asyncio.sleep(0.04)

            done_payload = json.dumps({
                "success": True,
                "question": question,
                "timestamp": datetime.now().isoformat()
            }, ensure_ascii=False)
            yield f"event: done\ndata: {done_payload}\n\n"
        except Exception as e:
            # Status line already sent — report failures as an SSE error event.
            error_payload = json.dumps({"error": str(e)}, ensure_ascii=False)
            yield f"event: error\ndata: {error_payload}\n\n"

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive"
        }
    )

# Script entry point: run the API with uvicorn on all interfaces, port 8000.
if __name__ == "__main__":
    print("🌟 水库调度智能问答API服务启动中...")
    print("📖 API文档: http://localhost:8000/api/docs")
    print("🔍 健康检查: http://localhost:8000/api/health")
    print("💬 问答接口: http://localhost:8000/api/qa")
    
    uvicorn.run(
        app, 
        host="0.0.0.0", 
        port=8000,
        reload=False,  # reload disabled for production-style runs
        log_level="info"
    )