# server/src/main.py
import os
from pathlib import Path
from fastapi import FastAPI, APIRouter, HTTPException, Request
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional, List, Dict
from model_manager import model_manager
import torch
import psutil
import json
from loguru import logger
from schemas import ChatMessage,ChatRequest  # ✅ 从独立模块导入
import warnings
# Suppress DeprecationWarning noise (e.g. from third-party dependencies)
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Web-search support modules
from config import Config
from search.base import SearchProvider, DummySearch
# Concrete search providers
from search.bing import BingSearch
from search.ddg import DuckDuckGoSearch
from search.baidu import BaiduSearch

# Resolve absolute paths from this file's location
BASE_DIR = Path(__file__).resolve().parent.parent.parent  # 项目根目录
STATIC_DIR = BASE_DIR / "chat_ui" / "dist"

# Create the FastAPI application
app = FastAPI(title="AI Studio")

# Allow cross-origin requests from any origin.
# NOTE(review): "*" for origins/methods/headers is development-friendly but
# wide open — tighten for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Request model: ChatRequest is now defined in schemas.py (imported above);
# the old inline definition has been removed.

class ModelLoadRequest(BaseModel):
    """Request body for POST /api/load_model."""
    model_path: str  # path string supplied by the frontend, joined onto BASE_DIR

# Router collecting all API endpoints; registered on the app further below
api_router = APIRouter()



# Search-provider factory
def get_search_provider(provider_name: str) -> SearchProvider:
    """Return an instance of the search provider matching *provider_name*.

    Falls back to ``DummySearch`` when the name is unknown, empty, or None,
    so callers always receive a usable ``SearchProvider`` object.
    """
    providers = {
        "bing": BingSearch,
        "ddg": DuckDuckGoSearch,
        "baidu": BaiduSearch,
    }
    # Guard: the request payload may omit the provider name entirely
    # (the old code raised AttributeError on None.lower()).
    key = (provider_name or "").lower()
    return providers.get(key, DummySearch)()

# LLM chat-completions endpoint (OpenAI-style)
@api_router.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    """Generate a chat completion, optionally augmented with web-search results.

    Streams SSE chunks when ``request.stream`` is true; otherwise returns a
    single OpenAI-style ``choices`` payload.

    Raises:
        HTTPException 400: no model is currently loaded.
        HTTPException 500: generation failed.
    """
    if not model_manager.model:
        raise HTTPException(status_code=400, detail="Model not loaded")

    # Work on plain dicts throughout, so the search-augmented and the plain
    # paths hand the same message shape to the generators below.
    # (Bug fix: the old code passed pydantic models to stream_generate when
    # search was off, and read `msg.role` on dicts when search succeeded.)
    processed_messages = [msg.dict() for msg in request.messages]

    # Optional web-search augmentation (best effort: any failure degrades
    # gracefully to a plain chat completion).
    search_success = False
    if request.use_search:
        try:
            logger.info(f"启用联网搜索功能 | 尝试使用[{request.search_provider}]进行联网搜索...")

            # Resolve the configured search engine (DummySearch = invalid).
            search_engine = get_search_provider(request.search_provider)
            if isinstance(search_engine, DummySearch):
                raise RuntimeError("搜索引擎配置无效")

            # Only the latest user message is used as the search query.
            if processed_messages and processed_messages[-1]["role"] == "user":
                user_query = processed_messages[-1]["content"]
                search_results = search_engine.search(user_query)
                if search_results:
                    # Inject the results as a system message just before the
                    # final user turn.
                    processed_messages.insert(-1, {
                        "role": "system",
                        "content": f"网络搜索结果：{search_results}\n请根据以上信息回答"
                    })
                    search_success = True
                    logger.debug("搜索成功并插入结果")
                else:
                    logger.warning("搜索引擎返回空结果")
        except Exception as e:
            logger.error(f"搜索失败，将使用常规模式 | 错误：{str(e)}")
            request.use_search = False  # force-disable search for this request
            search_success = False

    try:
        # processed_messages already contains the injected system message when
        # the search succeeded; otherwise it equals the original conversation.
        final_messages = processed_messages

        if request.stream:
            logger.debug("流式接口被调用")
            response = StreamingResponse(
                stream_generate(final_messages, request),
                media_type="text/event-stream",
            )
            response.headers["Cache-Control"] = "no-cache"  # disable caching for SSE
            return response

        logger.debug("普通接口被调用")
        # Non-streaming path: flatten the conversation into a single prompt.
        # (Bug fix: a separating space before the trailing "assistant: " was
        # missing, fusing it onto the last message's content.)
        prompt = " ".join(
            f"{msg['role']}: {msg['content']}" for msg in final_messages
        ) + " assistant: "
        response = model_manager.generate(
            prompt=prompt,
            max_length=request.max_tokens,
            temperature=request.temperature,
            top_p=request.top_p,
            repetition_penalty=request.repetition_penalty
        )
        return {
            "choices": [{
                "message": {
                    "role": "assistant",
                    "content": response
                }
            }]
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# Forced web-search chat endpoint
@api_router.post("/v1/chat/completions_with_search")
async def chat_completions_with_search(request: ChatRequest):
    """Like /v1/chat/completions, but always performs a web search first.

    Raises:
        HTTPException 400: no model is currently loaded.
        HTTPException 500: generation failed.
    """
    if not model_manager.model:
        raise HTTPException(status_code=400, detail="Model not loaded")

    # This endpoint always searches, regardless of the client's flag.
    request.use_search = True

    messages = [msg.dict() for msg in request.messages]

    logger.info("强制启用联网搜索功能")
    search_engine = get_search_provider(request.search_provider)
    # Guard against an empty conversation (the old code raised IndexError).
    if messages and messages[-1]["role"] == "user":
        user_query = messages[-1]["content"]
        search_results = search_engine.search(user_query)
        if search_results:
            # Inject results as a system message just before the user turn.
            messages.insert(-1, {
                "role": "system",
                "content": f"网络搜索结果：{search_results}\n请根据以上信息回答"
            })

    try:
        if request.stream:
            response = StreamingResponse(
                stream_generate(messages, request),
                media_type="text/event-stream",
            )
            response.headers["Cache-Control"] = "no-cache"  # disable caching for SSE
            return response

        # Bug fix: build the prompt from the search-augmented ``messages`` —
        # the old code used ``request.messages`` and silently dropped the
        # injected search results in non-stream mode. Also adds the missing
        # separator before "assistant: ".
        prompt = " ".join(
            f"{msg['role']}: {msg['content']}" for msg in messages
        ) + " assistant: "
        response = model_manager.generate(
            prompt=prompt,
            max_length=request.max_tokens,
            temperature=request.temperature,
            top_p=request.top_p,
            repetition_penalty=request.repetition_penalty
        )
        return {
            "choices": [{
                "message": {
                    "role": "assistant",
                    "content": response
                }
            }]
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# SSE token stream generator
def stream_generate(messages: List[Dict], request: ChatRequest):
    """Yield model tokens as OpenAI-style server-sent-event chunks.

    Each token becomes one ``data: {...}\\n\\n`` line; the stream ends with a
    ``data: [DONE]`` sentinel. Errors are logged server-side only — nothing
    error-shaped is forwarded to the client.
    """
    try:
        token_iter = model_manager.generate_stream(
            messages=messages,
            max_length=request.max_tokens,
            temperature=request.temperature,
            top_p=request.top_p,
            repetition_penalty=request.repetition_penalty,
        )
        for piece in token_iter:
            chunk = {
                "choices": [{
                    "delta": {"role": "assistant", "content": piece}
                }]
            }
            yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
        yield "data: [DONE]\n\n"
    except Exception as exc:
        # Deliberately swallow: don't expose internals to the client stream.
        logger.error(f"Stream generation failed: {str(exc)}")

# Endpoint letting the user abort an in-flight generation
@api_router.post("/api/stop_generation")
async def stop_generation():
    """Ask the model manager to cancel the current generation run."""
    try:
        model_manager.stop_generation()
        logger.success("Generation stopped successfully")
        return {"detail": "Generation stopped successfully."}
    except Exception as exc:
        logger.error(f"Stop generation failed: {str(exc)}")
        raise HTTPException(status_code=500, detail=str(exc))

# Load a model from a directory under the project root
@api_router.post("/api/load_model")
async def load_model(request: ModelLoadRequest):
    """Load the model located at ``BASE_DIR / request.model_path``.

    Raises:
        HTTPException 400: path escapes the project root or is not a directory.
        HTTPException 404: path does not exist.
        HTTPException 500: the model manager failed to load the model.
    """
    model_path = (BASE_DIR / request.model_path).resolve()
    # Security: the client-supplied path must stay inside the project root —
    # the old code joined it unchecked, so "../../..." could escape BASE_DIR.
    try:
        model_path.relative_to(BASE_DIR.resolve())
    except ValueError:
        raise HTTPException(status_code=400, detail="Invalid model directory")
    if not model_path.exists():
        raise HTTPException(status_code=404, detail="Model path not found")
    if not model_path.is_dir():
        raise HTTPException(status_code=400, detail="Invalid model directory")

    try:
        model_manager.load_model(model_path)  # model_manager accepts a Path
        return {"status": f"Model {model_path.name} loaded successfully"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# Report whether a model is loaded and which device it runs on
@api_router.get("/api/status")
def get_status():
    """Return the current model-loading state for the UI status panel."""
    is_loaded = bool(model_manager.model)
    return {
        "model_loaded": is_loaded,
        "current_model": model_manager.loaded_model_name,
        "device": model_manager.device,
    }

# Report host CPU/RAM usage (and GPU memory when CUDA is available)
@api_router.get("/api/system_info")
def get_system_info():
    """Return CPU usage (%) plus RAM/GPU-memory figures in GiB, formatted as
    strings for direct display in the frontend."""
    try:
        memory = psutil.virtual_memory()
        info = {
            "cpu_usage": f"{psutil.cpu_percent()}",
            "memory_used": f"{memory.used / 1024**3:.2f}",
            "memory_total": f"{memory.total / 1024**3:.2f}",
        }
        if torch.cuda.is_available():
            allocated_gib = torch.cuda.memory_allocated() / 1024**3
            total_gib = torch.cuda.get_device_properties(0).total_memory / 1024**3
            info["gpu_memory_used"] = f"{allocated_gib:.2f}"
            info["gpu_memory_total"] = f"{total_gib:.2f}"
        return info
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))

# Register the API routes on the app
app.include_router(api_router)

# Mount static assets built by the frontend (chat_ui/dist/assets)
app.mount(
    "/assets",
    StaticFiles(directory=STATIC_DIR / "assets"),
    name="assets"
)
# NOTE(review): mounting "/" with html=True serves index.html for unmatched
# paths itself; since this mount is registered before the catch-all route at
# the bottom of the file, that route is likely shadowed — confirm which SPA
# fallback is intended.
app.mount(
    "/",
    StaticFiles(directory=STATIC_DIR, html=True),
    name="root"
)

# Load the default model when the application starts
@app.on_event("startup")
async def startup_event():
    """Attempt to load the bundled default model; a missing or broken model
    is logged but never prevents the server from starting."""
    default_model_dir = BASE_DIR / "models/DeepSeek-R1-1.5B"
    if not default_model_dir.exists():
        return  # nothing to load — start without a model
    try:
        model_manager.load_model(default_model_dir)
    except Exception as exc:
        logger.error(f"Failed to load default model: {str(exc)}")

# SPA fallback route: serve index.html for any otherwise-unmatched path.
# NOTE(review): the "/" StaticFiles mount above (html=True) is registered
# first and may already handle every path, leaving this route unreachable —
# confirm.
@app.get("/{full_path:path}")
async def catch_all(full_path: str):
    return FileResponse(STATIC_DIR / "index.html")

if __name__ == "__main__":
    import uvicorn
    # Development entry point; browse to http://localhost:8000/ to use the app
    uvicorn.run(
        app,
        host="127.0.0.1", # localhost only — not reachable from other hosts
        port=8000,
        log_level="debug"
    )
       