"""
FastAPI 应用
提供RESTful API接口
"""
import os
import logging
from typing import List, Optional, Dict, Any
from pathlib import Path
import shutil
from fastapi import FastAPI, File, UploadFile, HTTPException, Depends, status, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from typing import List, Dict, Any, Optional, Union
import os
from datetime import datetime
import logging
from enum import Enum
import shutil
from pathlib import Path
import uuid
import json
import uvicorn

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 导入配置和模型
from config import settings, ModelType
from rag_service import RAGService, RAGConfig

# Create required working directories (idempotent).
os.makedirs("uploads", exist_ok=True)
os.makedirs("vector_store", exist_ok=True)

app = FastAPI(title="个人知识库API", version="0.1.0")

# Configure CORS.
# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive — tighten the origin list before a production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global RAG service instance (created/replaced by get_rag_service below).
rag_service = None

def get_rag_service(
    model_type: str = settings.model_type,
    model_name: Optional[str] = None,
    embedding_model: Optional[str] = None,
    device: str = settings.device,
    n_ctx: int = 4096,
    n_gpu_layers: int = 0
) -> RAGService:
    """
    Get or (re)create the shared RAG service instance.

    The global instance is rebuilt only when the requested configuration
    differs from the current one, so repeated calls with the same arguments
    are cheap.

    Args:
        model_type: Model backend ("openai", "local", "gguf").
        model_name: Model name or path; falls back to the settings default
            for the chosen backend.
        embedding_model: Embedding model name; falls back to the settings
            default.
        device: Device to run on (e.g. "cpu"/"cuda").
        n_ctx: Context window length (GGUF only).
        n_gpu_layers: Number of layers offloaded to GPU (GGUF only).

    Returns:
        RAGService: The shared RAG service instance.

    Raises:
        FileNotFoundError: If a GGUF model file cannot be located.
    """
    global rag_service

    try:
        # Resolve model / embedding defaults per backend.
        if model_type == "openai":
            model_name = model_name or settings.openai_model
            embedding_model = embedding_model or settings.local_embedding_model
        elif model_type == "gguf":
            model_name = model_name or settings.gguf_model_path
            embedding_model = embedding_model or settings.local_embedding_model

            # If the configured GGUF path is missing, fall back to
            # ./models/<basename> next to this file.
            if not os.path.exists(model_name):
                model_basename = os.path.basename(model_name)
                models_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")
                local_model_path = os.path.join(models_dir, model_basename)

                if os.path.exists(local_model_path):
                    model_name = local_model_path
                    logger.info(f"使用models目录下的模型: {model_name}")
                else:
                    raise FileNotFoundError(f"找不到GGUF模型文件: {model_name} 或 {local_model_path}")
        else:
            # Default: local Hugging Face model.
            model_name = model_name or settings.local_llm_model
            embedding_model = embedding_model or settings.local_embedding_model

        # Build the RAG configuration.
        rag_config = RAGConfig(
            model_type=ModelType(model_type),
            model_name=model_name,
            local_model_name=model_name,
            local_embedding_model=embedding_model,
            device=device
        )

        # GGUF-specific tuning knobs.
        if model_type == "gguf":
            rag_config.gguf_model_path = model_name
            rag_config.gguf_n_ctx = n_ctx
            rag_config.gguf_n_gpu_layers = n_gpu_layers

        # Create the service, or recreate it when the config changed.
        if rag_service is None:
            rag_service = RAGService(rag_config)
        else:
            if (rag_service.config.model_type != rag_config.model_type or
                rag_service.config.model_name != rag_config.model_name or
                rag_service.config.local_embedding_model != rag_config.local_embedding_model or
                (model_type == "gguf" and (
                    rag_service.config.gguf_n_ctx != rag_config.gguf_n_ctx or
                    rag_service.config.gguf_n_gpu_layers != rag_config.gguf_n_gpu_layers
                ))):
                rag_service = RAGService(rag_config)

        return rag_service
    except Exception as e:
        logger.error(f"初始化RAG服务失败: {str(e)}")
        raise

def get_file_size(file_path: str) -> int:
    """Return the size of *file_path* in bytes."""
    return Path(file_path).stat().st_size

def get_file_extension(file_path: str) -> str:
    """Return the lower-cased extension of *file_path* (including the dot)."""
    suffix = Path(file_path).suffix
    return suffix.lower()

def save_upload_file(upload_file: UploadFile, destination: str) -> str:
    """
    Persist an uploaded file under *destination* and return its path.

    SECURITY FIX: the client-supplied filename is reduced to its basename so a
    crafted name like "../../etc/passwd" cannot escape the destination
    directory (path traversal).

    Args:
        upload_file: The incoming file; its stream is always closed afterwards.
        destination: Directory the file is written into.

    Returns:
        The path of the saved file.

    Raises:
        HTTPException: 500 if the file cannot be written.
    """
    try:
        safe_name = os.path.basename(upload_file.filename)
        file_path = os.path.join(destination, safe_name)
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(upload_file.file, buffer)
        return file_path
    except Exception as e:
        logger.error(f"保存文件失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"保存文件失败: {str(e)}")
    finally:
        upload_file.file.close()

# 模型定义
class DocumentMetadata(BaseModel):
    """User-supplied metadata attached to an uploaded document."""
    source: Optional[str] = None  # origin of the document (file path or URL)
    category: Optional[str] = None  # free-form category label
    created_at: Optional[datetime] = None  # creation timestamp, if known
    custom_metadata: Optional[Dict[str, Any]] = None  # arbitrary extra fields

class DocumentModel(BaseModel):
    """A stored document as returned by the upload/list endpoints."""
    id: str  # deterministic/random UUID string
    name: str  # original filename
    path: str  # location on disk under the uploads directory
    size: int  # size in bytes
    file_type: str  # lower-cased extension, e.g. ".pdf"
    upload_time: datetime
    metadata: Optional[DocumentMetadata] = None

class SearchResult(BaseModel):
    """One scored snippet returned by the vector search endpoint."""
    document_id: str  # UUID derived from the source path
    document_name: str  # basename of the source document
    content: str  # matched chunk text
    score: float  # similarity score from the vector store
    metadata: Optional[Dict[str, Any]] = None

class DocumentUpload(BaseModel):
    """Document upload payload.

    NOTE(review): declaring ``UploadFile = File(...)`` inside a pydantic
    model is unusual, and this model is not referenced by any endpoint in
    this file (the upload route takes its parameters directly) — confirm
    whether it is still needed.
    """
    file: UploadFile = File(...)
    metadata: Optional[Dict[str, Any]] = None

class SearchQuery(BaseModel):
    """Search query payload."""
    query: str  # free-text query
    top_k: int = 4  # number of results to return
    filter: Optional[Dict[str, Any]] = None  # metadata filter for the vector store

class RAGQuery(SearchQuery):
    """RAG question payload — same fields as SearchQuery."""
    pass

class ModelConfig(BaseModel):
    """Model configuration payload for switching backends at runtime."""
    model_type: str  # "openai", "local" or "gguf"
    model_name: str  # model name or path
    embedding_model: str  # embedding model name
    n_ctx: Optional[int] = None  # context window (GGUF only)
    n_gpu_layers: Optional[int] = None  # GPU-offloaded layers (GGUF only)

class RAGResponse(BaseModel):
    """Answer payload returned by the /ask endpoint."""
    answer: str  # generated answer text (empty string on failure)
    sources: List[Dict[str, Any]] = []  # source references used for the answer
    context: List[Dict[str, Any]] = []  # retrieved context chunks
    error: Optional[str] = None  # error message when processing failed

# Initialize the default RAG service eagerly at import time.
# NOTE(review): get_rag_service can raise (e.g. missing GGUF model file),
# which would abort module import — consider lazy initialization on first use.
rag_service = get_rag_service()

# API端点
@app.post("/api/v1/documents/upload", response_model=DocumentModel)
async def upload_document(
    file: UploadFile = File(...),
    metadata: Optional[str] = None
):
    """
    Upload a document into the knowledge base.

    Args:
        file: The uploaded file (.txt/.pdf/.docx/.pptx only).
        metadata: Optional JSON-encoded metadata string; invalid JSON is
            ignored with a warning.

    Returns:
        A DocumentModel-shaped dict describing the stored document.

    Raises:
        HTTPException: 400 for unsupported file types, 500 on other failures.
    """
    try:
        # Validate the extension before saving anything.
        allowed_extensions = ['.txt', '.pdf', '.docx', '.pptx']
        file_ext = get_file_extension(file.filename)
        if file_ext not in allowed_extensions:
            raise HTTPException(
                status_code=400,
                detail=f"不支持的文件类型: {file_ext}. 支持的类型: {', '.join(allowed_extensions)}"
            )

        # Persist the upload to the local uploads directory.
        upload_dir = "uploads"
        os.makedirs(upload_dir, exist_ok=True)
        file_path = save_upload_file(file, upload_dir)

        # Parse client-supplied metadata (best effort).
        doc_metadata = {}
        if metadata:
            try:
                doc_metadata = json.loads(metadata)
            except json.JSONDecodeError:
                logger.warning(f"无效的元数据格式: {metadata}")

        # Index the document in the vector store.
        doc_id = str(uuid.uuid4())
        rag_service.add_documents([file_path], [doc_metadata])

        return {
            "id": doc_id,
            "name": file.filename,
            "path": file_path,
            "size": get_file_size(file_path),
            "file_type": file_ext,
            "upload_time": datetime.now().isoformat(),
            "metadata": doc_metadata
        }

    except HTTPException:
        # BUG FIX: re-raise deliberate HTTP errors (e.g. the 400 above);
        # previously the broad handler below rewrapped them as 500.
        raise
    except Exception as e:
        logger.error(f"上传文档失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"上传文档失败: {str(e)}"
        )

@app.get("/api/v1/documents", response_model=List[DocumentModel])
async def list_documents():
    """
    List all uploaded documents.

    Simplified implementation: enumerates files in the local ``uploads``
    directory instead of querying a database or the vector store.
    """
    try:
        upload_dir = "uploads"
        if not os.path.exists(upload_dir):
            return []

        entries = []
        for name in os.listdir(upload_dir):
            path = os.path.join(upload_dir, name)
            if not os.path.isfile(path):
                continue
            mtime = os.path.getmtime(path)
            entries.append({
                "id": str(uuid.uuid5(uuid.NAMESPACE_URL, path)),
                "name": name,
                "path": path,
                "size": get_file_size(path),
                "file_type": get_file_extension(name),
                "upload_time": datetime.fromtimestamp(mtime).isoformat(),
                "metadata": {}
            })

        return entries
    except Exception as e:
        logger.error(f"获取文档列表失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"获取文档列表失败: {str(e)}"
        )

@app.get("/api/v1/search", response_model=List[SearchResult])
async def search_documents(
    query: str,
    top_k: int = 4,
    filter: Optional[Dict[str, Any]] = None
):
    """
    Search the knowledge base and return scored snippets.
    """
    try:
        hits = rag_service.search(query, top_k=top_k, filter=filter)
        formatted = []
        for doc, score in hits:
            source = doc.metadata.get("source", "")
            formatted.append({
                "document_id": str(uuid.uuid5(uuid.NAMESPACE_URL, source)),
                "document_name": os.path.basename(source),
                "content": doc.page_content,
                "score": score,
                "metadata": doc.metadata
            })
        return formatted
    except Exception as e:
        logger.error(f"搜索文档失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"搜索文档失败: {str(e)}"
        )

@app.post("/api/v1/ask", response_model=RAGResponse)
async def ask_question(
    query: RAGQuery,
    model_config: Optional[ModelConfig] = None
):
    """
    Ask a question and get a RAG-generated answer.

    Args:
        query: The question plus optional metadata filter.
        model_config: Optional per-request model override; when given, the
            shared RAG service is reconfigured before answering.

    Returns:
        A RAGResponse-shaped dict; on failure ``error`` is set and ``answer``
        is empty (the endpoint still returns HTTP 200).
    """
    try:
        # BUG FIX: the original assigned to ``rag_service`` without declaring
        # it ``global``, which made the name function-local and raised
        # UnboundLocalError whenever ``model_config`` was omitted. Use an
        # explicit local that falls back to the module-level service.
        if model_config:
            service = get_rag_service(
                model_type=model_config.model_type,
                model_name=model_config.model_name,
                embedding_model=model_config.embedding_model,
                n_ctx=model_config.n_ctx or 4096,
                n_gpu_layers=model_config.n_gpu_layers or 0
            )
        else:
            service = rag_service

        # Run the retrieval-augmented generation chain.
        result = service.rag_chain(
            query=query.query,
            filter=query.filter
        )

        return {
            "answer": result.get("answer", "未能生成回答"),
            "sources": result.get("sources", []),
            "context": result.get("context", []),
            "error": None
        }

    except Exception as e:
        logger.error(f"提问时出错: {str(e)}")
        return {
            "answer": "",
            "sources": [],
            "context": [],
            "error": f"处理问题时出错: {str(e)}"
        }

@app.post("/api/v1/models/update")
async def update_model(config: ModelConfig):
    """
    Apply a new model configuration by rebuilding the shared RAG service.
    """
    global rag_service
    try:
        rag_service = get_rag_service(
            model_type=config.model_type,
            model_name=config.model_name,
            embedding_model=config.embedding_model,
            n_ctx=config.n_ctx or 4096,
            n_gpu_layers=config.n_gpu_layers or 0
        )
    except Exception as e:
        logger.error(f"更新模型配置时出错: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"更新模型配置时出错: {str(e)}"
        )
    return {"status": "success", "message": "模型配置已更新"}

@app.get("/api/v1/health")
async def health_check():
    """Liveness probe — always reports the service as up."""
    return dict(status="ok")

# Serve uploaded files statically under /uploads.
app.mount("/uploads", StaticFiles(directory="uploads"), name="uploads")

def run_server():
    """Launch the API under uvicorn, configured from the global settings."""
    options = {
        "host": settings.host,
        "port": settings.port,
        "reload": settings.debug,
        "log_level": "info",
    }
    uvicorn.run("api:app", **options)

# Script entry point: start the development server.
if __name__ == "__main__":
    run_server()
