"""
FastAPI 服务：提供文件上传、配置更新、索引重建、RAG 查询与 Agent 查询接口。

与现有模块保持解耦，复用 config / embedding / retrieval / prompt / generation 等核心能力。
"""

from __future__ import annotations

import os
from pathlib import Path
from typing import Any, Dict, List, Optional
import json
from datetime import datetime
import shutil

from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import RedirectResponse
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field

from ..config import config
from ..data_reader import document_loader
from ..data_reader.document_loader import DocumentLoader
from ..data_storage import index_manager, kb_registry, IndexManager
from ..data_chunk import document_chunker
from ..data_embedding import embedding_manager
from ..retrieval import rag_engine
from ..generation import llm_manager
from ..prompt_engineering import prompt_manager
from ..agent.service import AgentService


# Default (single-KB) storage locations, resolved once from the runtime config.
DATA_DIR = Path(config.data_dir)
STORAGE_DIR = Path(config.index_dir)


class ConfigUpdate(BaseModel):
    """Partial runtime-config update; only non-None fields are applied."""

    data_dir: Optional[str] = None
    index_dir: Optional[str] = None
    local_model_path: Optional[str] = None
    ollama_model: Optional[str] = None
    openai_base_url: Optional[str] = None
    openai_chat_model: Optional[str] = None
    openai_embedding_model: Optional[str] = None
    api_key: Optional[str] = Field(default=None, description="OPENAI_API_KEY 或兼容 key")
    # Chunking / concurrency settings
    chunk_strategy: Optional[str] = None
    chunk_size: Optional[int] = None
    chunk_overlap: Optional[int] = None
    min_chunk_size: Optional[int] = None
    max_chunk_size: Optional[int] = None
    semantic_chunk_threshold: Optional[float] = None
    semantic_chunk_similarity_window_size: Optional[int] = None
    max_concurrent_chunking: Optional[int] = None


class QueryRequest(BaseModel):
    """Request body shared by /query and /query_stream."""

    query: str
    mode: str = Field(default="rag", description="rag | agent")
    top_k: int = 5
    show_sources: bool = True
    kb_id: str = Field(default="default", description="知识库ID")


class RebuildRequest(BaseModel):
    """Request body for index rebuild endpoints; force drops the old index first."""

    force: bool = True
    kb_id: str = Field(default="default", description="知识库ID")


class KBCreateRequest(BaseModel):
    """Request body for creating a knowledge base."""

    name: str = Field(..., description="知识库名称")
    data_dir: Optional[str] = Field(default=None, description="可选自定义数据目录")
    index_dir: Optional[str] = Field(default=None, description="可选自定义索引目录")


class KBDeleteRequest(BaseModel):
    """Request body for deleting a knowledge base."""

    delete_files: bool = Field(default=False, description="是否删除原始文件")


def ensure_dirs() -> None:
    """Create the default data and index directories if they are missing."""
    for directory in (DATA_DIR, STORAGE_DIR):
        directory.mkdir(parents=True, exist_ok=True)


def get_kb_or_404(kb_id: str) -> Dict[str, Any]:
    """Look up a knowledge base record by id, raising HTTP 404 when absent."""
    record = kb_registry.get_kb(kb_id)
    if not record:
        raise HTTPException(status_code=404, detail="知识库不存在")
    # Guarantee both backing directories exist before callers touch them.
    for dir_key in ("data_dir", "index_dir"):
        Path(record[dir_key]).mkdir(parents=True, exist_ok=True)
    return record


def build_loader(kb: Dict[str, Any]) -> DocumentLoader:
    """Instantiate a DocumentLoader rooted at the KB's data directory."""
    data_root = kb["data_dir"]
    return DocumentLoader(data_dir=data_root)


def build_index_manager(kb: Dict[str, Any]) -> IndexManager:
    """Instantiate an IndexManager rooted at the KB's index directory."""
    index_root = kb["index_dir"]
    return IndexManager(index_dir=index_root)


def collect_kb_stats(kb_id: str, loader: Optional[DocumentLoader] = None, manager: Optional[IndexManager] = None) -> None:
    """Refresh the registry's cached stats (files + index info) for one KB.

    Args:
        kb_id: Target knowledge base id; raises HTTP 404 if unknown.
        loader: Optional pre-built loader to reuse; created otherwise.
        manager: Optional pre-built index manager to reuse; created otherwise.
    """
    from datetime import timezone  # local import keeps this fix self-contained

    kb = get_kb_or_404(kb_id)
    loader = loader or build_loader(kb)
    manager = manager or build_index_manager(kb)

    # Stats collection is best-effort: a broken loader/index must not
    # prevent the registry entry from being refreshed.
    try:
        file_stats = loader.get_file_stats()
    except Exception:
        file_stats = {}

    try:
        index_info = manager.get_index_info()
    except Exception:
        index_info = {}

    kb_registry.update_stats(
        kb_id,
        {
            "file_stats": file_stats,
            "index_info": index_info,
            "index_exists": index_info.get("index_exists", False),
            # Timezone-aware replacement for the deprecated datetime.utcnow().
            "updated_at": datetime.now(timezone.utc).isoformat(),
        },
    )


def init_models() -> None:
    """Initialize the embedding model and LLM; effectively runs on first call."""
    embedding_manager.configure_llama_index()
    llm_manager.configure_llama_index()


def load_or_build_index_for_kb(kb_id: str, force_rebuild: bool = False) -> None:
    """Load (or rebuild) one KB's index and point the shared RAG engine at it."""
    kb = get_kb_or_404(kb_id)
    doc_loader = build_loader(kb)
    idx_manager = build_index_manager(kb)

    if force_rebuild:
        # Drop the persisted index so get_or_create_index builds from scratch.
        idx_manager.delete_index()

    documents = doc_loader.load_documents()
    if not documents:
        raise HTTPException(status_code=400, detail="未找到可用文档，请先上传后再重建索引。")

    rag_engine.update_index(idx_manager.get_or_create_index(documents))
    collect_kb_stats(kb_id, loader=doc_loader, manager=idx_manager)


def update_env_file(updates: Dict[str, Any]) -> None:
    """Persist selected config values to the .env file.

    Comments, blank lines, and key order of an existing .env are preserved:
    known keys are updated in place and new keys are appended. (The previous
    implementation rewrote the file and silently dropped every comment.)
    When no mapped update is provided the file is left untouched.

    Args:
        updates: Values keyed by ConfigUpdate attribute name; entries that
            are None or not in the known mapping are ignored.
    """
    mapping = {
        "data_dir": "DATA_DIR",
        "index_dir": "INDEX_DIR",
        "local_model_path": "LOCAL_MODEL_PATH",
        "ollama_model": "OLLAMA_MODEL",
        "openai_base_url": "OPENAI_BASE_URL",
        "openai_chat_model": "OPENAI_CHAT_MODEL",
        "openai_embedding_model": "OPENAI_EMBEDDING_MODEL",
        "api_key": "OPENAI_API_KEY",
        "chunk_strategy": "CHUNK_STRATEGY",
        "chunk_size": "CHUNK_SIZE",
        "chunk_overlap": "CHUNK_OVERLAP",
        "min_chunk_size": "MIN_CHUNK_SIZE",
        "max_chunk_size": "MAX_CHUNK_SIZE",
        "semantic_chunk_threshold": "SEMANTIC_CHUNK_THRESHOLD",
        "semantic_chunk_similarity_window_size": "SEMANTIC_CHUNK_SIMILARITY_WINDOW_SIZE",
        "max_concurrent_chunking": "MAX_CONCURRENT_CHUNKING",
    }
    pending = {
        env_key: str(updates[key])
        for key, env_key in mapping.items()
        if updates.get(key) is not None
    }
    if not pending:
        return  # nothing mapped to write; leave .env untouched

    env_path = Path(".env")
    lines = (
        env_path.read_text(encoding="utf-8").splitlines() if env_path.exists() else []
    )

    out_lines: List[str] = []
    for line in lines:
        stripped = line.strip()
        if stripped and not stripped.startswith("#") and "=" in line:
            key = line.split("=", 1)[0].strip()
            if key in pending:
                # Update the existing assignment in place, keeping its position.
                out_lines.append(f"{key}={pending.pop(key)}")
                continue
        out_lines.append(line)

    # Append any keys that were not present in the file yet.
    out_lines.extend(f"{k}={v}" for k, v in pending.items())
    env_path.write_text("\n".join(out_lines) + "\n", encoding="utf-8")


def prepare_incremental_documents(kb_id: str):
    """
    Incremental scan driven by file content hashes.

    Returns (loader, paths of new/changed files, refreshed hash table).
    """
    kb = get_kb_or_404(kb_id)
    loader = build_loader(kb)
    known_hashes = kb_registry.get_file_hashes(kb_id)
    refreshed_hashes = dict(known_hashes)
    changed_paths: List[str] = []

    for file_info in loader.scanner.scan_directory():
        if hasattr(file_info, "is_supported") and not file_info.is_supported:
            continue
        file_path = Path(file_info.path)
        if not (file_path.exists() and file_path.is_file()):
            continue

        # Key hashes by KB-relative path when possible, absolute path otherwise.
        try:
            hash_key = str(file_path.relative_to(kb["data_dir"]))
        except Exception:
            hash_key = str(file_path)

        digest = kb_registry.compute_file_hash(file_path)
        previous = known_hashes.get(hash_key)
        refreshed_hashes[hash_key] = digest

        if previous != digest:
            changed_paths.append(str(file_path))

    return loader, changed_paths, refreshed_hashes


def migrate_default_to_kb(target_kb_id: str = "main") -> Dict[str, Any]:
    """
    Copy the current default data_dir / index_dir into a dedicated KB directory.

    Safe by design: files are copied, never moved or deleted, and files already
    present at the destination are left untouched.
    """
    source_data = Path(config.data_dir).resolve()
    source_index = Path(config.index_dir).resolve()

    dest_data = kb_registry.base_data_dir / target_kb_id
    dest_index = kb_registry.base_index_dir / target_kb_id
    dest_data.mkdir(parents=True, exist_ok=True)
    dest_index.mkdir(parents=True, exist_ok=True)

    def mirror_missing(src: Path, dst: Path) -> int:
        """Copy files absent from dst (directory tree mirrored); return the count."""
        if not src.exists():
            return 0
        total = 0
        for root, _dirs, filenames in os.walk(src):
            dest_root = dst / Path(root).relative_to(src)
            dest_root.mkdir(parents=True, exist_ok=True)
            for name in filenames:
                destination = dest_root / name
                if destination.exists():
                    continue
                shutil.copy2(Path(root) / name, destination)
                total += 1
        return total

    data_count = mirror_missing(source_data, dest_data)
    index_count = mirror_missing(source_index, dest_index)

    # Register the KB; ValueError means it already exists and is ignored.
    try:
        kb_registry.create_kb(
            name=target_kb_id,
            data_dir=str(dest_data),
            index_dir=str(dest_index),
        )
    except ValueError:
        pass

    # Best-effort stats refresh; never fail the migration over it.
    try:
        collect_kb_stats(
            target_kb_id,
            loader=DocumentLoader(data_dir=str(dest_data)),
            manager=IndexManager(index_dir=str(dest_index)),
        )
    except Exception:
        pass

    return {
        "kb_id": target_kb_id,
        "data_copied": data_count,
        "index_copied": index_count,
        "target_data": str(dest_data),
        "target_index": str(dest_index),
    }


app = FastAPI(title="SimpleRAG API", version="0.1.0")
# Wide-open CORS: fine for a local/dev deployment; tighten for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Serve the bundled frontend (repo-root /frontend) when it exists.
static_dir = Path(__file__).resolve().parent.parent.parent / "frontend"
if static_dir.exists():
    # Mounted under /ui to avoid clashing with the API routes.
    app.mount("/ui", StaticFiles(directory=static_dir, html=True), name="frontend")


@app.on_event("startup")
def on_startup():
    """Warm up models and the default KB index; tolerate an empty deployment."""
    ensure_dirs()
    # Best-effort: a missing/empty KB must not block server start —
    # the frontend will prompt the user to upload documents instead.
    try:
        init_models()
        load_or_build_index_for_kb(kb_id="default", force_rebuild=False)
    except Exception as exc:
        print(f"[startup] 初始化时未完成索引: {exc}")


@app.get("/health")
def health():
    """Liveness probe."""
    return {"status": "ok"}

@app.get("/")
def redirect_root():
    """Send bare-root visitors to the bundled frontend."""
    return RedirectResponse(url="/ui/")


@app.get("/config")
def get_config():
    """Return the full runtime configuration consumed by the settings UI."""
    # Older config objects may not define chunk_strategy yet.
    strategy = (
        config.chunk_strategy.value if hasattr(config, 'chunk_strategy') else "semantic"
    )
    return {
        "data_dir": config.data_dir,
        "index_dir": config.index_dir,
        "local_model_path": config.local_model_path,
        "ollama_model": config.ollama_model,
        "openai_base_url": config.openai_base_url,
        "openai_chat_model": config.openai_chat_model,
        "openai_embedding_model": config.openai_embedding_model,
        "api_key": config.openai_api_key,
        # Chunking settings
        "chunk_strategy": strategy,
        "chunk_size": config.chunk_size,
        "chunk_overlap": config.chunk_overlap,
        "min_chunk_size": config.min_chunk_size,
        "max_chunk_size": config.max_chunk_size,
        "semantic_chunk_threshold": config.semantic_chunk_threshold,
        "semantic_chunk_similarity_window_size": config.semantic_chunk_similarity_window_size,
        "max_concurrent_chunking": config.max_concurrent_chunking,
    }


@app.post("/config")
def post_config(body: ConfigUpdate):
    """Persist supplied settings to .env; key paths are also mirrored to os.environ."""
    updates = body.model_dump(exclude_none=True)
    if not updates:
        return {"updated": False, "message": "未提供更新项"}

    update_env_file(updates)
    # Mirror critical paths into the environment for code re-reading env vars.
    for env_key, value in (("DATA_DIR", body.data_dir), ("INDEX_DIR", body.index_dir)):
        if value:
            os.environ[env_key] = value

    return {"updated": True, "message": "配置已写入 .env，请重启或重建索引生效"}


# ----------------- 知识库管理 API -----------------
@app.get("/kb")
def list_kb():
    """Enumerate all registered knowledge bases."""
    return {"items": kb_registry.list_kb()}


@app.post("/kb")
def create_kb(body: KBCreateRequest):
    """Create a knowledge base; 400 on validation errors (e.g. duplicate name)."""
    try:
        kb = kb_registry.create_kb(body.name, body.data_dir, body.index_dir)
        return {"kb": kb}
    except ValueError as e:
        # Chain the cause so logs keep the original traceback.
        raise HTTPException(status_code=400, detail=str(e)) from e
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e


@app.delete("/kb/{kb_id}")
def delete_kb(kb_id: str, body: KBDeleteRequest):
    """Delete a KB; body.delete_files additionally removes its raw files."""
    try:
        ok = kb_registry.delete_kb(kb_id, delete_files=body.delete_files)
        return {"deleted": ok, "kb_id": kb_id}
    except ValueError as e:
        # Chain the cause so logs keep the original traceback.
        raise HTTPException(status_code=400, detail=str(e)) from e
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e


@app.get("/kb/{kb_id}/files")
def list_kb_files(kb_id: str):
    """List every file discovered in the KB's data directory."""
    kb = get_kb_or_404(kb_id)
    loader = build_loader(kb)
    files_info = loader.scanner.scan_directory()

    files_payload = []
    for f in files_info:
        # file_type may be an enum, a plain value, None, or missing entirely.
        # The old expression dereferenced .value unconditionally when the
        # attribute existed and crashed on None / non-enum values.
        file_type = getattr(f, "file_type", None)
        files_payload.append(
            {
                "path": f.path,
                "name": getattr(f, "name", Path(f.path).name),
                "extension": getattr(f, "extension", Path(f.path).suffix),
                "file_type": getattr(file_type, "value", file_type),
                "size": getattr(f, "size", None) or getattr(f, "size_bytes", None),
                "modified_time": getattr(f, "modified_time", None),
                "is_supported": getattr(f, "is_supported", True),
            }
        )

    return {"kb_id": kb_id, "files": files_payload}


@app.post("/kb/{kb_id}/upload")
async def upload_files_kb(kb_id: str, files: List[UploadFile] = File(...)):
    """Save uploaded files into the KB's data directory.

    Filenames are reduced to their basename so a crafted name such as
    "../../x" cannot escape the data directory (path traversal); a missing
    or empty filename is rejected with HTTP 400.
    """
    kb = get_kb_or_404(kb_id)
    target_dir = Path(kb["data_dir"])
    target_dir.mkdir(parents=True, exist_ok=True)
    saved = []
    for f in files:
        safe_name = Path(f.filename or "").name
        if not safe_name:
            raise HTTPException(status_code=400, detail="非法文件名")
        target = target_dir / safe_name
        with target.open("wb") as out:
            out.write(await f.read())
        saved.append(str(target))
    return {"kb_id": kb_id, "saved": saved}


@app.post("/kb/{kb_id}/rebuild")
def rebuild_kb(kb_id: str, req: RebuildRequest):
    """Rebuild (or create) one KB's index; req.force drops the old index first."""
    try:
        init_models()
        load_or_build_index_for_kb(kb_id=kb_id, force_rebuild=req.force)
        return {"success": True, "message": "索引已重建", "kb_id": kb_id}
    except HTTPException:
        # Bare re-raise keeps the original traceback intact (vs `raise e`).
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e


@app.post("/kb/{kb_id}/update")
def update_kb(kb_id: str):
    """Incrementally index only files that are new or changed (by content hash)."""
    try:
        kb = get_kb_or_404(kb_id)
        loader, candidate_paths, new_hashes = prepare_incremental_documents(kb_id)
        if not candidate_paths:
            return {"success": True, "kb_id": kb_id, "message": "没有新增或变更的文件", "updated_files": 0}

        # Only load files whose hash actually changed.
        docs = loader.reader.load_documents(
            candidate_paths,
            show_progress=False,
            max_retries=3,
        )
        if not docs:
            return {"success": False, "kb_id": kb_id, "message": "未能加载新的文档"}

        manager = build_index_manager(kb)
        manager.update_index(docs)
        # Persist the new hash table only after a successful index update.
        kb_registry.set_file_hashes(kb_id, new_hashes)
        collect_kb_stats(kb_id, loader=loader, manager=manager)
        return {"success": True, "kb_id": kb_id, "updated_files": len(candidate_paths)}
    except HTTPException:
        # Bare re-raise keeps the original traceback intact (vs `raise e`).
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e


@app.post("/kb/migrate_default")
def migrate_default_kb():
    """
    Copy the content of config.data_dir / config.index_dir into the "main" KB
    without deleting the original data.
    """
    try:
        outcome = migrate_default_to_kb("main")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {"success": True, **outcome}


@app.post("/upload")
async def upload_files(files: List[UploadFile] = File(...)):
    """Save uploads into the default KB's data directory.

    Filenames are reduced to their basename so a crafted name such as
    "../../x" cannot escape the data directory (path traversal); a missing
    or empty filename is rejected with HTTP 400.
    """
    kb = get_kb_or_404("default")
    target_dir = Path(kb["data_dir"])
    target_dir.mkdir(parents=True, exist_ok=True)
    saved = []
    for f in files:
        safe_name = Path(f.filename or "").name
        if not safe_name:
            raise HTTPException(status_code=400, detail="非法文件名")
        target = target_dir / safe_name
        with target.open("wb") as out:
            out.write(await f.read())
        saved.append(str(target))
    return {"kb_id": "default", "saved": saved}


@app.post("/rebuild")
def rebuild(req: RebuildRequest):
    """Legacy rebuild endpoint; delegates to the KB named in the request body."""
    try:
        init_models()
        load_or_build_index_for_kb(kb_id=req.kb_id, force_rebuild=req.force)
    except HTTPException as e:
        raise e
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {"success": True, "message": "索引已重建", "kb_id": req.kb_id}


@app.post("/query")
def query(body: QueryRequest):
    """Non-streaming query endpoint.

    Dispatches to the agent ("agent" mode) or the RAG pipeline (default
    "rag" mode) against the knowledge base named by body.kb_id.
    """
    if not body.query.strip():
        raise HTTPException(status_code=400, detail="查询内容为空")

    kb = get_kb_or_404(body.kb_id)
    manager = build_index_manager(kb)

    try:
        init_models()
        index = manager.load_index()
        # A loaded index without a configured retriever still needs a rebuild
        # pass so the shared RAG engine gets wired up.
        if index is None or rag_engine.retriever is None:
            load_or_build_index_for_kb(kb_id=body.kb_id, force_rebuild=False)
            index = manager.load_index()
    except Exception:
        # No usable index: fall back to loading/creating one from documents.
        load_or_build_index_for_kb(kb_id=body.kb_id, force_rebuild=False)
        index = manager.load_index()

    if index is None:
        raise HTTPException(status_code=500, detail="索引未就绪")

    # Point the shared RAG engine at the current KB's index.
    rag_engine.update_index(index)

    if body.mode == "agent":
        agent = AgentService()
        result = agent.run(query=body.query)
        return {
            "mode": "agent",
            "answer": result.get("answer"),
            "steps": result.get("steps", []),
            "sources": result.get("sources", []),
        }

    # RAG mode
    response = rag_engine.query_with_details(body.query)
    payload = {
        "mode": "rag",
        "answer": response.answer,
        "retrieval_time": response.retrieval_time,
        "generation_time": response.generation_time,
        "total_time": response.total_time,
        "kb_id": body.kb_id,
    }
    if body.show_sources:
        payload["sources"] = response.sources
    return payload


@app.post("/query_stream")
def query_stream(body: QueryRequest):
    """
    Stream query results as plain text (consumed by the frontend via
    fetch + reader). RAG mode uses real token streaming.
    """
    if not body.query.strip():
        raise HTTPException(status_code=400, detail="查询内容为空")

    kb = get_kb_or_404(body.kb_id)
    manager = build_index_manager(kb)

    # Make sure models and the KB's index are ready before streaming starts.
    try:
        init_models()
        index = manager.load_index()
        if index is None or rag_engine.retriever is None:
            load_or_build_index_for_kb(kb_id=body.kb_id, force_rebuild=False)
            index = manager.load_index()
    except Exception:
        load_or_build_index_for_kb(kb_id=body.kb_id, force_rebuild=False)
        index = manager.load_index()

    if index is None:
        raise HTTPException(status_code=500, detail="索引未就绪")

    rag_engine.update_index(index)

    def stream():
        # Runs inside StreamingResponse; exceptions are surfaced to the
        # client as an "[ERROR]..." chunk rather than an HTTP 500.
        try:
            if body.mode == "agent":
                agent = AgentService()
                # Agent streaming granularity (pseudo-stream vs per-step)
                # depends on the AgentService implementation.
                result_gen = agent.run_stream(query=body.query)
                for chunk in result_gen:
                    yield chunk

                # NOTE(review): the agent path currently emits no sources
                # block; if AgentService.run_stream ever yields sources,
                # handle them here for parity with the RAG branch.
            else:
                # RAG mode: real streaming via LlamaIndex. A dict chunk tagged
                # "__type__": "sources" carries the retrieval sources and is
                # forwarded as a trailing "[SOURCES]" JSON section.
                for chunk in rag_engine.query_stream(body.query):
                    if isinstance(chunk, dict) and chunk.get("__type__") == "sources":
                        if body.show_sources:
                            yield "\n\n[SOURCES]\n" + json.dumps(chunk["data"], ensure_ascii=False)
                    else:
                        yield str(chunk)

        except Exception as e:
            yield f"[ERROR]{str(e)}"

    return StreamingResponse(stream(), media_type="text/plain; charset=utf-8")


@app.get("/tools")
def list_tools():
    """Static catalogue of tools available to the agent."""
    tools = [
        {"name": "rag_search", "description": "基于现有知识库的检索增强回答"},
        {"name": "prompt_templates", "description": "查看可用提示词模板"},
    ]
    return tools
