import json
import pickle
from typing import List, Dict
from pathlib import Path
import faiss
from pypdf import PdfReader
from sentence_transformers import SentenceTransformer
from modelscope import snapshot_download

# Input corpus directory (PDF/MD/TXT files, searched recursively).
DATA_DIR = Path("data")
# Output directory for the FAISS index, chunk metadata, and config.
INDEX_DIR = Path("indexes")
INDEX_DIR.mkdir(exist_ok=True, parents=True)  # created eagerly at import time

# Embedding model identifier (ModelScope / HuggingFace style id).
EMBEDDING_MODEL_NAME = "BAAI/bge-small-zh-v1.5"
EMBED_DIM = 512  # output dimension of bge-small-zh-v1.5
# Character-based chunking parameters: window size and overlap between
# consecutive chunks (overlap preserves context across chunk boundaries).
CHUNK_SIZE = 600
CHUNK_OVERLAP = 120


def read_pdf_text(pdf_path: Path) -> str:
    """Extract and newline-join the text of every page in a PDF.

    Pages whose extraction yields None contribute an empty string, so the
    page count is preserved in the joined output.
    """
    reader = PdfReader(str(pdf_path))
    page_texts = []
    for page in reader.pages:
        page_texts.append(page.extract_text() or "")
    return "\n".join(page_texts)


def read_md_text(md_path: Path) -> str:
    """Read a Markdown/TXT file as UTF-8, silently dropping undecodable bytes."""
    with open(md_path, "r", encoding="utf-8", errors="ignore") as handle:
        return handle.read()


def split_into_chunks(text: str, chunk_size: int, overlap: int) -> List[str]:
    """Split whitespace-normalized text into overlapping character chunks.

    All runs of whitespace are collapsed to single spaces first. Chunks are
    `chunk_size` characters long (the last one may be shorter) and consecutive
    chunks share `overlap` characters of context.

    Args:
        text: Raw input text; may be empty (returns []).
        chunk_size: Maximum characters per chunk; must be positive.
        overlap: Characters shared between consecutive chunks; must satisfy
            0 <= overlap < chunk_size, otherwise the window could not advance
            and the original implementation would loop forever.

    Returns:
        List of chunk strings covering the normalized text in order.

    Raises:
        ValueError: If chunk_size or overlap is out of range.
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")
    if not 0 <= overlap < chunk_size:
        raise ValueError("overlap must satisfy 0 <= overlap < chunk_size")

    text = " ".join(text.split())
    chunks: List[str] = []
    start = 0
    while start < len(text):
        end = min(start + chunk_size, len(text))
        chunks.append(text[start:end])
        if end == len(text):
            break
        # Guaranteed to advance: chunk_size > overlap, so start strictly grows.
        start = end - overlap
    return chunks


def load_documents(data_dir: Path) -> List[Dict]:
    """Recursively scan data_dir for PDF/MD/TXT files and return chunk records.

    Each record is a dict with keys:
        text:     the chunk content,
        source:   the originating file path as a string,
        chunk_id: "<filename>::chunk_<index>" for stable identification.
    """
    supported = {".pdf", ".md", ".txt"}
    documents: List[Dict] = []
    for path in data_dir.rglob("*"):
        suffix = path.suffix.lower()
        if suffix not in supported:
            continue
        raw = read_pdf_text(path) if suffix == ".pdf" else read_md_text(path)
        for idx, chunk in enumerate(split_into_chunks(raw, CHUNK_SIZE, CHUNK_OVERLAP)):
            documents.append(
                {
                    "text": chunk,
                    "source": str(path),
                    "chunk_id": f"{path.name}::chunk_{idx}",
                }
            )
    return documents


def _find_candidate_model_dirs(cache_dir: Path) -> List[Path]:
    """Collect unique directories under cache_dir that look like a saved model.

    A directory qualifies if it is the flat "<org>_<name>" layout, contains a
    sentence-transformers marker (sentence_bert_config.json), or contains
    generic model artifacts (config.json / pytorch_model.bin). Duplicates are
    removed while preserving discovery order (the original version could
    append the same directory twice).
    """
    candidates: List[Path] = []

    flat = cache_dir / EMBEDDING_MODEL_NAME.replace("/", "_")
    if flat.is_dir():
        candidates.append(flat)

    marker_files = ("sentence_bert_config.json", "config.json", "pytorch_model.bin")
    for item in cache_dir.rglob("*"):
        if not item.is_dir() or item in candidates:
            continue
        if any((item / marker).exists() for marker in marker_files):
            candidates.append(item)
    return candidates


def _load_embedding_model(cache_dir: Path) -> SentenceTransformer:
    """Load the embedding model from local cache, downloading if necessary.

    Tries every candidate directory under cache_dir first; if none loads,
    downloads from ModelScope and loads the downloaded snapshot.

    Raises:
        RuntimeError: If no local copy loads and the download fails. (The
            original code swallowed the download error and left the model as
            None, which crashed later at encode() with an opaque
            AttributeError.)
    """
    cache_dir.mkdir(exist_ok=True, parents=True)

    candidates = _find_candidate_model_dirs(cache_dir)
    if not candidates:
        # Fall back to the known cache layouts of ModelScope / HuggingFace.
        candidates = [
            cache_dir / f"models--{EMBEDDING_MODEL_NAME.replace('/', '--')}",
            cache_dir / EMBEDDING_MODEL_NAME.replace("/", "_"),
            cache_dir / "BAAI",
            cache_dir / "BAAI/bge-small-zh-v1.5",
            cache_dir / "BAAI/bge-small-zh-v1___5",  # ModelScope escapes "." as "___"
        ]

    print("调试信息：检查以下路径：")
    for path in candidates:
        exists = path.exists()
        is_dir = path.is_dir() if exists else False
        print(f"  {path} - 存在: {exists}, 是目录: {is_dir}")

    for model_path in candidates:
        if not (model_path.exists() and model_path.is_dir()):
            continue
        print(f"发现本地模型: {model_path}")
        try:
            model = SentenceTransformer(
                str(model_path), device="cpu", cache_folder=str(cache_dir)
            )
            print("本地模型加载成功")
            return model
        except Exception as e:
            # Keep trying the remaining candidates before resorting to download.
            print(f"本地模型加载失败: {e}")

    print(f"未发现本地模型，准备下载: {EMBEDDING_MODEL_NAME}")
    try:
        print("正在从ModelScope下载模型...")
        local_model_path = snapshot_download(
            model_id=EMBEDDING_MODEL_NAME, cache_dir=str(cache_dir)
        )
        print(f"模型已下载到: {local_model_path}")
        model = SentenceTransformer(
            local_model_path, device="cpu", cache_folder=str(cache_dir)
        )
        print("模型重新加载成功")
        return model
    except Exception as download_e:
        print(f"模型下载失败: {download_e}")
        raise RuntimeError(
            f"无法加载嵌入模型 {EMBEDDING_MODEL_NAME}，请检查网络或缓存目录"
        ) from download_e


def main():
    """Build the FAISS index: load docs, embed, and persist index + metadata.

    Raises:
        FileNotFoundError: If DATA_DIR does not exist.
        RuntimeError: If no parsable documents are found, or the embedding
            model cannot be loaded/downloaded.
    """
    if not DATA_DIR.exists():
        raise FileNotFoundError(f"未找到数据目录 {DATA_DIR.resolve()}，请创建并放入 PDF/MD/TXT 文件")

    print("加载文档并切分...")
    docs = load_documents(DATA_DIR)
    print(f"共 {len(docs)} 个文本块")

    if len(docs) == 0:
        raise RuntimeError("data/ 目录为空或无法解析文本，请放入 PDF/MD/TXT 文件后重试")

    print("加载嵌入模型...")
    model = _load_embedding_model(Path("model_cache"))

    print("向量化...")
    texts = [d["text"] for d in docs]
    embeddings = model.encode(texts, batch_size=64, show_progress_bar=True, normalize_embeddings=True)
    embeddings = embeddings.astype("float32")

    # Use the dimension the model actually produced rather than a hard-coded
    # constant, so index and config can never disagree with the embeddings.
    dim = int(embeddings.shape[1])

    print("构建 FAISS 索引...")
    index = faiss.IndexFlatIP(dim)  # inner product on normalized vectors = cosine
    index.add(embeddings)

    print("保存索引与元数据...")
    faiss.write_index(index, str(INDEX_DIR / "vectors.faiss"))
    with open(INDEX_DIR / "metadatas.pkl", "wb") as f:
        pickle.dump(docs, f)
    with open(INDEX_DIR / "config.json", "w", encoding="utf-8") as f:
        json.dump({"model": EMBEDDING_MODEL_NAME, "dim": dim}, f, ensure_ascii=False, indent=2)

    print("完成。")


if __name__ == "__main__":
    main() 