# Embedding loading via sentence-transformers
import os
from pathlib import Path
from typing import List, Optional, Tuple

import faiss
import numpy as np
import torch
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import FAISS
from sentence_transformers import SentenceTransformer

from pdf_parse import DataProcess
from utils.utils import get_path, timer, torch_gc


# 自定义轻量级 Embeddings 类，适配 sentence-transformers
# Lightweight custom Embeddings class adapting sentence-transformers to LangChain
class SentenceTransformerEmbeddings(Embeddings):
    """LangChain-compatible Embeddings wrapper around a sentence-transformers model.

    All vectors are L2-normalized, so inner-product similarity over them equals
    cosine similarity (this pairs with the ``IndexFlatIP`` index built by
    ``FaissRetriever`` below).
    """

    @timer
    def __init__(self, model_name: str, device: Optional[str] = None, batch_size: int = 64):
        """Load the model directly onto the requested (or auto-detected) device.

        Args:
            model_name: Model id or local path understood by ``SentenceTransformer``.
            device: Torch device string; defaults to "cuda" when available, else "cpu".
            batch_size: Batch size used by :meth:`embed_documents`.
        """
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        # Passing device to the constructor avoids loading on CPU and moving afterwards.
        self.model = SentenceTransformer(model_name, device=device)
        self.model.eval()
        self.batch_size = batch_size
        self.device = device

    @timer
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Encode a batch of texts into L2-normalized embedding vectors."""
        embeddings = self.model.encode(
            texts,
            batch_size=self.batch_size,
            show_progress_bar=True,
            convert_to_numpy=True,
        )
        # L2-normalize; clip the norm so a zero vector (e.g. from an empty
        # string) yields zeros instead of NaN from 0/0.
        norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
        embeddings = embeddings / np.clip(norms, 1e-12, None)
        return embeddings.tolist()

    @timer
    def embed_query(self, text: str) -> List[float]:
        """Encode a single query into an L2-normalized embedding vector."""
        embedding = self.model.encode([text], convert_to_numpy=True)[0]
        # Same zero-norm guard as embed_documents.
        norm = np.linalg.norm(embedding)
        embedding = embedding / (norm if norm > 0.0 else 1.0)
        return embedding.tolist()


class FaissRetriever:
    """FAISS-backed dense retriever over tab-separated text lines.

    Loads a persisted index from disk when one exists (and ``rewrite`` is
    False); otherwise bulk-encodes all documents, builds a cosine-similarity
    (inner product over normalized vectors) index, and saves it.
    """

    @timer
    def __init__(
        self,
        model_path: str,
        data: List[str],
        rewrite: bool = False,
        index_name: str = "faiss_index",
    ):
        """Build or load the FAISS index.

        Args:
            model_path: Sentence-transformers model id or local path.
            data: Raw lines; the text before the first tab is what gets indexed.
            rewrite: When True, rebuild the index even if a saved one exists.
            index_name: Base filename for the saved ``.faiss``/``.pkl`` artifacts.
        """
        self.model_path = model_path
        self.index_name = index_name
        index_path = Path(get_path(index_name))
        faiss_index_file = index_path / f"{index_name}.faiss"
        faiss_pkl_file = index_path / f"{index_name}.pkl"

        # Text before the first tab is the indexable content; the rest is ignored.
        texts = [line.strip("\n").strip().split("\t")[0] for line in data]

        # One shared embedding model serves both bulk document encoding and
        # later query encoding (previously the model was loaded twice in the
        # build path, doubling memory use).
        self.embeddings = SentenceTransformerEmbeddings(
            model_path, device="cuda" if torch.cuda.is_available() else "cpu"
        )

        if faiss_index_file.exists() and faiss_pkl_file.exists() and not rewrite:
            print(f"✅ Loading existing FAISS index from {index_path}")
            # SECURITY: load_local unpickles the docstore — only load index
            # directories produced by this code / a trusted source.
            self.vector_store = FAISS.load_local(
                str(index_path),
                self.embeddings,
                index_name=index_name,
                allow_dangerous_deserialization=True,
            )
        else:
            print("🚀 Building new FAISS index with parallel encoding...")
            index_path.mkdir(parents=True, exist_ok=True)

            with torch.no_grad():
                # embed_documents already batch-encodes (batch_size=64, progress
                # bar) and L2-normalizes — no need for a second encode path.
                embeddings_array = np.asarray(
                    self.embeddings.embed_documents(texts), dtype=np.float32
                )

            # Inner product over L2-normalized vectors == cosine similarity.
            self.vector_store = FAISS(
                embedding_function=self.embeddings,
                index=faiss.IndexFlatIP(embeddings_array.shape[1]),
                docstore=InMemoryDocstore(),
                index_to_docstore_id={},
            )
            self.vector_store.add_embeddings(
                list(zip(texts, embeddings_array)),
                metadatas=[{"id": i} for i in range(len(texts))],
            )
            self.vector_store.save_local(str(index_path), index_name=index_name)

            # Release GPU cache held by the one-off bulk encode (no-op on CPU).
            torch.cuda.empty_cache()

    @timer
    def get_topk(self, query: str, k: int) -> List[Tuple[Document, float]]:
        """Return up to ``k`` (Document, score) pairs for ``query``.

        Blank queries and search errors degrade to an empty list so callers
        never crash on a bad query.
        """
        if not query.strip():
            return []
        try:
            return self.vector_store.similarity_search_with_score(query, k=k)
        except Exception as e:
            # Best-effort: report and return no results rather than raising.
            print(f"Search failed: {e}")
            return []

    @timer
    def get_vector_store(self):
        """Expose the underlying LangChain FAISS vector store."""
        return self.vector_store


if __name__ == "__main__":
    model_name = "moka-ai/m3e-large"  # alternative: text2vec-large-chinese
    pdf_path = "data/train_a.pdf"

    # Parse the PDF at several granularities; chunks accumulate in .data and
    # the running chunk count is printed after each parsing strategy.
    dataprocess = DataProcess(pdf_path)
    for parse_fn, seq_lengths in (
        (dataprocess.parse_blocks, (1024, 512)),
        (dataprocess.parse_all_page, (256, 512)),
        (dataprocess.parse_one_page_with_rule, (256, 512)),
    ):
        for seq_length in seq_lengths:
            parse_fn(max_seq=seq_length)
        print(len(dataprocess.data))
    data = dataprocess.data

    # Smoke-test retrieval over the parsed corpus.
    retriever = FaissRetriever(model_name, data, rewrite=False)
    for query in (
        "如何预防新冠肺炎",
        "交通事故如何处理",
        "吉利集团的董事长是谁",
        "吉利汽车语音助手叫什么",
    ):
        hits = retriever.get_topk(query, k=6)
        print(f"\nQuery: {query}")
        for doc, score in hits:
            print(f"  Score: {score:.4f} | Content: {doc.page_content[:60]}...")

    torch_gc()