import os
from pathlib import Path
from threading import Lock

import faiss
import numpy as np
import torch
from langchain.docstore import InMemoryDocstore
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.schema import Document
from langchain.vectorstores import FAISS

from pdf_parse import DataProcess
from utils.utils import get_path, logger, timer, torch_gc


class FaissRetriever(object):
    """FAISS-backed dense retriever with lazy GPU acceleration.

    Builds (or loads from disk) an inner-product FAISS index over a
    tab-separated corpus using a HuggingFace embedding model.  The index
    is moved to GPU on the first search; a class-level lock serializes
    all GPU access across instances.
    """

    # Class-level lock: protects shared GPU resource access.
    _gpu_lock = Lock()

    @timer
    def __init__(
        self, model_path, data, rewrite: bool = False, index_name: str = "faiss_index"
    ):
        """Build or load the FAISS index.

        Args:
            model_path: HuggingFace model name/path for the embedder.
            data: iterable of tab-separated lines; the first field of each
                line is indexed as one document.
            rewrite: when True, rebuild even if a cached index exists.
            index_name: basename for the on-disk index files.
        """
        # Embeddings are computed on GPU in batches; normalize_embeddings
        # makes inner product equivalent to cosine similarity.
        self.embeddings = HuggingFaceEmbeddings(
            model_name=model_path,
            model_kwargs={"device": "cuda"},
            encode_kwargs={
                "batch_size": 64,
                "normalize_embeddings": True,
            },
        )

        documents = []
        for idx, line in enumerate(data):
            line = line.strip("\n").strip()
            words = line.split("\t")
            content = words[0] if words else ""
            documents.append(Document(page_content=content, metadata={"id": idx}))

        self.documents = documents
        index_path = get_path(index_name)
        os.makedirs(index_path, exist_ok=True)

        faiss_index_file = Path(index_path) / f"{index_name}.faiss"
        faiss_pkl_file = Path(index_path) / f"{index_name}.pkl"
        emb_cache_file = Path(index_path) / "embeddings.npy"

        # GPU-side state; the index starts on CPU.
        self._is_on_gpu = False
        self._gpu_resources = None
        self._gpu_index = None
        # Remembers a failed GPU transfer so searches do not retry it
        # on every single query.
        self._gpu_failed = False

        cache_complete = (
            faiss_index_file.exists()
            and faiss_pkl_file.exists()
            and emb_cache_file.exists()
        )
        if cache_complete and not rewrite:
            logger.info(
                f"✅ Loading existing FAISS index and embeddings from {index_path}"
            )
            # The loaded index lives on CPU.  Do NOT move it to GPU here:
            # a GPU index cannot be passed to save_local() later.
            # NOTE: embeddings.npy only serves as a cache-completeness
            # marker on this path; it is intentionally not re-read.
            self.vector_store = FAISS.load_local(
                index_path,
                self.embeddings,
                index_name=index_name,
                allow_dangerous_deserialization=True,
            )
        else:
            self.vector_store = self._build_index(
                model_path, documents, index_path, index_name, emb_cache_file
            )

        # Free scratch GPU memory left over from embedding.
        torch.cuda.empty_cache()

    def _build_index(self, model_path, documents, index_path, index_name, emb_cache_file):
        """Encode *documents*, build an IndexFlatIP FAISS store, persist it."""
        logger.info(f"🚀 Building new FAISS index with {model_path}...")
        texts = [doc.page_content for doc in documents]
        logger.info(f"⏳ Encoding documents with {model_path}...")
        embeddings_list = self.embeddings.embed_documents(texts)
        embeddings_array = np.array(embeddings_list, dtype=np.float32)
        logger.info(f"Embeddings shape: {embeddings_array.shape}")
        # Defensive re-normalization (embed_documents already normalizes).
        faiss.normalize_L2(embeddings_array)

        dim = embeddings_array.shape[1]
        cpu_index = faiss.IndexFlatIP(dim)  # inner product == cosine on unit vectors
        logger.info("⏳ Adding embeddings to index...")
        cpu_index.add(embeddings_array)

        # Build the docstore; ids are stringified corpus positions.
        docstore = InMemoryDocstore()
        ids = [str(i) for i in range(len(documents))]
        for i, doc in enumerate(documents):
            docstore._dict[ids[i]] = doc

        vector_store = FAISS(
            embedding_function=self.embeddings.embed_query,
            index=cpu_index,
            docstore=docstore,
            # langchain FAISS expects Dict[int, str] here; a plain list only
            # works by accident for integer indexing and is inconsistent
            # with what load_local() reconstructs.
            index_to_docstore_id=dict(enumerate(ids)),
        )

        logger.info(f"💾 Saving FAISS index and embeddings to {index_path}")
        vector_store.save_local(index_path, index_name=index_name)
        np.save(emb_cache_file, embeddings_array)
        return vector_store

    def _move_index_to_gpu(self, note: str = ""):
        """Clone the CPU index onto GPU 0.  Caller must hold ``_gpu_lock``.

        On failure, logs the error, falls back to the CPU index, and sets
        ``_gpu_failed`` so callers do not retry the transfer per query.
        """
        try:
            self._gpu_resources = faiss.StandardGpuResources()
            self._gpu_index = faiss.index_cpu_to_gpu(
                self._gpu_resources, 0, self.vector_store.index
            )
            self._is_on_gpu = True
            logger.info(f"✅ FAISS index moved to GPU{note}")
        except Exception as e:
            logger.error(f"⚠️ GPU transfer failed: {e}")
            self._gpu_failed = True
            self._gpu_resources = None
            # Fall back to searching on the CPU index.
            self._gpu_index = self.vector_store.index

    @timer
    def get_topk(self, query, k):
        """Return up to *k* ``(Document, score)`` pairs most similar to *query*.

        Empty/whitespace queries return ``[]``; any search failure is
        logged and also yields ``[]``.
        """
        if not query.strip():
            return []
        try:
            with self._gpu_lock:
                # Lazy transfer: move to GPU on first search, unless a
                # previous transfer already failed.
                if not self._is_on_gpu and not self._gpu_failed:
                    logger.info("⏳ Moving FAISS index to GPU for faster search...")
                    self._move_index_to_gpu()

                search_index = (
                    self._gpu_index
                    if self._gpu_index is not None
                    else self.vector_store.index
                )

                query_embedding = np.array(
                    [self.embeddings.embed_query(query)], dtype=np.float32
                )
                scores, indices = search_index.search(query_embedding, k=k)

                results = []
                for score, idx in zip(scores[0], indices[0]):
                    if idx == -1:  # FAISS pads with -1 when fewer than k hits
                        continue
                    doc = self.vector_store.docstore._dict.get(str(idx))
                    if doc is not None:
                        results.append((doc, float(score)))
                return results

        except Exception as e:
            logger.error(f"Search failed: {e}")
            return []

    @timer
    def get_vector_store(self):
        """Return the underlying langchain FAISS vector store (CPU index)."""
        return self.vector_store

    def to_gpu(self):
        """Manually move the index to GPU (idempotent, thread-safe)."""
        if not self._is_on_gpu:
            with self._gpu_lock:
                # Double-checked: another thread may have won the race.
                if not self._is_on_gpu:
                    self._move_index_to_gpu(note=" (manual)")

    def to_cpu(self):
        """Release the GPU index and its resources.

        No write-back is needed: the CPU index held by ``vector_store``
        is the source of truth; the GPU copy is disposable.
        """
        if self._is_on_gpu:
            with self._gpu_lock:
                if self._is_on_gpu:
                    self._gpu_index = None
                    self._gpu_resources = None
                    self._is_on_gpu = False
                    # Allow a later manual/lazy transfer to try again.
                    self._gpu_failed = False
                    torch.cuda.empty_cache()
                    logger.info("✅ FAISS index released from GPU")

    def __del__(self):
        # getattr-guarded: __init__ may have raised before the GPU state
        # attributes were assigned.
        if getattr(self, "_is_on_gpu", False):
            self.to_cpu()


def main():
    """Smoke test: parse a sample PDF, build the index, run a few queries."""
    # Alternative embedding model: text2vec-large-chinese
    embed_model = "moka-ai/m3e-large"
    pdf_file = "data/train_a.pdf"

    processor = DataProcess(pdf_file)
    processor.parse_blocks(max_seq=1024)
    processor.parse_blocks(max_seq=512)
    logger.info(len(processor.data))
    processor.parse_all_page(max_seq=256)
    processor.parse_all_page(max_seq=512)
    logger.info(len(processor.data))
    processor.parse_one_page_with_rule(max_seq=256)
    processor.parse_one_page_with_rule(max_seq=512)
    logger.info(len(processor.data))

    retriever = FaissRetriever(embed_model, processor.data, rewrite=True)

    questions = (
        "如何预防新冠肺炎",
        "交通事故如何处理",
        "吉利集团的董事长是谁",
        "吉利汽车语音助手叫什么",
    )
    for question in questions:
        hits = retriever.get_topk(question, k=6)
        logger.info(f"\nQuery: {question}")
        for doc, score in hits:
            logger.info(f"  Score: {score:.4f} | Content: {doc.page_content[:60]}...")
    torch_gc()


if __name__ == "__main__":
    main()
