import time
import pdb
import utils
import pacmap
import pandas as pd
import numpy as np
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.vectorstores.utils import DistanceStrategy
import plotly.express as px


class KnowledgeBase:
    """In-memory vector knowledge base over a corpus DataFrame.

    The corpus is processed into documents via ``utils.process_documents``,
    embedded with a HuggingFace model, and indexed in a FAISS store using
    cosine distance for similarity search.
    """

    def __init__(self, corpus_df, embedding_model_name, device):
        """
        Args:
            corpus_df: pandas DataFrame holding the raw corpus; consumed by
                ``utils.process_documents``.
            embedding_model_name: HuggingFace model name or path used for
                sentence embeddings.
            device: device string passed to the embedding model
                (e.g. "cpu" or "cuda").
        """
        self.device = device
        self.corpus_df = corpus_df
        self.vector_domain_knowledge = None  # FAISS store, built in _process_corpus()
        self.embedding_model_name = embedding_model_name
        self.embedding_model = None  # created in _build_vector_domain_knowledge()

        self._process_corpus()

    def _process_corpus(self):
        """Process the corpus into documents and build the vector store, timing each step."""
        start_time = time.time()
        processed_docs = utils.process_documents(self.corpus_df)
        end_time = time.time()
        print("process documents cost=", end_time - start_time)

        start_time = time.time()
        self.vector_domain_knowledge = self._build_vector_domain_knowledge(processed_docs)
        end_time = time.time()
        print("build vector domain knowledge cost=", end_time - start_time)

    def _build_vector_domain_knowledge(self, processed_docs):
        """Embed the processed documents and index them in a FAISS vector store.

        Args:
            processed_docs: documents (LangChain ``Document`` objects) to embed
                and index.

        Returns:
            A ``FAISS`` vector store over ``processed_docs`` using cosine
            distance.
        """
        # NOTE: multi_process=True raises an error in this setup — keep it off.
        self.embedding_model = HuggingFaceEmbeddings(
            model_name=self.embedding_model_name,
            multi_process=False,
            model_kwargs={"device": self.device},
            encode_kwargs={"normalize_embeddings": True},  # required for cosine similarity
        )
        vector_db = FAISS.from_documents(
            processed_docs, self.embedding_model, distance_strategy=DistanceStrategy.COSINE
        )
        return vector_db

    def retrieved_docs_vector(self, user_question, k=5):
        """Retrieve the top-``k`` documents most similar to a user question.

        Args:
            user_question: free-text query to embed and search with.
            k: number of documents to return (default 5, matching the
                previously hard-coded behavior).

        Returns:
            List of the ``k`` most similar documents.
        """
        start_time = time.time()
        retrieved_docs = self.vector_domain_knowledge.similarity_search(query=user_question, k=k)
        end_time = time.time()
        print("retrieved docs vector cost=", end_time - start_time)
        return retrieved_docs

    def _embedding_visualize(self, docs, test_user_query):
        """Project document and query embeddings to 2-D with PaCMAP and plot them.

        Args:
            docs: the indexed documents; assumes ``docs[i]`` corresponds to
                FAISS index id ``i`` — TODO confirm against the store builder.
            test_user_query: query string to embed and highlight in the plot.
        """
        print("user_query=", test_user_query)
        query_vector = self.embedding_model.embed_query(test_user_query)
        print("query_vector shape=", len(query_vector))

        # Project the embeddings down to 2 dimensions using PaCMAP.
        embedding_projector = pacmap.PaCMAP(
            n_components=2, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0, random_state=1
        )

        # Reconstruct each stored vector from the FAISS index, then append the
        # query vector as the final row.
        embeddings_2d = [
            list(self.vector_domain_knowledge.index.reconstruct_n(idx, 1)[0])
            for idx in range(len(docs))
        ] + [query_vector]

        # Fit the data (row i of the projection corresponds to input row i,
        # so the query is the last row).
        documents_projected = embedding_projector.fit_transform(np.array(embeddings_2d), init="pca")

        rows = [
            {
                "x": documents_projected[i, 0],
                "y": documents_projected[i, 1],
                # Assumes metadata["source"] is a path with at least one "/" —
                # TODO confirm against utils.process_documents.
                "source": docs[i].metadata["source"].split("/")[1],
                "extract": docs[i].page_content[:100] + "...",
                "symbol": "circle",
                "size_col": 4,
            }
            for i in range(len(docs))
        ]
        rows.append(
            {
                "x": documents_projected[-1, 0],
                "y": documents_projected[-1, 1],
                "source": "User query",
                "extract": test_user_query,
                "size_col": 100,
                "symbol": "star",
            }
        )
        df = pd.DataFrame.from_dict(rows)

        # Visualize the embedding; the user query is drawn as a black star.
        fig = px.scatter(
            df,
            x="x",
            y="y",
            color="source",
            hover_data=["extract"],  # fix: hover_data expects a list of column names
            size="size_col",
            symbol="symbol",
            color_discrete_map={"User query": "black"},
            width=1000,
            height=700,
        )
        fig.update_traces(
            marker=dict(opacity=1, line=dict(width=0, color="DarkSlateGrey")),
            selector=dict(mode="markers"),
        )
        fig.update_layout(
            legend_title_text="<b>Chunk source</b>",
            title="<b>2D Projection of Chunk Embeddings via PaCMAP</b>",
        )
        fig.show()