import os
import pdb
from typing import Optional, List, Tuple
from tqdm.notebook import tqdm
import pandas as pd
import matplotlib.pyplot as plt

pd.set_option("display.max_colwidth", None)  # this will be helpful when visualizing retriever outputs

from langchain.docstore.document import Document as LangchainDocument
from langchain.text_splitter import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer
from langchain.vectorstores import FAISS
# from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
# from langchain_community.vectorstores.utils import DistanceStrategy
from langchain.vectorstores.utils import DistanceStrategy
import pacmap
import numpy as np
import plotly.express as px
from transformers import pipeline
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# Local filesystem paths to the embedding model (BGE base, Chinese) and the
# reader LLM (Qwen1.5-7B-Chat) in the shared model zoo.
EMBEDDING_MODEL_NAME = "/b4-ai/share_model_zoo/embedding/BAAI/bge-base-zh-v1.5/"
READER_MODEL_NAME = "/b4-ai/share_model_zoo/LLM/Qwen/Qwen1.5-7B-Chat"

# Hierarchical separator list tailored for splitting Markdown documents,
# tried in order from most to least structural: headers, code fences,
# horizontal rules, paragraph breaks, line breaks, spaces, characters.
MARKDOWN_SEPARATORS = [
    "\n#{1,6} ",
    "```\n",
    "\n\\*\\*\\*+\n",
    "\n---+\n",
    "\n___+\n",
    "\n\n",
    "\n",
    " ",
    "",
]
# Device used for both the embedding model and the reader LLM.
device = "cuda"

# Module-global FAISS vector store; populated by build_vector_domain_knowledge()
# and read by retrieved_docs_vector().
KNOWLEDGE_VECTOR_DATABASE = None


def split_documents(
        chunk_size: int,
        knowledge_base: List[LangchainDocument],
        tokenizer_name: Optional[str] = EMBEDDING_MODEL_NAME,
) -> List[LangchainDocument]:
    """
    Split documents into token-bounded chunks and drop exact-duplicate chunks.

    Args:
        chunk_size: maximum chunk size, counted in tokens of the given tokenizer.
        knowledge_base: documents to split.
        tokenizer_name: HuggingFace tokenizer name/path used to measure length.

    Returns:
        De-duplicated list of chunked documents; the first occurrence of each
        distinct page_content is kept and input order is preserved.
    """
    text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
        AutoTokenizer.from_pretrained(tokenizer_name),
        chunk_size=chunk_size,
        chunk_overlap=chunk_size // 10,  # 10% overlap between neighbouring chunks
        add_start_index=True,
        strip_whitespace=True,
        separators=MARKDOWN_SEPARATORS,
    )

    docs_processed = []
    for doc in knowledge_base:
        docs_processed.extend(text_splitter.split_documents([doc]))

    # Remove exact-duplicate chunks. A set is the idiomatic membership
    # structure here (the original used a dict with dummy True values).
    seen_texts = set()
    docs_processed_unique = []
    for doc in docs_processed:
        if doc.page_content not in seen_texts:
            seen_texts.add(doc.page_content)
            docs_processed_unique.append(doc)

    return docs_processed_unique


def _plot_token_length_distribution(docs, tokenizer):
    """Plot a histogram of per-chunk token counts (analysis aid only)."""
    lengths = [len(tokenizer.encode(doc.page_content)) for doc in tqdm(docs)]
    pd.Series(lengths).hist()
    plt.title("Distribution of document lengths in the knowledge base (in count of tokens)")
    plt.show()


def process_documents(task_df, is_analysis=False):
    """
    Turn a pre-processed Q&A dataframe into token-bounded LangChain chunks.

    Args:
        task_df: dataframe with 'question', 'answer' and 'question_type'
            columns (mutated in place: 'text' and 'source' columns are added).
        is_analysis: when True, also run a character-based split and plot
            token-length histograms before/after the token-aware split.

    Returns:
        De-duplicated list of LangchainDocument chunks (max 512 tokens each).
    """
    # Build the full text and a "row_index/question_type" source tag per row.
    task_df['text'] = task_df['question'] + '？' + task_df['answer']
    task_df['source'] = task_df.reset_index().index
    task_df['source'] = task_df['source'].astype(str)
    task_df['source'] = task_df['source'] + "/" + task_df['question_type']

    print("Start do process_documents")
    RAW_KNOWLEDGE_BASE = [
        LangchainDocument(page_content=doc["text"], metadata={"source": doc['source']}) for _, doc in
        tqdm(task_df.iterrows(), total=len(task_df))
    ]

    if is_analysis:
        # Character-based split, used only to inspect the token-length
        # distribution. NOTE: the original code always computed this split and
        # then unconditionally discarded it in favour of split_documents()
        # below, so it is now done only on the analysis path.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,  # max characters per chunk (value chosen arbitrarily)
            chunk_overlap=100,  # characters of overlap between chunks
            add_start_index=True,  # include chunk start index in metadata
            strip_whitespace=True,  # strip whitespace around every chunk
            separators=MARKDOWN_SEPARATORS,
        )
        docs_char_split = []
        for doc in RAW_KNOWLEDGE_BASE:
            docs_char_split += text_splitter.split_documents([doc])
        print("len(docs_processed)=", len(docs_char_split))

        # Query the underlying SentenceTransformer for its max sequence length
        # so oversized chunks can be spotted in the histogram.
        print(f"Model's maximum sequence length: {SentenceTransformer(EMBEDDING_MODEL_NAME).max_seq_length}")
        tokenizer = AutoTokenizer.from_pretrained(EMBEDDING_MODEL_NAME)
        _plot_token_length_distribution(docs_char_split, tokenizer)

    # Authoritative split: token-aware, so no chunk exceeds the embedding
    # model's sequence limit.
    docs_processed = split_documents(
        512,  # chunk size adapted to the embedding model
        RAW_KNOWLEDGE_BASE,
        tokenizer_name=EMBEDDING_MODEL_NAME,
    )
    if is_analysis:
        tokenizer = AutoTokenizer.from_pretrained(EMBEDDING_MODEL_NAME)
        _plot_token_length_distribution(docs_processed, tokenizer)

    return docs_processed


def build_vector_domain_knowledge(docs, test_user_query="", is_embedding_visualize=False):
    """
    Build a FAISS vector store from the given documents and publish it via the
    module global KNOWLEDGE_VECTOR_DATABASE.

    Args:
        docs: LangchainDocument chunks to embed and index.
        test_user_query: optional query; when non-empty it is embedded as a
            sanity check (and used for visualization if requested).
        is_embedding_visualize: when True (and a test query is given), show a
            2D PaCMAP projection of the chunk embeddings plus the query.

    Returns:
        The FAISS vector store (also stored in KNOWLEDGE_VECTOR_DATABASE).
    """
    embedding_model = HuggingFaceEmbeddings(
        model_name=EMBEDDING_MODEL_NAME,
        multi_process=True,
        model_kwargs={"device": device},
        encode_kwargs={"normalize_embeddings": True},  # cosine similarity expects normalized vectors
    )
    global KNOWLEDGE_VECTOR_DATABASE
    KNOWLEDGE_VECTOR_DATABASE = FAISS.from_documents(
        docs, embedding_model, distance_strategy=DistanceStrategy.COSINE
    )

    # TODO: persist KNOWLEDGE_VECTOR_DATABASE to disk so it can be reloaded
    # instead of rebuilt on every run.
    if test_user_query:
        print("user_query=", test_user_query)
        query_vector = embedding_model.embed_query(test_user_query)
        print("query_vector shape=", len(query_vector))

        if is_embedding_visualize:
            _visualize_embeddings(docs, query_vector, test_user_query)

    return KNOWLEDGE_VECTOR_DATABASE


def _visualize_embeddings(docs, query_vector, test_user_query):
    """Project chunk + query embeddings down to 2D with PaCMAP and plot them."""
    embedding_projector = pacmap.PaCMAP(n_components=2, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0,
                                        random_state=1)

    # Reconstruct each stored chunk vector from the FAISS index, then append
    # the query embedding as the final row.
    all_embeddings = [
        list(KNOWLEDGE_VECTOR_DATABASE.index.reconstruct_n(idx, 1)[0]) for idx in range(len(docs))
    ] + [query_vector]

    # The index of the transformed data corresponds to the index of the original data.
    documents_projected = embedding_projector.fit_transform(np.array(all_embeddings), init="pca")

    points = [
        {
            "x": documents_projected[i, 0],
            "y": documents_projected[i, 1],
            # metadata["source"] is "row_index/question_type"; keep the type label
            "source": docs[i].metadata["source"].split("/")[1],
            "extract": docs[i].page_content[:100] + "...",
            "symbol": "circle",
            "size_col": 4,
        }
        for i in range(len(docs))
    ]
    points.append(
        {
            "x": documents_projected[-1, 0],
            "y": documents_projected[-1, 1],
            "source": "User query",
            "extract": test_user_query,
            "size_col": 100,
            "symbol": "star",
        }
    )
    df = pd.DataFrame.from_dict(points)
    # Visualize the embedding: chunks as circles colored by source, the
    # query as a black star.
    fig = px.scatter(
        df,
        x="x",
        y="y",
        color="source",
        hover_data="extract",
        size="size_col",
        symbol="symbol",
        color_discrete_map={"User query": "black"},
        width=1000,
        height=700,
    )
    fig.update_traces(
        marker=dict(opacity=1, line=dict(width=0, color="DarkSlateGrey")),
        selector=dict(mode="markers"),
    )
    fig.update_layout(
        legend_title_text="<b>Chunk source</b>",
        title="<b>2D Projection of Chunk Embeddings via PaCMAP</b>",
    )
    fig.show()

def retrieved_docs_vector(user_query, k=5):
    """
    Retrieve the top-k most similar chunks for a query from the global store.

    Args:
        user_query: natural-language query string.
        k: number of chunks to return (default 5, the original behavior).

    Returns:
        List of retrieved documents, possibly empty.

    Raises:
        RuntimeError: if build_vector_domain_knowledge() has not been called.
    """
    print(f"\nStarting retrieval for {user_query=}...")
    if KNOWLEDGE_VECTOR_DATABASE is None:
        raise RuntimeError("Vector store not built; call build_vector_domain_knowledge() first.")
    retrieved_docs = KNOWLEDGE_VECTOR_DATABASE.similarity_search(query=user_query, k=k)
    if retrieved_docs:  # guard: the original raised IndexError on an empty result
        print("\n==================================Top document==================================")
        print(retrieved_docs[0].page_content)
        print("==================================Metadata==================================")
        print(retrieved_docs[0].metadata)
    return retrieved_docs


def build_reader_llm():
    """
    Load the reader LLM with 4-bit NF4 quantization and wrap it in a
    text-generation pipeline.

    Returns:
        Tuple (text-generation pipeline, tokenizer) for the reader model.
    """
    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    reader_tokenizer = AutoTokenizer.from_pretrained(READER_MODEL_NAME)
    reader_model = AutoModelForCausalLM.from_pretrained(
        READER_MODEL_NAME,
        quantization_config=quant_config,
        device_map=device,  # place the quantized model on the configured GPU
    )

    reader_pipeline = pipeline(
        task="text-generation",
        model=reader_model,
        tokenizer=reader_tokenizer,
        do_sample=True,
        temperature=0.2,
        repetition_penalty=1.1,
        return_full_text=False,  # only the newly generated continuation
        max_new_tokens=500,
    )
    return reader_pipeline, reader_tokenizer


def rag_with_llm(retrieved_docs, rag_model, tokenizer, question):
    """
    Answer a question with the reader LLM, grounded on the retrieved chunks.

    Args:
        retrieved_docs: documents from the retriever (only .page_content used).
        rag_model: text-generation pipeline; called with the final prompt.
        tokenizer: tokenizer providing apply_chat_template() for the prompt.
        question: user question to answer.

    Returns:
        The generated answer string.
    """
    # Chinese chat prompt: a system instruction plus a user turn carrying
    # {context}/{question} placeholders filled in below via str.format().
    prompt_in_chat_format = [
        {
            "role": "system",
            "content": """使用上下文中包含的信息，对问题给出全面的回答。
            只回答所问的问题，回答应简洁并与问题相关。在相关时提供来源文档的编号。如果答案不能从上下文中推断出来，则不给出答案。""",
        },
        {
            "role": "user",
            "content": """Context:
    {context}
    ---
    以下是你需要回答的问题.

    Question: {question}""",
        },
    ]
    rag_prompt_template = tokenizer.apply_chat_template(
        prompt_in_chat_format, tokenize=False, add_generation_prompt=True
    )
    print("RAG_PROMPT_TEMPLATE=", rag_prompt_template)

    # Concatenate every retrieved chunk into one numbered context string.
    doc_sections = (f"Document {i}:::\n{doc.page_content}" for i, doc in enumerate(retrieved_docs))
    context = "\nExtracted documents:\n" + "".join(doc_sections)

    final_prompt = rag_prompt_template.format(question=question, context=context)

    # Generate the answer; the pipeline returns a list of candidate dicts.
    generated = rag_model(final_prompt)
    return generated[0]["generated_text"]


if __name__ == '__main__':
    # Load the raw in-domain FAQ corpus (CSV; the Excel path is kept for parity).
    data_dir = "./datas"
    file_name = "NIOPhone产品咨询类FAQ-达芬奇产品咨询类.csv"
    file_excel_name = "NIOPhone产品咨询类FAQ.xlsx"
    file_path = os.path.join(data_dir, file_name)
    file_excel_path = os.path.join(data_dir, file_excel_name)

    # Keep only the needed columns and normalize their names.
    column_map = {'咨询问题（问题主干）': 'question', '话术': 'answer', '关机': 'question_type'}
    df = pd.read_csv(file_path, sep=',', encoding='utf-8')
    df = df[list(column_map)].rename(columns=column_map)
    print("raw df.shape=", df.shape)

    # Pipeline: chunk documents -> build vector store -> retrieve -> generate.
    processed_docs = process_documents(df)
    build_vector_domain_knowledge(processed_docs)
    user_question = "截图有快捷键吗？"
    retrieved_docs = retrieved_docs_vector(user_question)
    reader_model, tokenizer = build_reader_llm()
    res = rag_with_llm(retrieved_docs, reader_model, tokenizer, user_question)
    print("user_question=", user_question)
    print("answer=", res)
