from datasets import load_dataset
import os
import sys

# Resolve this script's directory and its parent, then put the parent on
# sys.path so sibling packages in the repository can be imported directly.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
print(f"current_dir: {current_dir}")
sys.path.insert(0, parent_dir)

def str_to_lst(data):
    """Wrap the record's 'pos' field (a bare string) into a one-element list.

    Used with ``Dataset.map`` so each row's positive passage matches the
    fine-tuning schema, which expects ``pos`` to be a list of strings.
    The record is modified in place and returned.
    """
    data.update(pos=[data["pos"]])
    return data

# Local copy of the virattt/financial-qa-10K dataset, loaded via `load_dataset`.
financial_qa_10K_data=f"{current_dir}/virattt/financial-qa-10K"

# Output directory for the generated fine-tuning and evaluation files.
ft_data_dir=f"{current_dir}/ft_data"

if __name__ == "__main__":
    """
    Suppose we are willing to fine-tune our model for financial tasks. We found an open-source dataset that could be useful: financial-qa-10k. Let's see how to properly prepare our dataset for fine-tuning.
    假设我们愿意针对财务任务微调我们的模型。我们发现了一个可能有用的开源数据集：financial-qa-10k。让我们看看如何正确准备我们的数据集以进行微调。

    The raw dataset has the following structure:
    原始数据集具有以下结构：

    5 columns of: 'question', 'answer', 'context', 'ticker', and 'filing'.
    7000 rows.
    """
    ds = load_dataset(financial_qa_10K_data, split="train")
    print("dataset: ")
    print(ds)

    """
    1. Data for Fine-tuning
    Construct the dataset to the following format:
    1. 用于微调的数据
    将数据集构造为以下格式：

    {"query": str, "pos": List[str], "neg":List[str], "pos_scores": List[int], "neg_scores": List[int], "prompt": str, "type": str}

    query is the query, and pos is a list of positive texts, neg is a list of negative texts. pos_scores is a list of scores corresponding to the query and pos, neg_scores is a list of scores corresponding to the query and neg, if you don't use knowledge distillation, it can be ignored. prompt is the prompt used for the query, it will cover query_instruction_for_retrieval. type is used for bge-en-icl, it includes normal, symmetric_class, symmetric_clustering, .etc. If you have no negative texts for a query, you can random sample some from the entire corpus as the negatives.

    query 是查询，pos 是正向样本文本列表，neg 是负向样本文本列表。pos_scores 是 query 和 pos 对应的分数列表，neg_scores 是 query 和 neg 对应的分数列表，如果不使用知识蒸馏，可以忽略不计。
    prompt 是用于查询的提示，它将涵盖 query_instruction_for_retrieval。type 用于 bge-en-icl，它包括 normal、symmetric_class、symmetric_clustering 等。如果查询没有否定文本，则可以从整个语料库中随机抽样一些作为否定文本。

    We select the columns 'question' and 'context' as our query and answer(pos), and rename the columns. Then add the 'id' column for later evaluation use.
    我们选择列 'question' 和 'context' 作为我们的 query 和 answer（pos），然后重命名这些列。然后添加 'id' 列以供以后评估使用。
    """

    # Keep only the question/context pair, rename them to the fine-tuning
    # schema ('query' / 'pos'), and attach a string 'id' per row so the
    # evaluation files built below can reference rows stably.
    ds = ds.select_columns(column_names=["question", "context"])
    ds = ds.rename_column("question", "query")
    ds = ds.rename_column("context", "pos")
    ds = ds.add_column("id", [str(i) for i in range(len(ds))])
    print("dataset[0]: ")
    print(ds[0])

    """
    Negative examples are important during the training of embedding models. Our initial dataset does not come with negative texts. Thus we directly sample a few from the whole corpus.
    在嵌入模型的训练过程中，反例很重要。我们的初始数据集没有负面文本。因此，我们直接从整个语料库中抽取了一些。
    """
    import numpy as np

    np.random.seed(520)
    neg_num = 10

    # Sample negative texts uniformly from the whole corpus.
    # Hoist the 'pos' column once: per-row ds[j] access decodes an entire row
    # on every call, which made the original loop needlessly slow.
    all_pos = ds["pos"]
    n_rows = len(ds)
    new_col = []
    for row_idx in range(n_rows):
        ids = np.random.randint(0, n_rows, size=neg_num)
        # Redraw the whole batch whenever the row's own positive was sampled,
        # so a query never receives its ground-truth passage as a negative.
        while row_idx in ids:
            ids = np.random.randint(0, n_rows, size=neg_num)
        new_col.append([all_pos[int(j)] for j in ids])
    ds = ds.add_column("neg", new_col)

    # change the key of 'pos' to a list
    ds = ds.map(str_to_lst)

    """
    Lastly, we add the prompt which is used for query. It will be the query_instruction_for_retrieval during inference.
    最后，我们添加用于 query 的 prompt。这将是推理过程中的query_instruction_for_retrieval。
    """
    instruction = "Represent this sentence for searching relevant passages: "
    ds = ds.add_column("prompt", [instruction]*len(ds))

    """
    Now a single row of the dataset is:
    现在，数据集的单行为：
    """
    print("dataset[0]: ")
    print(ds[0])

    """
    Then we split the dataset into training set and testing set.
    然后我们将数据集拆分为训练集和测试集。
    """
    split = ds.train_test_split(test_size=0.1, shuffle=True, seed=520)
    train = split["train"]
    test = split["test"]
    """
    Now we are ready to store the data for later fine-tuning:
    现在我们准备好存储数据以供以后微调：
    """
    # to_json does not create missing directories — ensure the output dir exists.
    os.makedirs(ft_data_dir, exist_ok=True)
    train.to_json(f"{ft_data_dir}/training.json")

    """
    2. Test Data for Evaluation
    The last step is to construct the testing dataset for evaluation.

    测试数据进行评估
    最后一步是构建用于 评估 的测试数据集。
    """
    print("test: ")
    print(test)

    # First select the columns for queries:
    queries = test.select_columns(column_names=["id", "query"])
    queries = queries.rename_column("query", "text")

    # Then select the columns for corpus. Note the corpus is built from the
    # FULL dataset (not just the test split), so evaluation-time retrieval
    # searches over every passage.
    corpus = ds.select_columns(column_names=["id", "pos"])
    corpus = corpus.rename_column("pos", "text")

    # Finally, make the qrels that indicate the relations of queries and the
    # corresponding corpus entries. Each query's positive passage carries the
    # same id as the query row, so docid == qid with relevance 1.
    qrels = test.select_columns(["id"])
    qrels = qrels.rename_column("id", "qid")
    qrels = qrels.add_column("docid", list(test["id"]))
    qrels = qrels.add_column("relevance", [1]*len(test))

    # Store the evaluation files
    queries.to_json(f"{ft_data_dir}/test_queries.jsonl")
    corpus.to_json(f"{ft_data_dir}/corpus.jsonl")
    qrels.to_json(f"{ft_data_dir}/test_qrels.jsonl")
