"""
一个中文对话数据预处理工具，核心作用是将多源中文对话数据集标准化、格式化，
并转换为适用于语言模型（尤其是自回归对话模型）训练的输入格式，主要解决了以下问题：
1、解决多源数据格式不统一的问题
2、解决对话格式不规范的问题，模型训练需要统一的对话角色和格式标识，否则难以学习对话逻辑
3、解决数据长度不适用的问题。模型训练有固定的最大序列长度（由--max参数指定），过短的文本可能信息量不足，过长的文本会导致训练效率低或内存溢出。
4、解决模型输入格式转换的问题。原始文本无法直接被模型使用，需要转换为模型可识别的token序列
5、解决数据存储与复用的问题
"""

import argparse
import numpy as np
from tqdm import tqdm
from datasets import Dataset
from datasets import DatasetDict
from datasets import load_dataset
from datasets import concatenate_datasets
from transformers import AutoTokenizer

# Command-line configuration for the preprocessing run.
parser = argparse.ArgumentParser(description="Normalize and tokenize Chinese dialogue corpora for LM training.")
parser.add_argument("--model", type=str, default="checkpoint",
                    help="model name or local path whose tokenizer is used")
parser.add_argument("--max", type=int, default=1024,
                    help="max sequence length; also the upper character-length bound for kept texts")
parser.add_argument("--min", type=int, default=0,
                    help="lower character-length bound for kept texts")
parser.add_argument("--path", type=str, default="./data",
                    help="output directory for save_to_disk")
# BUG FIX: the original used type=bool, which is broken in argparse —
# bool("False") == True, so ANY explicit value (even "False"/"0") enabled
# the flag.  store_true keeps the same attribute name and False default.
parser.add_argument("--multi", action="store_true",
                    help="use the (currently disabled) multi-turn text path")
args = parser.parse_args()

# Example values used during development runs:
# model="YeungNLP/bloomz-6b4-mt-zh"
# max=512
# min=0
# path="data"
# multi=False


# Load the tokenizer matching the target model.
tokenizer = AutoTokenizer.from_pretrained(args.model)
# Causal-LM tokenizers often ship without a pad token; alias it to EOS so
# padding='max_length' below works.  NOTE(review): this makes padding
# indistinguishable from end-of-text at the token level — assumed acceptable
# here because labels are a wholesale copy of input_ids; confirm the trainer
# masks pad positions if that matters.
tokenizer.pad_token = tokenizer.eos_token
# Optional custom role tokens (disabled):
# tokenizer.add_tokens(["<用户>", "<卫鞅>"])

# dataset1 = load_dataset("BelleGroup/generated_chat_0.4M")
# dataset2 = load_dataset("BelleGroup/school_math_0.25M")
# dataset3 = load_dataset("BelleGroup/train_2M_CN")
# Earlier experiments also pulled these BELLE corpora (disabled):
# dataset1 = load_dataset("BelleGroup/generated_chat_0.4M")
# dataset2 = load_dataset("BelleGroup/school_math_0.25M")
# dataset3 = load_dataset("BelleGroup/train_2M_CN")
# Mirror copies of the BELLE math / general-chat corpora; downstream code
# reads their "instruction" and "output" columns.
dataset4 = load_dataset("OmniData/BelleGroup-school_math_0_dot_25M", cache_dir="./data")
dataset5 = load_dataset("OmniData/BelleGroup-train_2M_CN", cache_dir="./data")
# Multi-turn corpus with role-tag rewriting (disabled):
# dataset6 = load_dataset("BelleGroup/multiturn_chat_0.8M")
# datas6 = [{"instruction": data["instruction"].replace("Human", "<用户>").replace("Assistant", "<卫鞅>"),
#            "input": data["input"], "output": data["output"]}
#           for data in tqdm(dataset6["train"])]
# The firefly pretrain corpus uses different column names (input/kind/target);
# rename them onto the shared instruction/input/output schema.
dataset7 = load_dataset("YeungNLP/firefly-pretrain-dataset", cache_dir="./data")
dataset7 = dataset7.rename_column("input", "instruction")
dataset7 = dataset7.rename_column("kind", "input")
dataset7 = dataset7.rename_column("target", "output")
# Rebuild firefly rows with an empty "input" field (the original "kind"
# value is deliberately discarded) so the schema matches the BELLE sets.
datas7 = [{"instruction": data["instruction"],
           "input": "", "output": data["output"]}
          for data in tqdm(dataset7["train"])]
ds7 = Dataset.from_list(datas7)
# Full six-source concatenation (disabled):
# datasets = concatenate_datasets([dataset1["train"], dataset2["train"], dataset3["train"], dataset4["train"],
#                                  dataset5["train"], ds7])

# Training pool actually used: the two BELLE mirrors plus firefly.
datasets = concatenate_datasets([dataset4["train"], dataset5["train"], ds7])


def format(question, answer):
    """Render one (question, answer) pair as a single-turn dialogue string.

    The result is "<用户>:{question}\\n<卫鞅>:{answer}" followed by the
    tokenizer's EOS token, so the model learns where a reply ends.
    NOTE: this intentionally keeps the name ``format`` (shadowing the
    builtin) because the comprehension below calls it by that name.
    """
    turn = f"<用户>:{question}\n<卫鞅>:{answer}"
    return turn + tokenizer.eos_token


# Render every (instruction, output) pair into a single-turn dialogue
# string.  (A multi-turn variant based on datas6 existed but is disabled
# along with dataset6 above.)
texts = [format(row["instruction"], row["output"]) for row in tqdm(datasets)]
# Keep only texts whose character length lies in [args.min, args.max),
# wrapped as {"text": ...} rows for Dataset.from_list below.
concat = [{"text": t} for t in texts if args.min <= len(t) < args.max]


def tokenize_function(examples):
    """Tokenize a batch of formatted dialogue strings.

    Each text is truncated/padded to exactly ``args.max`` tokens so every
    row has uniform length for causal-LM training.

    FIX: dropped ``return_tensors='pt'`` — ``datasets.Dataset.map`` writes
    plain Python lists into its Arrow cache regardless, so building torch
    tensors per batch was pure overhead (and needlessly required torch at
    this stage).  The stored output is unchanged.
    """
    return tokenizer(
        examples["text"],
        truncation=True,
        padding="max_length",
        max_length=args.max,
    )


# Materialize the filtered rows as a datasets.Dataset so map() can run
# batched and in parallel.
concat = Dataset.from_list(concat)

# Tokenize all texts across 10 worker processes; the raw "text" column is
# dropped from the result.  load_from_cache_file=False forces a fresh
# tokenization pass on every run instead of reusing a cached one.
tokenized_datasets = concat.map(
    tokenize_function,
    batched=True,
    num_proc=10,
    remove_columns=["text"],
    load_from_cache_file=False,
)


def group_texts(examples):
    """Attach next-token-prediction labels to a tokenized batch.

    For causal-LM training the target sequence equals the input sequence
    (the trainer shifts it internally), so "labels" is a shallow copy of
    "input_ids".  The batch dict is mutated in place and returned.
    (The name is historical — no actual text grouping happens here.)
    """
    examples["labels"] = list(examples["input_ids"])
    return examples


# Add the "labels" column (copy of input_ids) across 10 worker processes.
lm_datasets = tokenized_datasets.map(
    group_texts,
    batched=True,
    num_proc=10,
    # batch_size=10,
    load_from_cache_file=False,
)

# Persist the processed dataset so training jobs can load it directly
# from --path with datasets.load_from_disk.
lm_datasets.save_to_disk(args.path)

# Debug sanity check.  NOTE(review): this materializes EVERY input_ids row
# into a single ndarray — memory-heavy for large corpora; consider printing
# only a slice or the shape instead.
print(np.array(lm_datasets["input_ids"]))
