"""
加载、预处理多个中文对话数据集，并将其转换为适用于语言模型（尤其是对话模型）训练的格式，
最终保存处理后的数据集供后续模型训练使用
"""

import argparse
import os

import numpy as np
from tqdm import tqdm
from datasets import Dataset
from datasets import DatasetDict
from datasets import load_dataset
from datasets import concatenate_datasets
from transformers import AutoTokenizer

os.makedirs("./data", exist_ok=True)

os.environ["CURL_CA_BUNDLE"] = ""
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
os.environ["HF_HUB_DISABLE_SSL_VERIFICATION"] = "1"

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="YeungNLP/firefly-llama2-7b-base")
# parser.add_argument("--model", type=str, default="ChatZhangyi-7B1-512")
parser.add_argument("--max", type=int, default=1024)
parser.add_argument("--min", type=int, default=0)
parser.add_argument("--path", type=str, default="./data")
parser.add_argument("--multi", type=bool, default=True)
args = parser.parse_args()

# model="YeungNLP/bloomz-6b4-mt-zh"
# max=512
# min=0
# path="data"
# multi=False


# Load the tokenizer for the target model and reuse EOS as the padding token
# (the base model ships without a dedicated pad token).
tokenizer = AutoTokenizer.from_pretrained(args.model)
tokenizer.pad_token = tokenizer.eos_token

# Single-turn instruction corpora from the BELLE project, cached locally.
_belle_corpora = (
    "BelleGroup/generated_chat_0.4M",
    "BelleGroup/school_math_0.25M",
    "BelleGroup/train_2M_CN",
    "BelleGroup/train_1M_CN",
    "BelleGroup/train_0.5M_CN",
)
dataset1, dataset2, dataset3, dataset4, dataset5 = (
    load_dataset(corpus, cache_dir="./data") for corpus in _belle_corpora
)

# dataset6 = load_dataset("BelleGroup/multiturn_chat_0.8M", cache_dir="./data")
# datas6 = [{"instruction": data["instruction"].replace("\nHuman", f"{tokenizer.eos_token}{tokenizer.eos_token}{tokenizer.eos_token}\n<用户>").replace("Assistant", "<张仪>"),
#            "input": data["input"], "output": data["output"]}
#           for data in tqdm(dataset6["train"])]

dataset7 = load_dataset("YeungNLP/firefly-train-1.1M", cache_dir="./data")
dataset7 = dataset7.rename_column("input", "instruction")
dataset7 = dataset7.rename_column("kind", "input")
dataset7 = dataset7.rename_column("target", "output")
datas7 = [{"instruction": data["instruction"],
           "input": "",
           "output": data["output"]} for data in tqdm(dataset7["train"])]
ds7 = Dataset.from_list(datas7)
datasets = concatenate_datasets([dataset1["train"], dataset2["train"], dataset3["train"], dataset4["train"],
                                 dataset5["train"], ds7])

ds8 = load_dataset("Hello-SimpleAI/HC3-Chinese", "all", cache_dir="./data")
ds8 = ds8.rename_column("question", "instruction")
ds8 = ds8.rename_column("human_answers", "input")
ds8 = ds8.rename_column("chatgpt_answers", "output")
ds8 = ds8.remove_columns(["id", "source"])
ds8 = [{"instruction": data["instruction"],
        "input": "",
        "output": data["output"][0]} for data in ds8["train"]]
ds8 = Dataset.from_list(ds8)

ds9 = load_dataset("FreedomIntelligence/phoenix-sft-data-v1", cache_dir="./data")
ds9_list = []
for qa in tqdm(ds9["train"]["conversations"]):
    ddict = {"input": ""}
    for data in qa:
        if data["from"] == "human":
            ddict["instruction"] = data["value"]
        if data["from"] == "gpt":
            ddict["output"] = data["value"]
    ds9_list.append(ddict)
ds9 = Dataset.from_list(ds9_list)

ds10 = load_dataset("BelleGroup/train_3.5M_CN", cache_dir="./data")
ds10_list = []
for qa in tqdm(ds10["train"]["conversations"]):
    ddict = {"input": ""}
    for data in qa:
        if data["from"] == "human":
            ddict["instruction"] = data["value"]
        if data["from"] == "assistant":
            ddict["output"] = data["value"]
    ds10_list.append(ddict)
ds10 = Dataset.from_list(ds10_list)

# ds11 = load_dataset("fnlp/moss-002-sft-data")

datasets = concatenate_datasets([dataset3["train"], dataset4["train"],
                                 dataset5["train"], ds7, ds8, ds10, ds9])
# datasets_single = concatenate_datasets([dataset1["train"],dataset2["train"], ds10]) # ds10.select(range(int(1e6)))
print(datasets)


def format_single(question, answer):
    """Render one single-turn example as '<用户>:Q\\n<张仪>:A' followed by EOS."""
    dialogue = f"<用户>:{question}\n<张仪>:{answer}"
    return dialogue + tokenizer.eos_token


def format_multi(question, answer):
    """Concatenate question and answer verbatim, terminated by EOS."""
    return "".join((question, answer, tokenizer.eos_token))


# Render every example into flat training text.
texts_single = [format_single(row["instruction"], row["output"]) for row in tqdm(datasets)]
texts_multi = [format_multi(row["instruction"], row["output"]) for row in tqdm(datas7)]
# Optionally append the multi-turn-formatted firefly texts.
texts = texts_single + texts_multi if args.multi else texts_single

# Wrap each text for Dataset consumption. The character-length filter
# (args.min <= len(text) < args.max) is currently disabled, so the
# "percent" printed below is always 1.0.
concat = [{"text": text} for text in texts]
print("filter:", len(concat))
print("percent:", len(concat) / len(texts))


def tokenize_function(examples):
    """Tokenize a batch of texts, padded/truncated to a fixed args.max length."""
    return tokenizer(
        examples["text"],
        padding='max_length',
        truncation=True,
        max_length=args.max,
        return_tensors='pt',
    )


# Materialize the wrapped texts as a Dataset and tokenize in parallel
# (10 worker processes, cache bypassed so reruns pick up code changes).
concat = Dataset.from_list(concat)
print("tokenize")

tokenized_datasets = concat.map(
    tokenize_function,
    batched=True,
    remove_columns=["text"],
    num_proc=10,
    load_from_cache_file=False,
)


def group_texts(examples):
    """Attach causal-LM labels: a copy of input_ids (the model shifts them)."""
    labels = examples["input_ids"].copy()
    examples["labels"] = labels
    return examples


print("labels")
lm_datasets = tokenized_datasets.map(
    group_texts,
    batched=True,
    num_proc=10,
    # batch_size=10,
    load_from_cache_file=False,
)

print("save_to_disk")
lm_datasets.save_to_disk(args.path)

# print(np.array(lm_datasets["input_ids"]))