"""
一套大规模多源中文对话 / 问答类数据集的预处理流水线，核心功能是整合数十种公开 + 本地数据集，通过标准化格式、过滤无效数据、转换模型输入格式，
最终输出可直接用于自回归语言模型（如对话模型、问答模型）训练的结构化数据集。
1. 核心配置：参数与 Tokenizer 初始化
为预处理流程提供灵活配置，并加载模型对应的文本编码器，解决 “模型输入不兼容” 问题。
2. 数据集管理：多源加载与缓存优化
解决 “多源数据集重复加载、处理耗时” 问题，通过缓存机制提升效率，同时整合海量中文任务数据。
3. 数据集标准化：字段统一与结构适配
解决 “不同数据集字段命名 / 结构不一致，无法直接拼接” 的核心问题，将所有数据统一为 “指令 - 输出” 格式。
4. 文本处理：格式化与无效数据过滤
确保输入模型的文本 “格式统一、内容有效”，减少训练噪声。
5. 文本编码：Tokenize 与标签构造
将过滤后的文本批量编码为定长 token 序列，并复制 input_ids 作为 labels，适配自回归训练目标。
6. 最终输出：数据集保存
将处理完成的结构化数据集保存到指定路径，供后续模型微调直接加载使用，无需重复预处理。
"""

import os
import argparse
import numpy as np
import random
from tqdm import tqdm
import pyarrow as pa
from datasets import Dataset
from datasets import DatasetDict
from datasets import load_dataset, load_from_disk
from datasets import concatenate_datasets
from transformers import AutoTokenizer

def _str2bool(value):
    """Parse a CLI boolean flag value.

    BUGFIX: the original used ``type=bool``, under which ANY non-empty
    string — including "False" — parses as True (``bool("False") is True``).
    Map the common spellings explicitly instead; unrecognized values are False.
    """
    return str(value).strip().lower() in ("1", "true", "t", "yes", "y")


parser = argparse.ArgumentParser()
# Tokenizer / model identifier passed to AutoTokenizer.from_pretrained.
parser.add_argument("--model", type=str, default="ChatZhangyi-7B1-512")
# Keep only samples whose character length is within [--min, --max];
# --max is also the fixed padded token length used at tokenization time.
parser.add_argument("--max", type=int, default=800)
parser.add_argument("--min", type=int, default=0)
# Output directory for the final processed dataset.
parser.add_argument("--path", type=str, default="data")
# Multi-turn text mode (currently only used by commented-out code below).
parser.add_argument("--multi", type=_str2bool, default=False)
args = parser.parse_args()

# model="YeungNLP/bloomz-6b4-mt-zh"
# max=512
# min=0
# path="data"
# multi=False
# Load the tokenizer matching the target model; eos_token doubles as the pad
# token because the base model ships without a dedicated pad token.
tokenizer = AutoTokenizer.from_pretrained(args.model)
tokenizer.pad_token = tokenizer.eos_token
# tokenizer.add_tokens(["<用户>", "<张仪>"])
# Reuse the previously concatenated corpus when it exists on disk; otherwise
# the else branch below downloads and normalizes every source dataset.
if os.path.exists("concatenate_datasets"):
    datasets = load_from_disk("concatenate_datasets")
else:
    # Single-turn instruction datasets from BelleGroup (open-ended chat
    # generation and grade-school math); only the train split is used.
    # Both already follow the target (instruction, input, output) schema.
    ds1 = load_dataset("BelleGroup/generated_chat_0.4M")["train"]
    ds2 = load_dataset("BelleGroup/school_math_0.25M")["train"]

    # ds3 = load_dataset("BelleGroup/train_2M_CN")["train"]
    # ds4 = load_dataset("BelleGroup/train_1M_CN")["train"]
    # ds5 = load_dataset("BelleGroup/train_0.5M_CN")["train"]

    # BelleGroup multi-turn chat. The instruction field is a transcript like
    # "Human: ...\nAssistant: ...\nHuman: ..."; retag the speakers with the
    # project's role markers and trim the boundary markers that format()
    # re-adds later.
    dataset6 = load_dataset("BelleGroup/multiturn_chat_0.8M")

    def _remove_prefix(text, prefix):
        """Drop *prefix* from the start of *text* only on an exact match."""
        return text[len(prefix):] if prefix and text.startswith(prefix) else text

    def _remove_suffix(text, suffix):
        """Drop *suffix* from the end of *text* only on an exact match."""
        return text[:-len(suffix)] if suffix and text.endswith(suffix) else text

    def _clean_multiturn(raw):
        """Retag a transcript and strip the leading-user / trailing-assistant markers.

        BUGFIX: the original used str.lstrip/rstrip with multi-character
        arguments; those strip *character sets*, not substrings, so a turn
        that merely started or ended with one of the marker characters
        (e.g. "用" or ":") could lose real content. Exact prefix/suffix
        removal preserves the intended behavior without that risk.
        """
        text = raw.replace("Human", tokenizer.eos_token + tokenizer.eos_token + "\n<用户>") \
                  .replace("Assistant", "\n<张仪>")
        # The replace above prepends two eos tokens before the first user turn;
        # drop them plus the first user tag, and drop the dangling assistant tag.
        text = _remove_prefix(text, tokenizer.eos_token + tokenizer.eos_token)
        text = _remove_prefix(text, "\n<用户>:")
        text = _remove_suffix(text, "\n<张仪>:")
        return text.strip()

    datas6 = [{"instruction": _clean_multiturn(data["instruction"]),
               "input": data["input"], "output": data["output"]}
              for data in tqdm(dataset6["train"])]
    ds6 = Dataset.from_list(datas6)

    # firefly-train-1.1M: map its (input, kind, target) schema onto the shared
    # (instruction, input, output) schema. NOTE(review): "kind" is renamed to
    # "input" but then overwritten with "" in the list comprehension below, so
    # the rename only frees the column name — the task-kind label is dropped.
    dataset7 = load_dataset("YeungNLP/firefly-train-1.1M")
    dataset7 = dataset7.rename_column("input", "instruction")
    dataset7 = dataset7.rename_column("kind", "input")
    dataset7 = dataset7.rename_column("target", "output")
    datas7 = [{"instruction": data["instruction"],
               "input": "",
               "output": data["output"]} for data in tqdm(dataset7["train"])]
    ds7 = Dataset.from_list(datas7)

    # HC3-Chinese: questions answered by both humans and ChatGPT. Only the
    # FIRST ChatGPT answer is kept as the training target; the human answers
    # (renamed to "input" but then replaced with "") and metadata are dropped.
    ds8 = load_dataset("Hello-SimpleAI/HC3-Chinese", "all")
    ds8 = ds8.rename_column("question", "instruction")
    ds8 = ds8.rename_column("human_answers", "input")
    ds8 = ds8.rename_column("chatgpt_answers", "output")
    ds8 = ds8.remove_columns(["id", "source"])
    ds8 = [{"instruction": data["instruction"],
            "input": "",
            "output": data["output"][0]} for data in ds8["train"]]
    ds8 = Dataset.from_list(ds8)

    # phoenix-sft-data-v1: each record is a conversation, a list of
    # {"from", "value"} turns. NOTE(review): for multi-turn conversations each
    # later human/gpt turn overwrites the previous one in ddict, so only the
    # LAST QA pair per conversation survives — confirm this is intentional.
    ds9 = load_dataset("FreedomIntelligence/phoenix-sft-data-v1")
    ds9_list = []
    for qa in tqdm(ds9["train"]["conversations"]):
        ddict = {"input": ""}
        for data in qa:
            if data["from"] == "human":
                ddict["instruction"] = data["value"]
            if data["from"] == "gpt":
                ddict["output"] = data["value"]
        ds9_list.append(ddict)
    ds9 = Dataset.from_list(ds9_list)

    # BelleGroup train_3.5M_CN uses the same conversation layout, with the
    # model side tagged "assistant" instead of "gpt"; the same last-turn
    # flattening noted above applies here as well.
    ds10 = load_dataset("BelleGroup/train_3.5M_CN")
    ds10_list = []
    for qa in tqdm(ds10["train"]["conversations"]):
        ddict = {"input": ""}
        for data in qa:
            if data["from"] == "human":
                ddict["instruction"] = data["value"]
            if data["from"] == "assistant":
                ddict["output"] = data["value"]
        ds10_list.append(ddict)
    ds10 = Dataset.from_list(ds10_list)
    # fnlp/moss-002-sft-data
    # Locally preprocessed datasets, shuffled with a fixed seed for
    # reproducibility; their schemas are assumed to already match — TODO confirm.
    ds11 = load_from_disk("zh_honesty_helpfulness").shuffle(seed=42)

    ds12 = load_from_disk("qa").shuffle(seed=42)

    # C-Eval: multiple-choice exam questions across the subject list below.
    # Each item is flattened into "question + options" as the instruction and
    # "answer letter + option text" as the output.
    ks = ['computer_network', 'operating_system', 'computer_architecture', 'college_programming', 'college_physics',
          'college_chemistry', 'advanced_mathematics', 'probability_and_statistics', 'discrete_mathematics',
          'electrical_engineer', 'metrology_engineer', 'high_school_mathematics', 'high_school_physics',
          'high_school_chemistry', 'high_school_biology', 'middle_school_mathematics', 'middle_school_biology',
          'middle_school_physics', 'middle_school_chemistry', 'veterinary_medicine', 'college_economics',
          'business_administration', 'marxism', 'mao_zedong_thought', 'education_science', 'teacher_qualification',
          'high_school_politics', 'high_school_geography', 'middle_school_politics', 'middle_school_geography',
          'modern_chinese_history', 'ideological_and_moral_cultivation', 'logic', 'law',
          'chinese_language_and_literature',
          'art_studies', 'professional_tour_guide', 'legal_professional', 'high_school_chinese', 'high_school_history',
          'middle_school_history', 'civil_servant', 'sports_science', 'plant_protection', 'basic_medicine',
          'clinical_medicine', 'urban_and_rural_planner', 'accountant', 'fire_engineer',
          'environmental_impact_assessment_engineer', 'tax_accountant', 'physician']
    # prompts_up = ["请从选项中选择一个最佳答案回答这个问题（只回答选项）：{}\n{}",
    #               "从以下几个答案中选择一个作答（只回答选项）：{}\n{}",
    #               "A、B、C、D选项中哪个是这个问题的答案（只回答选项）：{}\n{}",
    #               "A、B、C、D中哪个是答案（只回答选项）：{}\n{}",
    #               "{}（只回答选项）\n{}"]
    # prompts_down = ["{}\n请从选项中选择一个最佳答案回答这个问题（只回答选项）\n{}",
    #                 "{}\n从以下几个答案中选择一个作答（只回答选项）\n{}",
    #                 "{}\nA、B、C、D选项中哪个是这个问题的答案（只回答选项）\n{}",
    #                 "{}\nA、B、C、D中哪个是答案（只回答选项）\n{}",
    #                 "{}\n{}（只回答选项）"]
    inputs, outputs = [], []
    for k in ks:
        _ds1 = load_dataset(r"ceval/ceval-exam", name=k)
        for _k in ['test', 'val', 'dev']:
            for data in _ds1[_k]:
                question, abcd = data['question'], f"A {data['A']}\nB {data['B']}\nC {data['C']}\nD {data['D']}"
                # if random.random() >= 0.5:
                #     prompt = random.choice(prompts_up)
                # else:
                #     prompt = random.choice(prompts_down)
                # instruction = prompt.format(question, abcd)
                instruction = f'{question} {abcd}'
                # Skip items without a labelled answer (presumably the hidden
                # test split ships empty answers — verify against the dataset).
                if data['answer'].strip():
                    inputs.append(instruction)
                    outputs.append(f"{data['answer']} {data[data['answer']]}")
    ds13 = Dataset.from_dict({"instruction": inputs, "output": outputs}).shuffle(seed=1)

    # Local machine-reading-comprehension data in JSONL form.
    # NOTE(review): hard-coded absolute path — breaks on any other machine.
    ds14 = load_dataset('json', data_files=f'/home/kas/zhangyi/mrc.jsonl')
    ds14 = ds14.rename_column("prompt", "instruction")
    ds14 = ds14.rename_column("response", "output")['train']

    def read_arrow_to_df_julia_ok(path):
        """Read an Arrow IPC stream file at *path* into a pandas DataFrame."""
        stream_reader = pa.ipc.RecordBatchStreamReader(path)
        return stream_reader.read_pandas()


    # fnlp/moss-003-sft-data
    # Load a previously materialized HF cache arrow file directly.
    # NOTE(review): the absolute path pins a specific cache fingerprint on one
    # machine — any cache rebuild invalidates it.
    df = read_arrow_to_df_julia_ok(
        "/home/kas/.cache/huggingface/datasets/json/default-34dc6729b5b6e360/0.0.0/a3e658c4731e59120d44081ac10bf85dc7e1388126b92338344ce9661907f253/json-train.arrow")
    ds15 = Dataset.from_pandas(df)
    ds15 = ds15.rename_column("prompt", "instruction")
    ds15 = ds15.rename_column("response", "output")

    # Remaining locally preprocessed benchmark/task datasets; their schemas
    # are assumed to already match the shared format — TODO confirm.
    ds16 = load_from_disk("llmzoo")

    ds17 = load_from_disk("MMCU0513-datasets")
    ds18 = load_from_disk("JEC-QA-datasets")
    ds19 = load_from_disk("AGIEval-datasets")

    # Print each source for a quick size/schema sanity check, then merge and
    # shuffle with a fixed seed. NOTE(review): concatenate_datasets requires
    # matching features across sources, yet e.g. ds13 has no "input" column —
    # verify this actually runs, or align the schemas first.
    print(ds1, ds2, ds6, ds7, ds8, ds9, ds10, ds11,
          ds12, ds13, ds14, ds15, ds16, ds17, ds18, ds19)
    datasets = concatenate_datasets(
        [ds1, ds2, ds6, ds7, ds8, ds9, ds10, ds11,
         ds12, ds13, ds14, ds15, ds16, ds17, ds18, ds19]).shuffle(seed=1)
    # datasets = concatenate_datasets(
    #     [ds1, ds2, ds6, ds8, ds10,
    #      ds12, ds13, ds14, ds16, ds17, ds18, ds19]).shuffle(seed=1)
    print(datasets)

    # Cache the merged corpus so the next run takes the fast path above.
    datasets.save_to_disk("concatenate_datasets")


def format(question, answer):
    """Render one QA pair as a single training string.

    Lower-cases both sides, wraps them in the user/assistant role markers,
    and terminates the sample with the tokenizer's eos token.
    NOTE: shadows the builtin ``format``; kept for caller compatibility.
    """
    template = "<用户>:{}\n<张仪>:{}{}"
    return template.format(question.lower(), answer.lower(), tokenizer.eos_token)


# print('空: ', len([data for data in tqdm(datasets) if not data['instruction'] or not data['output']]))
# Render every usable record into one training string, skipping rows where
# either side of the pair is empty.
texts = [
    format(record['instruction'], record['output'])
    for record in tqdm(datasets)
    if record['instruction'] and record['output']
]
# multi_texts = [data['instruction'] + data['output'] for data in tqdm(datas6)]
# multi_texts = [text.replace("\n<用户>", tokenizer.eos_token + "\n<用户>") + tokenizer.eos_token
#                for text in multi_texts]
# texts = multi_texts if args.multi else texts
# Wrap each string for Dataset.from_list and keep only samples whose
# character length lies inside [--min, --max].
concat = [{"text": text} for text in texts]
concat = [sample for sample in concat if args.min <= len(sample["text"]) <= args.max]
print("filter:", len(concat))
print("percent:", len(concat) / datasets.num_rows)


def tokenize_function(examples):
    """Tokenize a batch of samples, padding/truncating every sequence to
    exactly ``args.max`` tokens so the resulting corpus is rectangular."""
    return tokenizer(
        examples["text"],
        padding='max_length',
        max_length=args.max,
        truncation=True,
        return_tensors='pt',
    )


# Materialize the filtered samples as a datasets.Dataset so map() can
# tokenize them in parallel worker processes.
concat = Dataset.from_list(concat)
print("tokenize")

# Batched parallel tokenization; the raw "text" column is dropped once token
# ids exist. Caching is disabled so parameter changes always take effect.
tokenized_datasets = concat.map(
    tokenize_function,
    batched=True,
    num_proc=30,
    remove_columns=["text"],
    load_from_cache_file=False,
)


def group_texts(examples):
    """Attach causal-LM labels to a batch in place.

    For autoregressive training the targets equal the inputs, so mirror
    ``input_ids`` into a ``labels`` column (copied, not aliased, so later
    edits to one column cannot leak into the other). Returns the same dict.
    """
    labels = examples["input_ids"].copy()
    examples["labels"] = labels
    return examples


print("labels")
lm_datasets = tokenized_datasets.map(
    group_texts,
    batched=True,
    num_proc=30,
    # batch_size=10,
    load_from_cache_file=False,
)

print("save_to_disk")
lm_datasets.save_to_disk(args.path)

# print(np.array(lm_datasets["input_ids"]))
