from datasets import load_dataset, Dataset, concatenate_datasets
import numpy as np
from instruction_re.utils.utils import load_json, load_config, set_global_seed

from instruction_re.utils.data_utils import (
    get_data_config,
)
from instruction_re.utils.data_aug_utils import (
    get_entity_by_type_json,
    get_relations_by_type_json,
    group_entity_by_type,
)

from data_aug_arg_parse import get_train_args

# from utils import group_entity_by_type
from transformers import (
    AutoModel,
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    DataCollatorForSeq2Seq,
)
import json
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import os
from tqdm import tqdm
from typing import Union, Dict, List
from pathlib import Path
from tqdm import tqdm

os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from datetime import datetime


def get_aug_triple_config(current_dir: Union[str, Path], rules: List[str]):
    """Map each augmentation rule name to its JSON file path under current_dir.

    Raises:
        ValueError: if current_dir does not exist on disk.
    """
    # Fail fast when the dataset directory is missing.
    if not os.path.exists(current_dir):
        raise ValueError(f"dataset dir: {current_dir} not exists.")

    rule_paths = {}
    for rule in rules:
        rule_paths[rule] = f"{current_dir}/{rule}.json"
    return rule_paths


def setup_ddp(rank, world_size):
    """Initialize the NCCL process group for distributed data parallel.

    Uses a fixed localhost rendezvous address and port; `rank` identifies
    this worker among `world_size` total processes.
    """
    os.environ.update({"MASTER_ADDR": "localhost", "MASTER_PORT": "12355"})

    # Join (or create) the process group.
    dist.init_process_group("nccl", rank=rank, world_size=world_size)


def cleanup():
    """Tear down the distributed process group created by setup_ddp()."""
    dist.destroy_process_group()


# def get_few_shot_data(
#     root_dir: Union[str, Path],
#     dataset_name: str,
# ):
#     dataset_dir = f"{root_dir}/{dataset_name}"

#     if "semval" in dataset_name:
#         return {
#             "train": f"{dataset_dir}/few_shot_50.json",
#             "valid": f"{dataset_dir}/dev.json",
#             "test": f"{dataset_dir}/test.json",
#         }

#     return {
#         "train": f"{dataset_dir}/few_shot_50.json",
#         "valid": f"{dataset_dir}/dev.json",
#         "test": f"{dataset_dir}/test.json",
#     }


def get_context_from_relations(relations: List) -> List:
    """Flatten each relation dict into a 5-tuple.

    Returns a list of (head_name, head_type, relation_type, tail_name,
    tail_type) tuples, one per input relation, in order.
    """
    return [
        (
            rel["head"]["name"],
            rel["head"]["type"],
            rel["type"],
            rel["tail"]["name"],
            rel["tail"]["type"],
        )
        for rel in relations
    ]


def remove_pos_for_triple_dataset(data):
    """Rebuild the relations list keeping only name/type, dropping `pos` spans."""
    cleaned = [
        {
            "head": {"name": rel["head"]["name"], "type": rel["head"]["type"]},
            "type": rel["type"],
            "tail": {"name": rel["tail"]["name"], "type": rel["tail"]["type"]},
        }
        for rel in data["relations"]
    ]
    return {"relations": cleaned}


def remove_pos_attr(data):
    """Strip head/tail `pos` attributes from the relations, keeping the sentence."""
    stripped = [
        {
            "head": {"name": rel["head"]["name"], "type": rel["head"]["type"]},
            "type": rel["type"],
            "tail": {"name": rel["tail"]["name"], "type": rel["tail"]["type"]},
        }
        for rel in data["relations"]
    ]
    return {
        "sentence": data["sentence"],
        "relations": stripped,
    }


# 随机交换两个triple的位置
def swap_triple(data):
    index = np.random.choice(len(data["relations"]) - 1)

    # 交换 index 和 index+1 位置的triple
    temp = data["relations"][index]
    data["relations"][index] = data["relations"][index + 1]
    data["relations"][index + 1] = temp

    return {
        "relations": data["relations"],
    }


# def swap_head_tail(data):
#     """随机选择一个triple，交换head 和 tail，并替换为相反方向的 relation type"""
#     index = np.random.choice(len(data["relations"]))

#     sample = data["relations"][index]

#     head = sample["head"]
#     tail = sample["tail"]
#     rel_type = sample["type"]

#     sample["head"] = tail
#     sample["tail"] = head
#     sample["type"] = np.random.choice(similar_relations[rel_type]["opposite_direction"])
#     data["relations"][index] = sample

#     return {
#         "relations": data["relations"],
#         # "rule": "swap_head_tail",
#     }


def get_alternative_relations_by_dataset(dataset_name):
    """Load the per-dataset map of near-synonymous relation types.

    Used by the replace_relation rule to substitute a relation type with a
    similar one. Returns None when `dataset_name` has no entry in
    alternative_relations.json.
    """
    # Plain string literal — the original used an f-string with no placeholders.
    alternative_relations = load_json("./alternative_relations.json")
    return alternative_relations.get(dataset_name)


def calculate_sample_times(total_num, per_rule_num):
    """Return how many sampling passes over `per_rule_num` eligible source
    examples are needed to produce `total_num` augmented samples.

    Used for the swap_triple / remove_triple rules. Equivalent to
    ceil(total_num / per_rule_num) for positive inputs.

    Raises:
        ValueError: if per_rule_num is 0 (the division is undefined).
    """
    if per_rule_num == 0:
        # Original message named a nonexistent variable ("greater_than_2_num")
        # and claimed emptiness; report the actual condition instead.
        raise ValueError("per_rule_num must be non-zero")

    # Ceiling division without reassigning the parameter.
    return (total_num + per_rule_num - 1) // per_rule_num


# 随机删除一个triple
def remove_triple(data):
    # 随机删除一个triple
    relations = data["relations"]

    index = np.random.choice(len(relations))
    relations.pop(index)

    return {
        "relations": relations,
    }


def merge_output_and_origin_triples(triple_dataset, output_texts):
    """Pair each augmented triple sample with its generated sentence.

    For every (sample, text) pair, rebuilds the relations without `pos`
    spans and keeps the pair only if the generated text mentions at least
    one of the sample's entities — a cheap sanity filter on generations.

    Returns a `datasets.Dataset` with `sentence` and `relations` columns.
    """
    output_dataset = []
    for sample, text in zip(triple_dataset, output_texts):
        entity_list = set()
        relations = []
        for r in sample["relations"]:
            head = r["head"]["name"]
            tail = r["tail"]["name"]
            # Dead `text.find(head/tail)` span computations removed — their
            # results fed only the commented-out "pos" fields below.

            relations.append(
                {
                    "head": {
                        "name": head,
                        "type": r["head"]["type"],
                    },
                    "type": r["type"],
                    "tail": {
                        "name": tail,
                        "type": r["tail"]["type"],
                    },
                }
            )

            entity_list.add(head)
            entity_list.add(tail)

        # Simple cleanup: skip generations that mention none of the entities.
        if not any(entity in text for entity in entity_list):
            continue

        output_dataset.append(
            {
                "sentence": text,
                "relations": relations,
            }
        )
    return Dataset.from_list(output_dataset)


def main():
    """End-to-end triple-augmentation driver.

    Loads the raw RE dataset, builds rule-based augmented relation triples
    (add / add-same-type / swap / remove / replace-entity / origin), and
    writes one JSON file per rule under a per-dataset output directory.

    NOTE(review): the bare `return` after the per-rule save loop makes
    everything below it (instruction building, model loading, sentence
    generation, merging/saving) unreachable dead code.
    """
    args = get_train_args()
    triple_rules = args.rules.split()
    # now = datetime.now()
    # few_shot_num = 50
    few_shot_num = args.few_shot_num
    is_few_shot = 0  # hard-coded off: full-data mode

    config = load_config(args.path_to_model_config)

    # set_global_seed(config["seed"])
    # dataset2modelpath = {
    #     "conll04": "data_aug/few_shot/conll04/google/flan-t5-large/0906_23_38",
    #     "SciERC": "data_aug/few_shot/SciECR_with_entity_processed/google/flan-t5-large/0906_23_40",
    #     "semval-RE": "data_aug/few_shot/semval-RE/google/flan-t5-large/0906_23_50",
    # }

    # dataset2modelpath = {
    #     "conll04": "data_aug/few_shot_50/conll04/google/flan-t5-large/0910_15_39",
    #     "SciERC": "data_aug/few_shot_50/SciERC/google/flan-t5-large/0910_15_40",
    #     "semval-RE": "data_aug/few_shot_50/semval-RE/google/flan-t5-large/0910_15_42",
    # }

    # dataset2modelpath = {
    #     "conll04": "data_aug/few_shot_150/ft_models/conll04/google/flan-t5-large/",
    #     "SciERC": "data_aug/few_shot_150/ft_models/SciERC/google/flan-t5-large/",
    #     "semval-RE": "data_aug/few_shot_150/ft_models/semval-RE/google/flan-t5-large/",
    # }
    set_global_seed(args.seed)
    dataset_name = args.dataset_name
    data_config = get_data_config(
        args.root_data_dir,
        dataset_name,
        is_few_shot=is_few_shot,
        few_shot_num=few_shot_num,
    )

    instruction = "Generate a natural language sentence that expresses the following relation triple:"
    raw_datasets = load_dataset("json", data_files=data_config)

    # Total number of augmented samples to generate.
    aug_examples_num = raw_datasets["train"].num_rows * args.aug_times

    # Per-rule sample count. TODO: pad by 10 to offset post-filter shortfall.
    # per_rule_num = aug_examples_num // len(triple_rules) + 10
    per_rule_num = aug_examples_num
    # per_rule_num = raw_datasets["train"].num_rows

    # NOTE(review): desc says "> 1" but the predicate keeps len >= 1.
    relations_gt1 = raw_datasets["train"].filter(
        lambda x: len(x["relations"]) >= 1,
        load_from_cache_file=False,
        desc="filter relations length > 1",
    )
    relations_grater_than_2 = raw_datasets["train"].filter(
        lambda x: len(x["relations"]) >= 2,
        load_from_cache_file=False,
        desc="filter relations length >= 2",
    )

    # (The string below documents per-rule eligible-sample counts:
    #  add_triple / add_same_type_triple / replace_same_type_entity need
    #  relations >= 1; swap_triple / remove_triple need relations >= 2.)
    """
    计算原数据集中满足规则的样本数
    add_triple和add_same_type_triple的样本数为 relations>=1 的样本数
    swap_triple 和 remove_triple 的样本数为 relations>=2 的样本数
    replace_same_type_entity 的样本数为 relations>=1 的样本数
    """
    rule2original_num = {}

    for rule in triple_rules:
        if rule in ["add_triple", "add_same_type_triple", "replace_same_type_entity"]:
            rule2original_num[rule] = len(relations_gt1)
        elif rule in ["swap_triple", "remove_triple"]:
            rule2original_num[rule] = len(relations_grater_than_2)

    # Different rules need different numbers of sampling passes.
    rule2_sample_times = {}

    for rule in triple_rules:
        # Number of source examples that satisfy this rule's precondition.
        original_train_num = rule2original_num.get(rule, 0)

        if original_train_num == 0:
            continue

        rule2_sample_times[rule] = calculate_sample_times(
            per_rule_num, original_train_num
        )

    dataset_path = os.path.join(args.root_data_dir, dataset_name)

    # Load the entity / relation-type inventories used by the rules below.
    entity_by_type = get_entity_by_type_json(args.root_data_dir, dataset_name)
    relations_by_type = get_relations_by_type_json(args.root_data_dir, dataset_name)
    relation_labels = load_json(f"{dataset_path}/labels.json")

    def replace_entity_for_relations(data):
        """Replace one triple's head and tail with same-typed entities."""

        # Pick the index of the triple to rewrite.
        index = np.random.choice(len(data["relations"]))

        # Grab the chosen triple's head and tail entities.
        sample = data["relations"][index]
        head = sample["head"]
        tail = sample["tail"]

        # Draw same-typed replacement names from entity_by_type.
        head_name = np.random.choice(entity_by_type[head["type"]])
        tail_name = np.random.choice(entity_by_type[tail["type"]])

        # Mutate the triple in place.
        data["relations"][index]["head"]["name"] = head_name
        data["relations"][index]["tail"]["name"] = tail_name

        return {
            "relations": data["relations"],
        }

    # Randomly insert one triple sharing a relation type already in the sample.
    def add_same_type_triple(data):
        index = np.random.choice(len(data["relations"]))

        sample = data["relations"][index]
        relation_type = sample["type"]

        # Draw a triple of the same relation type from relations_by_type.
        new_sample = np.random.choice(relations_by_type[relation_type])
        data["relations"].insert(index, new_sample)

        return {
            "relations": data["relations"],
            # "rule": "add_same_type_triple",
        }

    def add_triple(data):
        """Randomly insert one triple of any relation type."""

        relation_index = np.random.choice(len(relation_labels))

        choiced_relation_type = relation_labels[relation_index]
        added_triple = np.random.choice(relations_by_type[choiced_relation_type])

        if len(data["relations"]) == 0:
            data["relations"].append(added_triple)
        else:
            index = np.random.choice(len(data["relations"]))
            data["relations"].insert(index, added_triple)

        return {
            "relations": data["relations"],
        }

    def add_triple_for_one_example(data):
        """Randomly insert one triple of any relation type.

        NOTE(review): identical to add_triple above — one of the two is
        redundant.
        """

        relation_index = np.random.choice(len(relation_labels))

        choiced_relation_type = relation_labels[relation_index]
        added_triple = np.random.choice(relations_by_type[choiced_relation_type])

        if len(data["relations"]) == 0:
            data["relations"].append(added_triple)
        else:
            index = np.random.choice(len(data["relations"]))
            data["relations"].insert(index, added_triple)

        return {
            "relations": data["relations"],
        }

    # print(triple_rules)
    current_data_dir = f"{args.root_data_dir}/{dataset_name}/{args.save_dir_name}"

    # Create this dataset's output directory.
    os.makedirs(current_data_dir, exist_ok=True)

    # return

    # Preprocess: strip the head/tail pos spans from every sample's
    # relations field in raw_datasets.
    raw_datasets = raw_datasets.map(
        remove_pos_attr,
        load_from_cache_file=False,
        desc="remove pos attr",
    )

    def retain_original(data):
        # Identity rule: keep the relations unchanged.
        return {
            "relations": data["relations"],
        }

    # Holds the triple datasets produced by each rule.
    # add_raw_datasets = raw_datasets["train"].filter(
    #     lambda x: len(x["relations"]) <= 2,
    #     load_from_cache_file=False,
    #     desc="filter relations length <=2",
    # )

    # Generate `total_num` augmented samples for one rule.
    def gen_aug_triples_by_rule(rule, total_num=per_rule_num):
        result_dataset = []
        generate_funcs = {
            "add_triple": add_triple_for_one_example,
            "add_same_type_triple": add_same_type_triple,
            "swap_triple": swap_triple,
            "remove_triple": remove_triple,
            "replace_same_type_entity": replace_entity_for_relations,
            "origin": retain_original,
        }

        origin_datasets = {
            "add_triple": relations_gt1,
            "add_same_type_triple": relations_gt1,
            "swap_triple": relations_grater_than_2,
            "remove_triple": relations_grater_than_2,
            "replace_same_type_entity": relations_gt1,
            "origin": relations_gt1,
        }

        generate_func = generate_funcs.get(rule)
        origin_dataset = origin_datasets.get(rule)

        print(f"generate {rule} dataset")

        # Sample with replacement from the eligible source examples.
        for _ in range(total_num):
            choice_index = np.random.choice(len(origin_dataset))
            sample = origin_dataset[choice_index]
            result_dataset.append(generate_func(sample))

        return Dataset.from_list(result_dataset)

    # def gen_aug_triples_by_rule(rule, per_rule_num=per_rule_num):
    #     """Generate the augmented triple dataset for a rule and save it as {rule}.json."""
    #     # Randomly add one triple of any type
    #     if rule == "add_triple":
    #         dataset = relations_gt1.map(
    #             add_triple,
    #             desc="add triple",
    #         )

    #     # Randomly pick a sample's relation and insert a similar one nearby
    #     if rule == "add_same_type_triple":
    #         dataset = relations_gt1.map(
    #             add_same_type_triple,
    #             desc="add same type triple",
    #             # load_from_cache_file=False,
    #         )

    #     # Swap two adjacent triples
    #     if rule == "swap_triple":
    #         dataset = relations_grater_than_2.map(swap_triple, desc="swap triple")

    #     # Randomly remove one triple
    #     if rule == "remove_triple":
    #         dataset = relations_grater_than_2.map(remove_triple, desc="remove triple")

    #     # Randomly replace a relation's head/tail with same-typed entities
    #     if rule == "replace_same_type_entity":
    #         dataset = relations_gt1.map(
    #             replace_entity_for_relations,
    #             desc="replace entity for relations",
    #         )

    #     if rule == "origin":
    #         dataset = relations_gt1.map(retain_original, desc="retain original")

    #     return dataset

    # triple_data_path = get_aug_triple_config(current_data_dir, triple_rules)
    # # For each file in triple_data_path: load it if it exists, otherwise generate it
    # for rule, path in triple_data_path.items():
    #     if not os.path.exists(path):
    #         print(f"generate {rule} dataset")
    #         rule_dataset = gen_aug_triples_by_rule(rule)
    #         rule_dataset.to_json(f"{current_data_dir}/{rule}.json")

    # triple_datasets = load_dataset("json", data_files=triple_data_path)

    from datasets import DatasetDict

    triple_datasets = DatasetDict()
    for rule in triple_rules:
        triple_datasets[rule] = gen_aug_triples_by_rule(rule, per_rule_num)
        # update_rule_dataset = []
        # sample_times = rule2_sample_times.get(rule, 1)
        # for _ in range(sample_times):
        #     update_rule_dataset.append(gen_aug_triples_by_rule(rule))

        # triple_datasets[rule].to_json(f"{current_data_dir}/{rule}.json")

        if is_few_shot:
            if data_config["train"].endswith("few_shot.json"):
                middle_save_dir = (
                    f"{args.root_data_dir}/{dataset_name}/few_shot10%_{args.aug_times}"
                )
            else:
                middle_save_dir = f"{args.root_data_dir}/{dataset_name}/few_shot_{few_shot_num}_{args.aug_times}"
        else:
            middle_save_dir = (
                f"{args.root_data_dir}/{dataset_name}/full_data_{args.aug_times}"
            )
            # NOTE(review): makedirs runs only in this branch; the few-shot
            # paths above are never created here — confirm they exist upstream.
            os.makedirs(middle_save_dir, exist_ok=True)

        triple_datasets[rule].to_json(
            # f"{args.root_data_dir}/{dataset_name}/few_shot_{few_shot_num}/{rule}.json"
            # f"{args.root_data_dir}/{dataset_name}/few_shot_{few_shot_num}_{args.aug_times}/{rule}.json"
            f"{middle_save_dir}/{rule}.json"
            # f"{args.root_data_dir}/{dataset_name}/few_shot_{few_shot_num}_{args.aug_times}/{rule}.json"
        )

    # NOTE(review): early exit — everything below is unreachable dead code.
    return

    # triple_datasets.shuffle()
    def add_instruction_and_context(data):
        """Attach the generation instruction and a tuple-form context field."""
        context = get_context_from_relations(data["relations"])
        return {
            "relations": data["relations"],
            "context": context,
            "instruction": instruction,
        }

    # Add instruction and context fields to every rule dataset.
    triple_datasets = triple_datasets.map(
        add_instruction_and_context, desc="add instruction and context"
    )

    merged_triples_dataset = triple_datasets.map(
        remove_pos_for_triple_dataset,
        # load_from_cache_file=False,
        desc="remove merged pos attr",
    )

    model_name = config["model"]["model_name"]
    tokenizer_kwargs = dict(config["tokenizer"])

    if args.is_ft:
        # NOTE(review): dataset2modelpath is only defined in commented-out
        # code above — this branch would raise NameError if reached.
        model_path = dataset2modelpath.get(dataset_name, model_name)
    else:
        model_path = model_name

    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_path,
        trust_remote_code=True,
        use_cache=False,
    )

    model.to("cuda")

    batch_size = 8
    max_length = 512

    if dataset_name == "conll04":
        batch_size = 12

    def tokenize_dataset(examples):
        instructions = examples["instruction"]
        context = examples["context"]

        # Datasets without entity types serialize 3-tuples; others 5-tuples.
        if dataset_name in ["ADE_corpus", "SciERC", "semval-RE"]:
            context = ",".join([f"({x[0]}, {x[1]}, {x[2]})" for x in context])
        else:
            context = ",".join(
                [f"({x[0]}, {x[1]}, {x[2]}, {x[3]}, {x[4]})" for x in context]
            )

        return tokenizer(
            text=instructions,
            text_pair=context,
            **tokenizer_kwargs,
            padding="max_length",  # (stale note said to pad via DataCollatorForSeq2Seq; padding happens here)
        )

    tokenized_dataset = merged_triples_dataset.map(
        tokenize_dataset,
        batched=False,
        # load_from_cache_file=False,
        desc="tokenize dataset",
        remove_columns=["context", "instruction"],
    )

    # train_dataset = tokenized_dataset

    output_datasets = DatasetDict()
    for split, train_dataset in tokenized_dataset.items():
        input_ids_batches = []

        for i in tqdm(range(0, len(train_dataset), batch_size)):
            batch_input_texts = train_dataset[i : i + batch_size]
            # batch_input_ids = tokenizer(batch_input_texts, padding=True, return_tensors="pt").input_ids
            batch_input_ids = torch.tensor(
                batch_input_texts["input_ids"], device=model.device
            )
            input_ids_batches.append(batch_input_ids)

        output_texts = []
        for batch_input_ids in tqdm(
            input_ids_batches, desc=f"Processing batches for {split}"
        ):
            batch_output_ids = model.generate(
                batch_input_ids,
                max_length=max_length,
                num_beams=4,
                early_stopping=True,
                do_sample=True,
                min_length=20,
                length_penalty=2.0,
                temperature=0.5,
            )
            batch_output_texts = [
                tokenizer.decode(output_id, skip_special_tokens=True)
                for output_id in batch_output_ids
            ]
            output_texts.extend(batch_output_texts)

        output_datasets[split] = output_texts

    all_generated_datasets = DatasetDict()

    for rule in triple_rules:
        triple_dataset = triple_datasets[rule]
        output_texts = output_datasets[rule]
        all_generated_datasets[rule] = merge_output_and_origin_triples(
            triple_dataset, output_texts
        )
    print("all_generated_datasets", all_generated_datasets)

    # Save each generated dataset as a JSON file.
    for r, d in all_generated_datasets.items():
        d.to_json(f"{current_data_dir}/{r}.json")

    # For each rule, take up to per_rule_num samples and merge into one dataset.
    prepared_to_merge_datasets = []
    for rule in triple_rules:
        sample_num = (
            all_generated_datasets[rule].num_rows
            if all_generated_datasets[rule].num_rows < per_rule_num
            else per_rule_num
        )
        prepared_to_merge_datasets.append(
            all_generated_datasets[rule].select(range(sample_num))
        )

    all_output_dataset = concatenate_datasets(prepared_to_merge_datasets)
    # Select total_train_num samples from the merged set.

    all_output_dataset.to_json(f"{current_data_dir}/new_dataset.json")
    merge_train_dataset = concatenate_datasets(
        [raw_datasets["train"], all_output_dataset]
    )

    merge_train_dataset.to_json(f"{current_data_dir}/merge_train_dataset.json")

    # os.makedirs("output/test/conll04", exist_ok=True)
    # new_dataset.to_json(
    #     f"{current_data_dir}/merge.json"
    # )
    # Save all CLI args to args.json inside save_dir_name.
    with open(
        f"{current_data_dir}/args.json",
        "w",
    ) as f:
        json.dump(vars(args), f, ensure_ascii=False, indent=4)

    # Copy the model config file into save_dir_name.
    os.system(f"cp {args.path_to_model_config} {current_data_dir}/config.yaml")


# (stale note) For each sample in raw_datasets["train"], swap the head and
# tail entities in its relations field.
if __name__ == "__main__":
    main()
