from datasets import load_dataset, Dataset, concatenate_datasets, DatasetDict
import numpy as np
from instruction_re.utils.utils import load_json, load_config, set_global_seed

from instruction_re.utils.data_utils import (
    get_data_config,
)
from instruction_re.utils.data_aug_utils import (
    get_entity_by_type_json,
    get_relations_by_type_json,
    group_entity_by_type,
)

from data_aug_arg_parse import get_train_args

# from utils import group_entity_by_type
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
)
import json
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import os
from tqdm import tqdm
from typing import Union, Dict, List
from pathlib import Path
from tqdm import tqdm

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from datetime import datetime


def get_aug_triple_config(current_dir: Union[str, Path], rules: List[str]):
    """Return a mapping from each rule name to its JSON file path under current_dir.

    Raises:
        ValueError: If current_dir does not exist on disk.
    """
    # Fail fast when the dataset directory is missing.
    if not os.path.exists(current_dir):
        raise ValueError(f"dataset dir: {current_dir} not exists.")

    paths = {}
    for rule in rules:
        paths[rule] = f"{current_dir}/{rule}.json"
    return paths


def setup_ddp(rank, world_size):
    """Initialize the NCCL process group for distributed data-parallel runs.

    Args:
        rank: This process's rank within the group.
        world_size: Total number of participating processes.
    """
    # Rendezvous endpoint shared by all ranks.
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"

    dist.init_process_group("nccl", rank=rank, world_size=world_size)


def cleanup():
    """Tear down the distributed process group created by setup_ddp."""
    dist.destroy_process_group()


# def get_few_shot_data(
#     root_dir: Union[str, Path],
#     dataset_name: str,
# ):
#     dataset_dir = f"{root_dir}/{dataset_name}"

#     if "semval" in dataset_name:
#         return {
#             "train": f"{dataset_dir}/few_shot_50.json",
#             "valid": f"{dataset_dir}/dev.json",
#             "test": f"{dataset_dir}/test.json",
#         }

#     return {
#         "train": f"{dataset_dir}/few_shot_50.json",
#         "valid": f"{dataset_dir}/dev.json",
#         "test": f"{dataset_dir}/test.json",
#     }


def get_context_from_relations(relations: List) -> List:
    """Flatten each relation dict into a (head, head_type, rel_type, tail, tail_type) tuple."""
    return [
        (
            rel["head"]["name"],
            rel["head"]["type"],
            rel["type"],
            rel["tail"]["name"],
            rel["tail"]["type"],
        )
        for rel in relations
    ]


def remove_pos_for_triple_dataset(data):
    """Strip the head/tail 'pos' attribute from every relation, keeping only names and types."""
    cleaned = [
        {
            "head": {"name": rel["head"]["name"], "type": rel["head"]["type"]},
            "type": rel["type"],
            "tail": {"name": rel["tail"]["name"], "type": rel["tail"]["type"]},
        }
        for rel in data["relations"]
    ]
    return {"relations": cleaned}


def remove_pos_attr(data):
    """Drop head/tail 'pos' info from the relations while preserving the sentence field."""
    slim_relations = []
    for rel in data["relations"]:
        head = {"name": rel["head"]["name"], "type": rel["head"]["type"]}
        tail = {"name": rel["tail"]["name"], "type": rel["tail"]["type"]}
        slim_relations.append({"head": head, "type": rel["type"], "tail": tail})
    return {"sentence": data["sentence"], "relations": slim_relations}


# 随机交换两个triple的位置
# Randomly swap two adjacent triples.
def swap_triple(data):
    """Swap a randomly chosen relation triple with its right neighbor (in place).

    Picks an index i in [0, len-2] via np.random.choice and exchanges
    relations[i] with relations[i+1].

    Fix: the original crashed with ValueError when a sample had fewer than
    two relations (np.random.choice over an empty range); such samples are
    now returned unchanged, since there is nothing to swap.
    """
    relations = data["relations"]
    if len(relations) < 2:
        return {"relations": relations}

    index = np.random.choice(len(relations) - 1)
    # Tuple swap of the two adjacent triples.
    relations[index], relations[index + 1] = relations[index + 1], relations[index]

    return {
        "relations": relations,
    }


# def swap_head_tail(data):
#     """随机选择一个triple，交换head 和 tail，并替换为相反方向的 relation type"""
#     index = np.random.choice(len(data["relations"]))

#     sample = data["relations"][index]

#     head = sample["head"]
#     tail = sample["tail"]
#     rel_type = sample["type"]

#     sample["head"] = tail
#     sample["tail"] = head
#     sample["type"] = np.random.choice(similar_relations[rel_type]["opposite_direction"])
#     data["relations"][index] = sample

#     return {
#         "relations": data["relations"],
#         # "rule": "swap_head_tail",
#     }


def get_alternative_relations_by_dataset(dataset_name):
    """Look up relation types similar to each relation of the given dataset.

    Used by the replace_relation augmentation rule to substitute a relation
    type with a semantically close one.

    Returns:
        The dataset's entry from ./alternative_relations.json, or None when
        the dataset name is not present.
    """
    # The path is relative, so this assumes the script runs from its own
    # directory — NOTE(review): confirm the working directory at call time.
    alternative_relations = load_json("./alternative_relations.json")
    return alternative_relations.get(dataset_name)


def calculate_sample_times(total_num, per_rule_num):
    """Number of sampling passes needed for swap_triple / remove_triple.

    Returns ceil(total_num / per_rule_num): how many times each rule must be
    applied so that per_rule_num samples per pass add up to at least
    total_num.

    Raises:
        ValueError: If per_rule_num is 0 (the division is undefined).
        (Fix: the original message referenced a nonexistent variable
        `greater_than_2_num` and said "cannot be empty" for a zero check.)
    """
    if per_rule_num == 0:
        raise ValueError("per_rule_num must not be 0")

    # Integer ceiling division, equivalent to the original
    # if-divisible-then-floor-else-floor-plus-one branching.
    return -(-total_num // per_rule_num)


# 随机删除一个triple
# Randomly drop one triple.
def remove_triple(data):
    """Delete one randomly selected relation triple (mutates data in place)."""
    relations = data["relations"]

    drop_idx = np.random.choice(len(relations))
    del relations[drop_idx]

    return {"relations": relations}


def merge_output_and_origin_triples(triple_dataset, output_texts):
    """Pair each generated sentence with its source relation triples.

    For every (sample, generated text) pair, rebuild the relations without
    position info and attach the generated sentence. A sample is kept only
    if at least one of its head/tail entity names actually appears in the
    generated text — a cheap sanity filter on generation quality.

    Fix: removed the dead `text.find(head)` / `text.find(tail)` calls whose
    results were never used (the pos fields they fed are commented out).

    Args:
        triple_dataset: Iterable of samples, each with a "relations" field.
        output_texts: Generated sentences, aligned with triple_dataset.

    Returns:
        A ``datasets.Dataset`` with "sentence" and "relations" columns.
    """
    output_dataset = []
    for sample, text in zip(triple_dataset, output_texts):
        entity_list = set()
        relations = []
        for r in sample["relations"]:
            head = r["head"]["name"]
            tail = r["tail"]["name"]

            relations.append(
                {
                    "head": {"name": head, "type": r["head"]["type"]},
                    "type": r["type"],
                    "tail": {"name": tail, "type": r["tail"]["type"]},
                }
            )

            entity_list.add(head)
            entity_list.add(tail)

        # Skip generations that mention none of the expected entities.
        if not any(entity in text for entity in entity_list):
            continue

        output_dataset.append(
            {
                "sentence": text,
                "relations": relations,
            }
        )
    return Dataset.from_list(output_dataset)


def main():
    """Run the triple-to-text augmentation pipeline.

    Loads per-rule triple JSON files, renders each sample's relations as an
    (instruction, context) prompt, generates one sentence per sample with a
    seq2seq model, merges generated sentences back with their source triples,
    and writes per-rule datasets, the merged train set, and the run config.
    """
    args = get_train_args()
    triple_rules = args.rules.split()
    dataset_name = args.dataset_name
    # now = datetime.now()
    # few_shot_num = 50
    few_shot_num = args.few_shot_num
    is_few_shot = 1  # NOTE(review): hard-coded to few-shot mode, not taken from args

    config = load_config(args.path_to_model_config)
    # Slack samples generated per rule beyond the strict quota, so the
    # post-generation entity filter still leaves enough samples.
    if not is_few_shot:
        add_num = 50
    else:
        add_num = 20 if few_shot_num == 100 else 30 if few_shot_num == 150 else 20

    origin_train_path = get_data_config(
        args.root_data_dir,
        dataset_name,
        is_few_shot=is_few_shot,
        few_shot_num=few_shot_num,
    )["train"]
    origin_train_dataset = load_dataset("json", data_files={"train": origin_train_path})

    raw_count = origin_train_dataset["train"].num_rows
    print("raw_count", raw_count)
    # Total augmented samples wanted, and the per-rule generation quota.
    aug_total_num = raw_count * args.aug_times
    per_rule_count = aug_total_num // len(triple_rules) + add_num

    # set_global_seed(config["seed"])
    # dataset2modelpath = {
    #     "conll04": "data_aug/few_shot/conll04/google/flan-t5-large/0906_23_38",
    #     "SciERC": "data_aug/few_shot/SciECR_with_entity_processed/google/flan-t5-large/0906_23_40",
    #     "semval-RE": "data_aug/few_shot/semval-RE/google/flan-t5-large/0906_23_50",
    # }

    # dataset2modelpath = {
    #     "conll04": "data_aug/few_shot_50/conll04/google/flan-t5-large/0910_15_39",
    #     "SciERC": "data_aug/few_shot_50/SciERC/google/flan-t5-large/0910_15_40",
    #     "semval-RE": "data_aug/few_shot_50/semval-RE/google/flan-t5-large/0910_15_42",
    # }

    # dataset2modelpath = {
    #     "conll04": "data_aug/few_shot_150/ft_models/conll04/google/flan-t5-large/",
    #     "SciERC": "data_aug/few_shot_150/ft_models/SciERC/google/flan-t5-large/",
    #     "semval-RE": "data_aug/few_shot_150/ft_models/semval-RE/google/flan-t5-large/",
    # }
    # Fine-tuned generator checkpoint paths, used when args.is_ft is set.
    dataset2modelpath = {
        "conll04": "data_aug/few_shot10%/conll04/0921_03_16",
        "SciERC": "data_aug/few_shot10%/SciERC",
        "semval-RE": "data_aug/few_shot10%/semval-RE/0921_03_23",
    }

    set_global_seed(args.seed)
    dataset_name = args.dataset_name  # NOTE(review): redundant re-assignment (set above)
    data_config = get_data_config(
        args.root_data_dir,
        dataset_name,
        is_few_shot=is_few_shot,
        few_shot_num=few_shot_num,
    )

    instruction = "Generate a natural language sentence that expresses the following relation triple:"
    raw_datasets = load_dataset("json", data_files=data_config)

    # print(triple_rules)
    current_data_dir = f"{args.root_data_dir}/{dataset_name}/{args.save_dir_name}"

    # Create the output directory for this dataset.
    os.makedirs(current_data_dir, exist_ok=True)
    base_dir = f"{args.root_data_dir}/{dataset_name}"

    # Directory holding the per-rule triple JSON files produced upstream.
    if is_few_shot:
        if few_shot_num != 0:
            root_data_dir = f"{base_dir}/few_shot_{few_shot_num}_{args.aug_times}"
        else:
            root_data_dir = f"{base_dir}/few_shot10%_{args.aug_times}"
    else:
        root_data_dir = f"{base_dir}/full_data_{args.aug_times}"

    # triple_datasets.shuffle()
    def add_instruction_and_context(data):
        """Attach the generation instruction and tuple-form context to a sample."""
        context = get_context_from_relations(data["relations"])
        return {
            "relations": data["relations"],
            "context": context,
            "instruction": instruction,
        }

    # One split per rule: split names equal rule names from here on.
    rule_json_files = {rule: f"{root_data_dir}/{rule}.json" for rule in triple_rules}
    triple_datasets = load_dataset("json", data_files=rule_json_files)
    # For each rule's dataset, keep per_rule_count samples.
    # triple_datasets = triple_datasets.map(
    #     lambda x: x.select(range(per_rule_count)), desc="select samples"
    # )
    select_num = (
        per_rule_count
        if len(triple_rules) > 1
        else origin_train_dataset["train"].num_rows
    )
    triple_datasets = {
        k: v.shuffle(seed=args.seed).select(range(select_num))
        for k, v in triple_datasets.items()
    }

    triple_datasets = DatasetDict(triple_datasets)

    # Add the instruction and context fields to every rule's dataset.
    triple_datasets = triple_datasets.map(
        add_instruction_and_context, desc="add instruction and context"
    )

    model_name = config["model"]["model_name"]
    tokenizer_kwargs = dict(config["tokenizer"])

    # Prefer the dataset-specific fine-tuned checkpoint when requested,
    # falling back to the base model name.
    if args.is_ft:
        model_path = dataset2modelpath.get(dataset_name, model_name)
    else:
        model_path = model_name

    print(model_path)

    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_path,
        trust_remote_code=True,
        use_cache=False,
    )

    model.to("cuda")

    batch_size = 8
    max_length = 512

    if dataset_name == "conll04":
        batch_size = 12

    def tokenize_dataset(examples):
        """Render one sample's triples as text and tokenize the instruction/context pair."""
        instructions = examples["instruction"]
        context = examples["context"]

        # These datasets use 3-tuples (head, relation, tail); the rest keep
        # the full 5-tuple form with entity types.
        if dataset_name in ["ADE_corpus", "SciERC", "semval-RE"]:
            context = ",".join([f"({x[0]}, {x[1]}, {x[2]})" for x in context])
        else:
            context = ",".join(
                [f"({x[0]}, {x[1]}, {x[2]}, {x[3]}, {x[4]})" for x in context]
            )

        return tokenizer(
            text=instructions,
            text_pair=context,
            **tokenizer_kwargs,
            padding="max_length",  # NOTE(review): pads at tokenize time; the original comment claimed DataCollatorForSeq2Seq handles padding, but no collator is used here
        )

    tokenized_dataset = triple_datasets.map(
        tokenize_dataset,
        batched=False,
        # load_from_cache_file=False,
        desc="tokenize dataset",
        remove_columns=["context", "instruction"],
    )

    output_datasets = DatasetDict()

    # Generate one sentence per tokenized sample, rule (split) by rule.
    for split, train_dataset in tokenized_dataset.items():
        input_ids_batches = []

        for i in tqdm(range(0, len(train_dataset), batch_size)):
            batch_input_texts = train_dataset[i : i + batch_size]
            # batch_input_ids = tokenizer(batch_input_texts, padding=True, return_tensors="pt").input_ids
            # Rows were padded to max_length above, so a plain tensor works.
            batch_input_ids = torch.tensor(
                batch_input_texts["input_ids"], device=model.device
            )
            input_ids_batches.append(batch_input_ids)

        output_texts = []
        for batch_input_ids in tqdm(
            input_ids_batches, desc=f"Processing batches for {split}"
        ):
            batch_output_ids = model.generate(
                batch_input_ids,
                max_length=max_length,
                num_beams=4,
                early_stopping=True,
                do_sample=True,
                min_length=20,
                length_penalty=2.0,
                temperature=0.5,
            )
            batch_output_texts = [
                tokenizer.decode(output_id, skip_special_tokens=True)
                for output_id in batch_output_ids
            ]
            output_texts.extend(batch_output_texts)

        # NOTE(review): stores a plain list of strings under a DatasetDict key;
        # it is only read back below, never used as a Dataset.
        output_datasets[split] = output_texts

    all_generated_datasets = DatasetDict()

    # Re-attach each generated sentence to its source triples, per rule.
    for rule in triple_rules:
        triple_dataset = triple_datasets[rule]
        output_texts = output_datasets[rule]
        all_generated_datasets[rule] = merge_output_and_origin_triples(
            triple_dataset, output_texts
        )
    print("all_generated_datasets", all_generated_datasets)

    # Save each rule's generated dataset as a JSON file.
    for r, d in all_generated_datasets.items():
        d.to_json(f"{current_data_dir}/{r}.json")

    # For each rule, keep at most per_rule_save_count samples, then merge into one dataset.
    prepared_to_merge_datasets = []
    per_rule_save_count = aug_total_num // len(triple_rules) + 1

    for rule in triple_rules:
        sample_num = (
            all_generated_datasets[rule].num_rows
            if all_generated_datasets[rule].num_rows < per_rule_save_count
            else per_rule_save_count
        )
        prepared_to_merge_datasets.append(
            all_generated_datasets[rule].select(range(sample_num))
        )

    all_output_dataset = concatenate_datasets(prepared_to_merge_datasets)
    # Down-sample the merged pool to exactly aug_total_num samples.
    # NOTE(review): select() raises if the entity filter dropped too many samples.

    all_output_dataset = all_output_dataset.shuffle(seed=args.seed).select(
        range(aug_total_num)
    )

    all_output_dataset.to_json(f"{current_data_dir}/new_dataset.json")
    merge_train_dataset = concatenate_datasets(
        [raw_datasets["train"], all_output_dataset]
    )

    merge_train_dataset.to_json(f"{current_data_dir}/merge_train_dataset.json")

    # os.makedirs("output/test/conll04", exist_ok=True)
    # new_dataset.to_json(
    #     f"{current_data_dir}/merge.json"
    # )
    # Save all run arguments as args.json inside save_dir_name.
    with open(
        f"{current_data_dir}/args.json",
        "w",
    ) as f:
        json.dump(vars(args), f, ensure_ascii=False, indent=4)

    # Copy the model config file into save_dir_name for reproducibility.
    os.system(f"cp {args.path_to_model_config} {current_data_dir}/config.yaml")


# NOTE(review): stale comment — it described swapping head/tail entities for each
# sample's relations in raw_datasets["train"], but no such step exists in this script.
if __name__ == "__main__":
    main()
