from instruction_re.core.datatypes import TaskType, AnswerType, ContextFormat

from instruction_re.formatters import *
from datasets import concatenate_datasets, Dataset
from typing import Union, Dict, List, Tuple
from pathlib import Path
import os
import json

from .utils import loads_json, load_json


import os

# Disable Hugging Face tokenizers' fork-based parallelism to avoid
# deadlock warnings when DataLoader workers fork the process.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"


def get_label2token_by_dataset(
    dataset_name: Union[str, Path], replace_labels_with_special_tokens: bool = False
):
    """Build a label -> token mapping from a dataset's ``labels.json``.

    Args:
        dataset_name (Union[str, Path]): dataset directory containing
            ``labels.json``.
        replace_labels_with_special_tokens (bool): when True, map selected
            labels to special tokens — dataset-specific overrides first,
            otherwise any label of three or more words becomes
            ``<word1_word2_...>``. When False, each label maps to itself.

    Returns:
        Dict[str, str]: mapping from lower-cased label to its output token.
    """
    labels = [s.lower() for s in load_json(os.path.join(dataset_name, "labels.json"))]

    if not replace_labels_with_special_tokens:
        return {label: label for label in labels}

    dataset_name = str(dataset_name)

    # Dataset-specific overrides take precedence over the generic rule below.
    if dataset_name.endswith("GIDS"):
        return {
            "place of death": "die in",
            "place of birth": "born in",
            # NOTE(review): "educatioin" looks like a typo, but this token may
            # be baked into trained checkpoints -- confirm before correcting.
            "education degree": "<educatioin_degree>",
            "education institution": "<education_institution>",
        }
    if dataset_name.endswith("ADE_corpus"):
        return {
            "adverse effect": "<adverse_effect>",
        }

    # Generic rule: labels with three or more words become "<w1_w2_...>";
    # shorter labels are kept verbatim.
    return {
        label: (
            "<" + "_".join(label.split(" ")) + ">"
            if len(label.split(" ")) >= 3
            else label
        )
        for label in labels
    }


def get_entity_labels(dataset_name: Union[str, Path]) -> List[str]:
    """Load the entity type labels of a dataset.

    Args:
        dataset_name (Union[str, Path]): dataset directory.

    Returns:
        List[str]: entity type labels read from ``entity_labels.json``.
    """
    return load_json(f"{dataset_name}/entity_labels.json")


def format_datasets_from_tasks(
    tasks: TaskType,
    dataset: Dataset,
    instructions: Dict,
    **kwargs,
) -> Dataset:
    """Format ``dataset`` once per requested task and concatenate the results.

    Args:
        tasks (TaskType): iterable of task types to format the dataset for.
        dataset (Dataset): the raw dataset to format.
        instructions (Dict): mapping from task value to instruction text.
        **kwargs: formatter options; must include ``dataset_name`` and may
            include ``replace_label_to_special_token``, ``sep``,
            ``template_order``, etc.

    Raises:
        ValueError: if an instruction is missing for any requested task.

    Returns:
        Dataset: concatenation of the per-task formatted datasets.
    """
    dataset_name = Path(kwargs.pop("dataset_name"))

    label2token = get_label2token_by_dataset(
        dataset_name, kwargs.get("replace_label_to_special_token", False)
    )
    kwargs["label2token"] = label2token

    def get_task_formatter(**kwargs) -> Dict:
        """Return a mapping from task value to its data formatter instance.

        Returns:
            Dict: task value -> formatter.
        """
        return {
            TaskType.RC_NO_ICL.value: RETaskFormatter(
                with_icl=False,
                **kwargs,
            ),
            TaskType.RE.value: RETaskFormatter(
                re_strict=False,
                **kwargs,
            ),
            TaskType.RE_STRICT.value: RETaskFormatter(**kwargs, re_strict=True),
            TaskType.RC_W_EXP.value: RETaskFormatter(
                with_icl=False,
                with_examples=True,
                **kwargs,
            ),
            TaskType.RC.value: RETaskFormatter(with_icl=True, **kwargs),
            TaskType.TE.value: RelTypeExtractFormatter(
                set_option=True, label2token=label2token
            ),
            TaskType.TE_NO_OPTIONS.value: RelTypeExtractFormatter(
                set_option=False, label2token=label2token, sep=kwargs["sep"]
            ),
            TaskType.OE.value: ObjExtractFormatter(),
            TaskType.SE.value: SubjectExtractFormatter(),
            TaskType.EPE.value: EntityPairExtractFormatter(
                **kwargs,
            ),
            TaskType.NER.value: NERTask(template_order=kwargs["template_order"]),
        }

    result = []

    task_to_formatter = get_task_formatter(**kwargs)

    for task in tasks:
        # Look up the instruction with .get() BEFORE building format_kwargs,
        # so a missing task raises the intended ValueError instead of the
        # bare KeyError that direct indexing produced.
        instruction = instructions.get(task.value)
        if instruction is None:
            raise ValueError(f"Instruction for task {task.value} is not provided.")

        format_kwargs = {"instruction": instruction}
        remove_columns = ["relations", "sentence"]

        # Relation-type extraction and relation classification tasks all
        # need the candidate options list.
        if task in [
            TaskType.RC,
            TaskType.RE,
            TaskType.RC_NO_ICL,
            TaskType.RC_W_EXP,
            TaskType.TE,
            TaskType.TE_NO_OPTIONS,
            TaskType.RE_STRICT,
        ]:
            format_kwargs["options"] = list(label2token.values())

            # Strict RE additionally constrains entity types.
            if task == TaskType.RE_STRICT:
                format_kwargs["options"] += get_entity_labels(dataset_name)

        elif task in [TaskType.NER]:
            format_kwargs["options"] = get_entity_labels(dataset_name)

        if task in [TaskType.RC_W_EXP, TaskType.RC]:
            remove_columns.append("examples")

        result.append(
            dataset.map(
                lambda x: task_to_formatter[task.value].format_data_sample(
                    x,
                    **format_kwargs,
                ),
                remove_columns=remove_columns,
                desc=f"formatting {task.value}",
                load_from_cache_file=False,
            )
        )

    return concatenate_datasets(result)


def save_entity_label_from_train_path(train_path, save_path):
    """Extract entity type labels from a training file and save them as JSON.

    Args:
        train_path: path to the training JSON file — a list of samples, each
            with a ``relations`` list whose ``head``/``tail`` dicts may carry
            a ``type`` field.
        save_path: destination path for the JSON list of lower-cased labels.
    """
    data = load_json(train_path)
    entity_labels = set()
    ignored = ("NA", None, "")
    for sample in data:
        for rel in sample["relations"]:
            for entity in (rel["head"], rel["tail"]):
                label = entity.get("type")
                if label not in ignored:
                    # Lower-case BEFORE dedup so "Person"/"PERSON" collapse
                    # into a single label (the old code deduped first and
                    # could emit duplicates after lowering).
                    entity_labels.add(label.lower())

    # Sort for a deterministic file: set iteration order is not stable.
    with open(save_path, "w") as f:
        json.dump(sorted(entity_labels), f)


# def get_data_config(
#     root_dir: Union[str, Path],
#     dataset_name: str,
#     training_num: int = -1,
#     merge_dataset_dir: str = None,
#     rules: str = "",
#     is_few_shot: bool = False,
# ):
#     """获取数据集的配置信息

#     Args:
#         root_dir (Union[str, Path]): 数据集存放的root目录
#         dataset_name (str): 数据集名称
#         training_num (int): 要获取的小样本数量

#     Raises:
#         ValueError: _description_

#     Returns:
#         _type_: Dict[str, str]: 返回对应数据集的配置信息
#     """
#     dataset_dir = f"{root_dir}/{dataset_name}"

#     # 判断 dataset_dir 是否存在
#     if not os.path.exists(dataset_dir):
#         raise ValueError(f"dataset dir: {dataset_dir} not exists.")

#     # 判断是否存在 entity_labels.json 文件，如果不存在，从 train.json 中提取实体类型
#     entity_labels_path = f"{dataset_dir}/entity_labels.json"
#     if not os.path.exists(entity_labels_path):
#         save_entity_label_from_train_path(
#             f"{dataset_dir}/train.json", entity_labels_path
#         )

#     # 如果 training_num 为 -1，表示使用全部训练集
#     if training_num == -1:
#         if len(rules.split()) > 0 and merge_dataset_dir is not None:
#             train_path = f"{dataset_dir}/{merge_dataset_dir}/merge_train_dataset.json"
#             valid_path = f"{dataset_dir}/remove_pos_attr_dev.json"
#             test_path = f"{dataset_dir}/remove_pos_attr_test.json"
#         elif is_few_shot:
#             if "semval" in dataset_name:
#                 train_path = f"{dataset_dir}/few_shot_5.json"
#             train_path = f"{dataset_dir}/train.json"
#             valid_path = f"{dataset_dir}/dev.json"
#             test_path = f"{dataset_dir}/test.json"
#         return {
#             "train": train_path,
#             "valid": valid_path,
#             "test": test_path,
#         }

#     # 查看是否有 sample 数据集
#     if not os.path.exists(f"{dataset_dir}/sample_{training_num}.json"):
#         labels = loads_json(f"{dataset_dir}/ labels.json")
#         k_shot_sample(f"{dataset_dir}/train.json", dataset_dir, labels, training_num)

#     return {
#         "train": f"{dataset_dir}/sample_{training_num}.json",
#         "valid": f"{dataset_dir}/dev.json",
#         "test": f"{dataset_dir}/test.json",
#         # "labels": f"{dataset_dir}/labels.json",
#     }


def get_data_config(
    root_dir: Union[str, Path],
    dataset_name: str,
    training_num: int = -1,
    merge_dataset_dir: str = None,
    rules: str = "",
    is_few_shot: bool = False,
    few_shot_num: int = 50,
) -> Dict[str, str]:
    """Resolve the train/valid/test file paths for a dataset.

    Args:
        root_dir (Union[str, Path]): root directory holding all datasets.
        dataset_name (str): name of the dataset sub-directory.
        training_num (int, optional): number of samples per relation type to
            draw; -1 uses the full training set. Defaults to -1.
        merge_dataset_dir (str, optional): merged-dataset directory. Defaults to None.
        rules (str, optional): rules string; with ``merge_dataset_dir`` it
            selects the merged training file. Defaults to "".
        is_few_shot (bool, optional): whether to use few-shot files. Defaults to False.
        few_shot_num (int, optional): few-shot file suffix. Defaults to 50.

    Raises:
        ValueError: if the dataset directory does not exist.

    Returns:
        Dict[str, str]: file paths keyed by "train", "valid", "test".
    """
    dataset_dir = Path(root_dir) / dataset_name

    if not dataset_dir.exists():
        raise ValueError(f"Dataset directory: {dataset_dir} does not exist.")

    # Create entity_labels.json from train.json if it is missing.
    ensure_entity_labels_file(dataset_dir)

    # Pick the split files according to the training regime.
    paths = determine_dataset_paths(
        dataset_dir,
        training_num,
        merge_dataset_dir,
        rules,
        is_few_shot,
        few_shot_num=few_shot_num,
    )

    return dict(zip(("train", "valid", "test"), (str(p) for p in paths)))


def determine_dataset_paths(
    dataset_dir: Path,
    training_num: int,
    merge_dataset_dir: str,
    rules: str,
    is_few_shot: bool,
    few_shot_num: int = 50,
) -> tuple:
    """Select train/valid/test paths for few-shot, full, or sampled training.

    Args:
        dataset_dir (Path): dataset directory.
        training_num (int): samples per relation type; -1 means the full set.
        merge_dataset_dir (str): merged-dataset directory, if any.
        rules (str): rules string; with ``merge_dataset_dir`` selects merged files.
        is_few_shot (bool): whether to use few-shot files.
        few_shot_num (int, optional): few-shot file suffix. Defaults to 50.

    Returns:
        tuple: (train_path, valid_path, test_path) as Path objects.
    """
    # Dropped the stray debug print that polluted stdout on the few-shot path.
    if is_few_shot:
        return get_few_shot_paths(dataset_dir, merge_dataset_dir, rules, few_shot_num)

    if training_num == -1:
        return get_full_dataset_paths(
            dataset_dir,
            merge_dataset_dir,
            rules,
        )
    return get_sampled_dataset_paths(dataset_dir, training_num)

def get_full_dataset_paths(
    dataset_dir: Path,
    merge_dataset_dir: str,
    rules: str,
) -> tuple:
    """Return (train, valid, test) paths for full-dataset training.

    When both ``rules`` and ``merge_dataset_dir`` are truthy, the merged
    training file and the attribute-stripped dev/test splits are used;
    otherwise the plain train/dev/test files.
    """
    if rules and merge_dataset_dir:
        return (
            dataset_dir / merge_dataset_dir / "merge_train_dataset.json",
            dataset_dir / "remove_pos_attr_dev.json",
            dataset_dir / "remove_pos_attr_test.json",
        )

    return (
        dataset_dir / "train.json",
        dataset_dir / "dev.json",
        dataset_dir / "test.json",
    )


def get_few_shot_paths(
    dataset_dir: Union[str, Path],
    merge_dataset_dir: Union[str, Path],
    rules: str,
    few_shot_num: int = 50,
) -> Tuple[Path, Path, Path]:
    """Return (train, valid, test) paths for few-shot training.

    Validation and test always use the attribute-stripped splits; the
    training file depends on whether a merged dataset is requested and on
    ``few_shot_num`` (0 selects the unsuffixed ``few_shot.json``).
    """
    base = Path(dataset_dir)  # accept either str or Path

    if rules and merge_dataset_dir:
        train_path = base / merge_dataset_dir / "merge_train_dataset.json"
    elif few_shot_num == 0:
        train_path = base / "few_shot.json"
    else:
        train_path = base / f"few_shot_{few_shot_num}.json"

    return (
        train_path,
        base / "remove_pos_attr_dev.json",
        base / "remove_pos_attr_test.json",
    )


def ensure_entity_labels_file(dataset_dir: Path):
    """Create ``entity_labels.json`` from ``train.json`` if it does not exist."""
    labels_file = dataset_dir / "entity_labels.json"
    if labels_file.exists():
        return
    save_entity_label_from_train_path(dataset_dir / "train.json", labels_file)


def get_sampled_dataset_paths(dataset_dir: Path, training_num: int) -> tuple:
    """Return (train, valid, test) paths for k-shot sampled training.

    Builds ``sample_{training_num}.json`` via ``k_shot_sample`` on first use.
    """
    train_path = dataset_dir / f"sample_{training_num}.json"
    if not train_path.exists():
        label_list = loads_json(dataset_dir / "labels.json")
        k_shot_sample(dataset_dir / "train.json", dataset_dir, label_list, training_num)
    return train_path, dataset_dir / "dev.json", dataset_dir / "test.json"


def get_hyperparametes_for_training_num(num: int):
    """Map a training-sample count to (per_device_train_batch_size, learning_rate).

    Unknown counts fall back to batch size 2 and learning rate 5e-5;
    -1 (full dataset) maps to (4, 1e-4).
    """
    # (batch_size, learning_rate) presets keyed by training_num.
    presets = {
        10: (2, 2e-5),
        20: (4, 2e-5),
        30: (8, 3e-5),
        50: (8, 5e-5),
        -1: (4, 1e-4),
    }
    return presets.get(num, (2, 5e-5))


# 从数据集抽取 num 个样本
def sample_dataset(data_path, save_dir, num=10):
    data = loads_json(data_path)
    # 打乱
    import random

    random.shuffle(data)

    print("train data length:", len(data))

    save_data = data[:num]

    save_data_path = os.path.join(save_dir, f"sample_{num}.json")

    # 保存为 json 文件
    with open(save_data_path, "w") as f:
        json.dump(save_data, f)


from pathlib import Path


def k_shot_sample(
    data_path: Path, save_dir: Path, options: List[str], num: int = 10
) -> str:
    """Collect up to ``num`` examples per relation type and save them as JSON.

    Scans the dataset in order; a sample is appended once for EACH of its
    relations whose type still needs examples (so a sample with several
    under-represented relations appears multiple times — original behavior
    kept).

    Args:
        data_path (Path): JSON file of the dataset to sample from.
        save_dir (Path): directory where ``sample_{num}.json`` is written.
        options (List[str]): all relation types in the dataset.
        num (int, optional): samples to collect per relation type. Defaults to 10.

    Raises:
        KeyError: if a sample's relation type is not listed in ``options``.

    Returns:
        str: path of the written ``sample_{num}.json`` file.
    """

    data = loads_json(data_path)

    samples = []
    sample_num = {label: 0 for label in options}

    def is_enough() -> bool:
        # True once every relation type has reached its quota.
        return all(count >= num for count in sample_num.values())

    i = 0
    # Stop at the end of the data even if some relation types are still short,
    # instead of crashing with IndexError as the unbounded loop did.
    while i < len(data) and not is_enough():
        d = data[i]
        for r in d["relations"]:
            if sample_num[r["type"]] < num:
                samples.append(d)
                sample_num[r["type"]] += 1
        i += 1

    save_data_path = os.path.join(save_dir, f"sample_{num}.json")

    with open(save_data_path, "w") as f:
        json.dump(samples, f)

    return save_data_path
