import json
import os
import re
from typing import Any, Dict, List, Optional, Tuple

import pandas as pd
from transformers import T5ForConditionalGeneration, T5Tokenizer

from ..core.datatypes import Preffix, Relation, Entity


def update_best_checkpoint(
    metrics_new: Dict[str, Dict[str, float]],
    metrics_best: Dict[str, Dict[str, float]],
    metric_name: str,
    metric_avg: str,
    model: "T5ForConditionalGeneration",
    tokenizer: "T5Tokenizer",
    path_to_save_model: Optional[str],
):
    """
    Compares a specific metric in two metric dictionaries: current and best.
    If the new metric value is better, the model/tokenizer checkpoint and the
    metrics file are saved and the new metrics become the best ones.

    :param metrics_new: metrics of the current evaluation,
        e.g. {"macro avg": {"f1-score": 0.9}}
    :param metrics_best: best metrics seen so far ({} when none yet)
    :param metric_name: metric to compare, e.g. "f1-score"
    :param metric_avg: averaging key to compare under, e.g. "macro avg"
    :param model: model to checkpoint when the metric improves
    :param tokenizer: tokenizer saved alongside the model
    :param path_to_save_model: directory to save into; if None, only
        "metrics.json" is written to the current working directory
    :return: the best metrics dictionary after the comparison
    """

    metric_current_value = metrics_new[metric_avg][metric_name]

    # An empty "best" dict means no checkpoint has been kept yet,
    # so any positive metric value counts as an improvement.
    metric_best_value = 0.0
    if len(metrics_best) > 0:
        metric_best_value = metrics_best[metric_avg][metric_name]

    if metric_current_value > metric_best_value:
        print(
            f"Got Better results for {metric_name}. \n"
            f"{metric_current_value} > {metric_best_value}. Updating the best checkpoint"
        )
        metrics_best = metrics_new

        # Only save the checkpoint when a target directory is given;
        # save_pretrained(None) would raise.
        if path_to_save_model is not None:
            model.save_pretrained(path_to_save_model)
            tokenizer.save_pretrained(path_to_save_model)
            save_metrics_path = os.path.join(path_to_save_model, "metrics.json")
        else:
            save_metrics_path = "metrics.json"

        with open(save_metrics_path, "w", encoding="utf-8") as f:
            json.dump(metrics_best, ensure_ascii=False, indent=4, fp=f)

    return metrics_best


def show_classification_report(metrics: Dict[str, Dict[str, float]]):
    """Print an sklearn-style classification report from a nested metrics dict.

    :param metrics: mapping of group name -> {metric name -> value}
    :return: None; the transposed report is printed to stdout
    """
    report = pd.DataFrame.from_dict(metrics).transpose()
    print(report)


def clean_output_and_split_sub_sentences(output: str, sep: str = ",") -> List[str]:
    """
    Clean the output text and split it into sub-sentences on the given separator.

    Removes all periods and the ANSWER prefix from the output, splits on
    ``sep``, and strips leading/trailing whitespace from every sub-sentence.

    args:
        output: the output text to process
        sep: sub-sentence separator

    Returns:
        list[str]: list of sub-sentences
    """
    clean_output = output.replace(".", "").replace(Preffix.ANSWER.value, "")
    output_parts = clean_output.split(sep)

    # Strip surrounding whitespace from every part.
    sentence_parts = [part.strip() for part in output_parts]

    return sentence_parts


def parse_sentence_parts_to_entities(
    sentence_parts: List[str],
    options: List[str],
) -> List[Entity]:
    """
    Extract entities from sentence fragments.

    Parses fragments shaped like "xxx is a yyy" or "xxx is an yyy", where
    "xxx" is taken as the entity name and "yyy" as the entity type.
    An Entity object is created only when the parsed type appears in
    the ``options`` list.

    Args:
        sentence_parts (List[str]): sub-sentences to parse.
        options (List[str]): valid entity-type labels.

    Returns:
        List[Entity]: extracted entities, each with a name and a type.
    """

    # Matches both "xxx is a yyy" and "xxx is an yyy", case-insensitively.
    is_a_pattern = re.compile(r"(.*) is (a|an) (.*)", re.IGNORECASE)

    found: List[Entity] = []
    for fragment in sentence_parts:
        hit = is_a_pattern.match(fragment)
        if hit is None:
            continue

        name, _article, label = hit.groups()
        if label in options:
            found.append(Entity(name, label))

    return found


def parse_sentence_part_to_triple(sentence_part: str, options: List[str]) -> Optional[Relation]:
    """
    Extract a relation triple from a sentence fragment.

    Tries each candidate relation type as a delimiter; the text to its left
    becomes the head entity and the text to its right the tail entity.
    Templates starting with "<" are split on verbatim, all others must be
    surrounded by spaces. Returns None when nothing parses or when either
    entity ends up empty.

    Parameters:
        sentence_part: the sentence fragment to parse
        options: candidate relation types

    Returns:
        A Relation object (head, tail, type) or None
    """
    if not any([template in sentence_part for template in options]):
        # print("error pred：", prediction_part)
        return None

    for answer_template in options:
        if answer_template.startswith("<"):
            # "<rel>" style markers are matched verbatim, no surrounding spaces.
            _prediction_part = sentence_part.split(f"{answer_template}", maxsplit=2)
        else:
            # Plain-word relations must appear as standalone words.
            _prediction_part = sentence_part.split(f" {answer_template} ", maxsplit=2)

        # maxsplit=2: a template occurring more than once yields 3 parts
        # and the fragment is skipped for that template.
        if len(_prediction_part) != 2:
            continue

        sub, obj = (
            _prediction_part[0],
            _prediction_part[1],
        )

        sub = sub.strip(" ")
        obj = obj.strip(" ")

        if sub == "" or obj == "":
            return None

        return Relation(head=sub, tail=obj, type=answer_template)


def parse_relation_triple_with_svo_format(
    output: str, options: List[str], sep: str = ","
):
    """Extract relations from SVO-style output, e.g. "John likes Mary".

    Each sub-sentence is split on the first candidate relation type that
    occurs in it exactly once; the left side becomes the head and the right
    side the tail of the resulting Relation.

    Parameters:
        output: raw model output
        options: candidate relation types
        sep: sub-sentence separator, "," by default

    Returns:
        List[Relation]: all relations that could be parsed
    """
    output_parts = clean_output_and_split_sub_sentences(output, sep)

    def _get_triple_from_part(prediction_part: str, options: List[str]):
        """Parse one sub-sentence into a Relation, or None on failure."""
        if not any(template in prediction_part for template in options):
            return None

        for answer_template in options:
            # maxsplit=2: a template occurring more than once yields 3 parts
            # and is rejected by the length check below.
            pieces = prediction_part.split(answer_template, maxsplit=2)
            if len(pieces) != 2:
                continue

            sub = pieces[0].strip(" ")
            obj = pieces[1].strip(" ")

            if sub == "" or obj == "":
                return None

            return Relation(head=sub, tail=obj, type=answer_template)

    output_triples = []
    for item in output_parts:
        triple = _get_triple_from_part(item, options)
        if triple is not None:
            output_triples.append(triple)

    return output_triples


def parse_relation_triple_with_template_format(output: str, options: List[str]):
    """Extract relation triples from template-style output.

    Each sub-sentence (separated by "<sep>") is expected to follow
    "The relation between {sub} and {obj} is {type}"; a Relation is kept
    only when {type} is one of the candidate relation types in ``options``.
    """
    parts = clean_output_and_split_sub_sentences(output, sep="<sep>")

    template = re.compile(r"The relation between (.*) and (.*) is (.*)", re.IGNORECASE)

    triples = []
    for part in parts:
        # A part mentioning no candidate relation type cannot parse.
        if not any(option in part for option in options):
            continue

        hit = template.match(part)
        if hit is None:
            continue

        head, tail, rel_type = hit.groups()
        if rel_type in options:
            triples.append(Relation(head=head, tail=tail, type=rel_type))

    return triples


def parse_relation_triple_with_triple_format(
    output: str, options: List[str], sep: str = ","
):
    """Extract relation triples from "(sub,type,obj)" formatted output.

    Each sub-sentence is matched against the "(head,relation,tail)" pattern;
    a Relation is kept only when the relation is one of the candidates.
    """
    parts = clean_output_and_split_sub_sentences(output, sep)

    triple_pattern = re.compile(r"\((.*),(.*),(.*)\)", re.IGNORECASE)

    relations = []
    for part in parts:
        # Skip parts that mention no candidate relation type at all.
        if not any(option in part for option in options):
            continue

        hit = triple_pattern.match(part)
        if hit is None:
            continue

        head, rel_type, tail = hit.groups()
        if rel_type in options:
            relations.append(Relation(head=head, tail=tail, type=rel_type))

    return relations


def parse_output_to_entities_and_triples(
    output: str, options: List[str], entity_labels: List[str], sep: str = ","
) -> Tuple[List[Entity], List[Relation]]:
    """Parse model output into an entity list and a relation-triple list.

    Args:
        output (str): raw output string,
            e.g. "John is a person, Mary is a person, John likes Mary"
        options (List[str]): candidate relation types
        entity_labels (List[str]): candidate entity types
        sep (str, optional): sub-sentence separator. Defaults to ",".

    Returns:
        Tuple[List[Entity], List[Relation]]: (entities, relations)
    """
    parts = clean_output_and_split_sub_sentences(output, sep)

    entities = parse_sentence_parts_to_entities(parts, entity_labels)

    parsed = (parse_sentence_part_to_triple(part, options) for part in parts)
    relations = [triple for triple in parsed if triple is not None]

    return entities, relations


def parse_re_task_output_to_entities_and_triples(
    output: str, relation_types: List[str], entity_labels=None, sep: str = ","
) -> Tuple[List[Entity], List[Relation]]:
    """Parse relation-extraction output into entities and relations.

    Unlike parse_output_to_entities_and_triples, no predefined entity types
    are needed: both endpoints of every parsed relation are emitted as
    entities of the generic type "entity".

    Parameters:
        output: e.g. "John likes Mary, Company employs John"
        relation_types: candidate relation types
        entity_labels: unused; kept for signature compatibility
        sep: sub-sentence separator, "," by default

    Returns:
        (entities, relations) where every entity has type "entity"
    """
    parts = clean_output_and_split_sub_sentences(output, sep)

    entities: List[Entity] = []
    relations: List[Relation] = []

    for part in parts:
        triple = parse_sentence_part_to_triple(part, relation_types)
        if triple is None:
            continue
        entities.append(Entity(triple.head, "entity"))
        entities.append(Entity(triple.tail, "entity"))
        relations.append(triple)

    return entities, relations


def parse_re_task_output_to_quintuples(
    output: str, relation_types: List[str], entity_labels=None, sep: str = ","
) -> List[Tuple[str, str, str, str, str]]:
    """Parse relation-extraction output into quintuples.

    Like parse_re_task_output_to_entities_and_triples, no predefined entity
    types are needed; both endpoints receive the generic type "entity".

    Parameters:
        output: e.g. "John likes Mary, Company employs John"
        relation_types: candidate relation types
        entity_labels: unused; kept for signature compatibility
        sep: separator, "," by default

    Returns:
        List of (head, "entity", relation type, tail, "entity") quintuples

    """
    sentence_parts = clean_output_and_split_sub_sentences(output, sep)

    relations = []

    for item in sentence_parts:
        relation = parse_sentence_part_to_triple(item, relation_types)
        if relation is not None:
            relations.append(relation)

    quintuples = [(r.head, "entity", r.type, r.tail, "entity") for r in relations]

    return quintuples


def parse_output_to_quintuples(
    output: str, options: List[str], entity_labels: List[str], sep: str = ","
) -> List[Tuple[str, str, str, str, str]]:
    """
    Parse the output string into quintuples of entities and relations.

    Entities and relation triples are extracted first, then combined into
    quintuples of the form (head, head type, relation type, tail, tail type).
    Triples whose head or tail was not recognized as an entity are dropped.

    Parameters:
        output (str): output string to parse,
               e.g. "John is a person, Mary is a student, John likes Mary"
        options (List[str]): relation types, e.g. ["likes", "works for"]
        entity_labels (List[str]): entity types, e.g. ["person", "student"]
        sep (str, optional): separator used to split the output, "," by default

    Returns:
        List[Tuple[str, str, str, str, str]]: quintuples, each containing:
            - head entity name
            - head entity type
            - relation type
            - tail entity name
            - tail entity type

    """

    parts = clean_output_and_split_sub_sentences(output, sep)

    # Entity name -> entity type lookup built from the parsed entities.
    type_by_name = {
        entity.name: entity.type
        for entity in parse_sentence_parts_to_entities(parts, entity_labels)
    }

    quintuples: List[Tuple[str, str, str, str, str]] = []

    for part in parts:
        triple = parse_sentence_part_to_triple(part, options)
        if triple is None:
            continue

        head_type = type_by_name.get(triple.head)
        tail_type = type_by_name.get(triple.tail)

        # Keep only triples whose endpoints were both typed as entities.
        if head_type is None or tail_type is None:
            continue

        quintuples.append((triple.head, head_type, triple.type, triple.tail, tail_type))

    return quintuples


def parse_simple_template_output_to_quintuples(
    output: str, relation_types: List[str], entity_labels: List[str], sep: str = ","
) -> List[Tuple[str, str, str, str, str]]:
    """Parse quintuples from outputs such as
    "Havana (location) <organization_based_in> Radio Reloj Network (organization)".

    Each sub-sentence is split on a relation type; both sides must match the
    "name (type)" pattern. A quintuple (head, head type, relation type, tail,
    tail type) is kept only when both entity types are in ``entity_labels``
    and the (lower-cased) relation type is in ``relation_types``.

    Parameters:
        output: raw model output
        relation_types: candidate relation types
        entity_labels: valid entity-type labels
        sep: sub-sentence separator, "," by default

    Returns:
        List of (head, head_type, rel_type, tail, tail_type) quintuples
    """
    sentence_parts = clean_output_and_split_sub_sentences(output, sep)

    # "name (type)": one or more words, then a single-word type in parentheses.
    entity_pattern = re.compile(
        r"""
        ^                           # start of string
        (\w+(?:\s\w+)*)             # entity name: one or more words
        \s*                         # optional whitespace
        \((\w+)\)                   # entity type: word in parentheses
        $                           # end of string
        """,
        re.VERBOSE,
    )

    def _split_by_relation_type(sentence_part: str):
        """Split on the first relation type occurring exactly once.

        Returns (relation_type, [head_part, tail_part]); (None, None) when
        no relation type splits the part cleanly — the original fell through
        returning bare None here, crashing the caller's tuple unpacking.
        """
        for relation_type in relation_types:
            if relation_type not in sentence_part:
                continue
            if relation_type.startswith("<"):
                pieces = sentence_part.split(relation_type, maxsplit=2)
            else:
                pieces = sentence_part.split(f" {relation_type} ", maxsplit=2)
            if len(pieces) == 2:
                return relation_type, [piece.strip() for piece in pieces]
        return None, None

    def _parse_entity_part(entity_part: str):
        """Parse "name (type)" -> (name, lower-cased type), or (None, None)."""
        match = entity_pattern.match(entity_part)
        if match is None:
            return None, None
        entity, entity_type = match.groups()
        return entity.strip(), entity_type.strip().lower()

    quintuples: List[Tuple[str, str, str, str, str]] = []

    for item in sentence_parts:
        relation_type, entity_parts = _split_by_relation_type(item)
        if relation_type is None:
            continue

        head, head_type = _parse_entity_part(entity_parts[0])
        tail, tail_type = _parse_entity_part(entity_parts[1])

        # Skip the part when either endpoint failed to parse; the original
        # fell through to the label check with rel_type unbound (NameError)
        # or stale from a previous iteration.
        if not (head and tail and head_type and tail_type):
            continue

        rel_type = relation_type.strip().lower()
        if (
            head_type in entity_labels
            and tail_type in entity_labels
            and rel_type in relation_types
        ):
            quintuples.append((head, head_type, rel_type, tail, tail_type))

    return quintuples


def compute_lack_new_fn_fp_tp(pred, truth):
    """Compare predicted triples with ground truth and count errors.

    Args:
        pred (List[tuple]): predicted relation triples
        truth (List[tuple]): ground-truth relation triples

    Returns:
        tuple: (lack, new, tp, fp, fn), where
            - lack (list): gold items missing from the prediction.
            - new (list): predicted items absent from the gold set.
            - tp (int): true positives, size of the intersection.
            - fp (int): false positives, number of "new" items.
            - fn (int): false negatives, number of "lack" items.
    """
    pred_set, truth_set = set(pred), set(truth)

    hits = truth_set & pred_set
    missing = truth_set - hits
    spurious = pred_set - hits

    return list(missing), list(spurious), len(hits), len(spurious), len(missing)


def compute_micro_metric(tps, fps, fns):
    """Compute micro-averaged precision, recall and f1-score over all predictions.

    Args:
        tps (int): total true positives over all types
        fps (int): total false positives over all types
        fns (int): total false negatives over all types

    Returns:
        dict:
            - micro_avg_precision: micro-averaged precision
            - micro_avg_recall: micro-averaged recall
            - micro_avg_f1-score: micro-averaged f1-score
    """

    predicted_total = tps + fps
    gold_total = tps + fns

    # Guard every division against an empty denominator.
    precision = tps / predicted_total if predicted_total != 0 else 0
    recall = tps / gold_total if gold_total != 0 else 0

    pr_sum = precision + recall
    f1 = 2 * precision * recall / pr_sum if pr_sum != 0 else 0

    return {
        "micro_avg_precision": precision,
        "micro_avg_recall": recall,
        "micro_avg_f1-score": f1,
    }


if __name__ == "__main__":
    # Ad-hoc smoke test for the parsing helpers in this module.
    # test_output = "the relation between 1 and 2 is 3, the relation between 4 and 5 is 6"

    # options = ["3", "6"]
    # print(parse_relation_triple_from_output(test_output, options))

    # test_output = "(John Wilkes Booth,kill,President Lincoln)"
    # options = ["kill"]
    # print(parse_relation_triple_with_triple_format(test_output, options))

    # NOTE(review): entity_labels ["na"] will not match any "X is a Y"
    # fragment in this output, so parse_output_to_quintuples (which requires
    # typed endpoints) is expected to print an empty list here — confirm
    # whether this example is still meaningful.
    test_output = "algorithm used for computing optical flow , shape , motion , lighting , and albedo<sep>image sequence used for algorithm<sep>rigidly-moving Lambertian object feature of image sequence<sep>distant illumination feature of rigidly-moving Lambertian object."
    entity_labels = ["na"]
    options = [
        "conjunction",
        "feature of",
        "hyponym of",
        "used for",
        "part of",
        "compare",
        "evaluate for",
    ]

    quintuples = parse_output_to_quintuples(test_output, options, entity_labels)
    print(quintuples)