# -*- coding: utf-8 -*-


import torch
import numpy as np

from typing import Dict


def tagging2sentence(sents, tags, masks, idx2char: Dict[int, str], idx2tag: Dict[int, str]):
    """Convert index sequences back into segmented sentences using BMES-style tags.

    Entities are rendered as "chars/TYPE"; non-entity characters are kept as-is;
    tokens are joined with single spaces.

    :param sents: batch of character-index sequences
    :param tags: batch of tag-index sequences, aligned with ``sents``
    :param masks: batch of 0/1 masks (torch tensors); the sum is the valid length
    :param idx2char: index -> character mapping
    :param idx2tag: index -> tag-string mapping, e.g. "B-LOC", "S-PER", "O"
    :return: list of space-joined sentence strings, one per input sequence
    """
    sentences = []
    for sent, tag, mask in zip(sents, tags, masks):
        sentence = []
        seq_len = int(mask.sum())  # number of valid (non-padded) positions
        entity, entity_type = "", ""
        for i in range(seq_len):
            char_idx, ner_type = sent[i], tag[i]
            # partition tolerates tags without '-' (e.g. "O"): ner="O", label="";
            # the original split('-') unpack raised ValueError on such tags.
            ner, _, label = idx2tag[ner_type].partition('-')
            if ner == "B":        # entity begins: start accumulating
                entity += idx2char.get(char_idx)
                entity_type = label
            elif ner == "M":      # entity continues
                entity += idx2char.get(char_idx)
            elif ner == "E":      # entity ends: flush "entity/TYPE"
                entity += idx2char.get(char_idx)
                sentence.append(entity + "/" + entity_type)
                entity, entity_type = "", ""
            elif ner == "S":      # single-character entity
                sentence.append(idx2char.get(char_idx) + "/" + label)
                entity, entity_type = "", ""
            else:                 # "O" or any non-entity tag: plain character
                sentence.append(idx2char.get(char_idx))
        sentences.append(" ".join(sentence))
    return sentences


def tagging2ner(sents, tags, masks, idx2char: Dict[int, str], idx2tag: Dict[int, str]):
    """Extract named entities from BMES-style tag sequences.

    :param sents: batch of character-index sequences
    :param tags: batch of tag-index sequences, aligned with ``sents``
    :param masks: batch of 0/1 masks (torch tensors); the sum is the valid length
    :param idx2char: index -> character mapping
    :param idx2tag: index -> tag-string mapping, e.g. "B-LOC", "S-PER", "O"
    :return: per-sentence lists of (entity_name, (start, end), entity_type),
             with inclusive character spans
    """
    matches = []
    for sent, tag, mask in zip(sents, tags, masks):
        match = []
        seq_len = int(mask.sum())  # number of valid (non-padded) positions
        start, end = 0, 0
        entity, entity_type = "", ""
        for i in range(seq_len):
            char_idx, ner_type = sent[i], tag[i]
            # partition tolerates tags without '-' (e.g. "O"): ner="O", label="";
            # the original split('-') unpack raised ValueError on such tags.
            ner, _, label = idx2tag[ner_type].partition('-')
            if ner == "B":        # entity begins: remember start position
                entity += idx2char.get(char_idx)
                entity_type = label
                start = i
            elif ner == "M":      # entity continues
                entity += idx2char.get(char_idx)
            elif ner == "E":      # entity ends: record the accumulated span
                entity += idx2char.get(char_idx)
                end = i
                match.append((entity, (start, end), entity_type))
                start, end = 0, 0
                entity, entity_type = "", ""
            elif ner == "S":      # single-character entity
                match.append((idx2char.get(char_idx), (i, i), label))
                start, end = 0, 0
                entity, entity_type = "", ""
            # "O" / other tags contribute no entity
        matches.append(match)
    return matches


def __prf(golden_matches, predict_matches):
    """Average NER precision / recall / F1 over a batch.

    Two granularities are scored per sentence, then averaged over the batch:
      * "ner"  — span-only: a prediction counts if its (start, end) span
        appears among the gold spans, regardless of entity type.
      * "type" — exact: the full (entity, span, type) triple must agree.

        real, pred |  True  | False
        -----------| ------ | ------
           True    |   TP   |  FN
           False   |   FP   |  TN

        precision = TP / (TP + FP)
        recall    = TP / (TP + FN)
        f1        = 2 * P * R / (P + R)

    :param golden_matches: (batch_size, match), match: (entity, span, type)
    :param predict_matches: same structure as golden_matches
    :return: (ner_p, ner_r, ner_f1, type_p, type_r, type_f1) batch averages
    """
    def _scores(gold, pred):
        # Precision/recall default to 0 when the corresponding set is empty.
        tp = len(pred & gold)
        p = tp / len(pred) if pred else 0
        r = tp / len(gold) if gold else 0
        f1 = 2 * p * r / (p + r) if (p + r) > 1e-9 else 0
        return p, r, f1

    ner_stats, type_stats = [], []
    for gold, pred in zip(golden_matches, predict_matches):
        gold_spans = {match[1] for match in gold}
        pred_spans = {match[1] for match in pred}
        ner_stats.append(_scores(gold_spans, pred_spans))
        type_stats.append(_scores(set(gold), set(pred)))

    batch_size = len(ner_stats)
    if batch_size == 0:
        # Guard: an empty batch previously raised ZeroDivisionError.
        return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0

    ner_p, ner_r, ner_f1 = (sum(col) / batch_size for col in zip(*ner_stats))
    type_p, type_r, type_f1 = (sum(col) / batch_size for col in zip(*type_stats))
    return ner_p, ner_r, ner_f1, type_p, type_r, type_f1


def write_predict(golden_sentences, pred_sentences, path="predict.txt"):
    """Append gold/predicted sentence pairs to *path* for manual inspection.

    Each pair is written as::

        ========================================
        <gold sentence>
        ----------------------------------------
        <predicted sentence>

    :param golden_sentences: iterable of gold sentence strings
    :param pred_sentences: iterable of predicted sentence strings
    :param path: output file, appended to (default "predict.txt",
                 backward compatible with the previous hard-coded name)
    """
    # utf-8 is forced so non-ASCII (e.g. Chinese) text round-trips on any OS,
    # instead of depending on the platform default encoding.
    with open(path, 'a+', encoding='utf-8') as f:
        for s1, s2 in zip(golden_sentences, pred_sentences):
            f.write("=" * 40 + "\n")
            f.write(s1 + "\n")
            f.write('-' * 40 + "\n")
            f.write(s2 + "\n")


def prf(sents, pred_tags, gold_tags, masks, idx2char: Dict[int, str], idx2tag: Dict[int, str]):
    """Evaluate a batch: decode tags, log sentences, return P/R/F1 averages.

    Moves ``sents``/``gold_tags`` to CPU numpy and ``masks`` to CPU
    (``pred_tags`` is used as given), appends the decoded gold/predicted
    sentence pairs to the prediction log, and returns the six batch-averaged
    scores: (ner_p, ner_r, ner_f1, type_p, type_r, type_f1).
    """
    sents = sents.cpu().numpy()
    gold_tags = gold_tags.cpu().numpy()
    masks = masks.cpu()

    gold_entities = tagging2ner(sents, gold_tags, masks, idx2char, idx2tag)
    pred_entities = tagging2ner(sents, pred_tags, masks, idx2char, idx2tag)

    write_predict(
        tagging2sentence(sents, gold_tags, masks, idx2char, idx2tag),
        tagging2sentence(sents, pred_tags, masks, idx2char, idx2tag),
    )
    return __prf(gold_entities, pred_entities)
