import torch
from collections import Counter
from processors.utils_ner import get_entities

class SeqEntityScore(object):
    """Entity-level precision/recall/F1 accumulator for sequence labelling.

    Gold and predicted tag sequences are decoded into entity tuples via
    ``get_entities`` and accumulated across calls to :meth:`update`;
    :meth:`result` then reports overall and per-type scores.  Entity tuples
    are compared by exact equality, and the first element of each tuple is
    taken to be the entity type.
    """

    def __init__(self, id2label, markup='bios'):
        self.id2label = id2label
        self.markup = markup
        self.reset()

    def reset(self):
        """Clear every accumulated entity list."""
        self.origins = []  # gold entities seen so far
        self.founds = []   # predicted entities seen so far
        self.rights = []   # predicted entities that exactly match a gold one

    def compute(self, origin, found, right):
        """Return ``(recall, precision, f1)`` from gold/pred/correct counts."""
        recall = right / origin if origin else 0
        precision = right / found if found else 0
        denom = precision + recall
        f1 = (2 * precision * recall) / denom if denom else 0.
        return recall, precision, f1

    def result(self):
        """Return ``(overall_metrics, per_type_metrics)``.

        ``overall_metrics`` is ``{'acc': precision, 'recall': ..., 'f1': ...}``
        over all entities; ``per_type_metrics`` maps each gold entity type to
        the same keys with values rounded to 4 decimals.  Note ``'acc'``
        actually holds precision (kept for backward compatibility).
        """
        per_type = {}
        gold_by_type = Counter(ent[0] for ent in self.origins)
        pred_by_type = Counter(ent[0] for ent in self.founds)
        hit_by_type = Counter(ent[0] for ent in self.rights)
        for entity_type, gold_count in gold_by_type.items():
            recall, precision, f1 = self.compute(
                gold_count,
                pred_by_type.get(entity_type, 0),
                hit_by_type.get(entity_type, 0),
            )
            per_type[entity_type] = {
                "acc": round(precision, 4),
                'recall': round(recall, 4),
                'f1': round(f1, 4),
            }
        recall, precision, f1 = self.compute(
            len(self.origins), len(self.founds), len(self.rights))
        return {'acc': precision, 'recall': recall, 'f1': f1}, per_type

    def update(self, label_paths, pred_paths):
        """Accumulate one batch of gold/predicted tag sequences.

        :param label_paths: list of gold tag sequences, e.g.
            ``[['O', 'B-PER', 'I-PER', 'O'], ...]``
        :param pred_paths: list of predicted tag sequences, same shape.

        Example:
            >>> labels_paths = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
            >>> pred_paths = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        """
        for gold_seq, pred_seq in zip(label_paths, pred_paths):
            gold_entities = get_entities(gold_seq, self.id2label, self.markup)
            pred_entities = get_entities(pred_seq, self.id2label, self.markup)
            self.origins.extend(gold_entities)
            self.founds.extend(pred_entities)
            self.rights.extend(
                ent for ent in pred_entities if ent in gold_entities)

class SpanEntityScore(object):
    """Entity-level P/R/F1 accumulator for span-extraction NER models.

    Spans are tuples whose first element is a *label id*; ``id2label`` maps
    that id to a readable type name for the per-type report.  Spans are
    matched by exact tuple equality.
    """

    def __init__(self, id2label):
        self.id2label = id2label
        self.reset()

    def reset(self):
        """Drop every accumulated span."""
        self.origins = []  # gold spans seen so far
        self.founds = []   # predicted spans seen so far
        self.rights = []   # predicted spans that exactly match a gold span

    def compute(self, origin, found, right):
        """Return ``(recall, precision, f1)`` from gold/pred/correct counts."""
        recall = right / origin if origin else 0
        precision = right / found if found else 0
        denom = precision + recall
        f1 = (2 * precision * recall) / denom if denom else 0.
        return recall, precision, f1

    def result(self):
        """Return ``(overall_metrics, per_type_metrics)``.

        ``overall_metrics`` holds raw precision/recall/f1 over all spans;
        ``per_type_metrics`` maps each gold type name to the same keys with
        values rounded to 4 decimals.  Note ``'acc'`` actually holds
        precision (kept for backward compatibility).
        """
        per_type = {}
        gold_counts = Counter(self.id2label[span[0]] for span in self.origins)
        pred_counts = Counter(self.id2label[span[0]] for span in self.founds)
        hit_counts = Counter(self.id2label[span[0]] for span in self.rights)
        for entity_type, gold_total in gold_counts.items():
            recall, precision, f1 = self.compute(
                gold_total,
                pred_counts.get(entity_type, 0),
                hit_counts.get(entity_type, 0),
            )
            per_type[entity_type] = {
                "acc": round(precision, 4),
                'recall': round(recall, 4),
                'f1': round(f1, 4),
            }
        recall, precision, f1 = self.compute(
            len(self.origins), len(self.founds), len(self.rights))
        return {'acc': precision, 'recall': recall, 'f1': f1}, per_type

    def update(self, true_subject, pred_subject):
        """Accumulate one example's gold and predicted spans."""
        self.origins.extend(true_subject)
        self.founds.extend(pred_subject)
        self.rights.extend(
            span for span in pred_subject if span in true_subject)



def compute_metric_with_fuse(golds, preds):
    """Compute fused per-token precision/recall/F1 for the 'F' and 'E' classes.

    Tags are first reduced to their final character (e.g. ``B-E``/``I-E``/
    ``E-E``/``S-E`` -> ``E``), then scored per token.  The metric is "fused":
    when the gold tag is ``F`` (resp. ``E``), a prediction of *either* ``F``
    or ``E`` counts as a true positive for the gold class — hence the row
    labels ``F(E)`` and ``E(F)``.

    :param golds: list of gold tag sequences, e.g. ``[['O', 'B-E', ...], ...]``
    :param preds: list of predicted tag sequences, same shape as ``golds``
    :return: ``(report, f1s)`` — ``report`` is a formatted table string with
        columns precision / recall / f1-score / tp / tp+fp / tp+fn, and
        ``f1s`` is ``[f1_F, f1_E]``.
    """
    # Flatten and keep only the entity letter:  B-E I-E E-E S-E -> E
    preds = [tag[-1] for seq in preds for tag in seq]
    golds = [tag[-1] for seq in golds for tag in seq]
    # counts[cls] = [tp, fp, fn], cls 0 = F, cls 1 = E
    counts = [[0, 0, 0], [0, 0, 0]]
    label_name_list = ["F(E)", "E(F)"]

    for gold, pred in zip(golds, preds):
        if gold == 'F':
            if pred == 'F' or pred == 'E':  # fused: E also hits gold F
                counts[0][0] += 1  # F-tp
            else:
                counts[0][2] += 1  # F-fn (original comment wrongly said fp)
        elif gold == 'E':
            if pred == 'E' or pred == 'F':  # fused: F also hits gold E
                counts[1][0] += 1  # E-tp
            else:
                counts[1][2] += 1  # E-fn
        else:
            # Gold is neither F nor E: an F/E prediction is a false positive.
            if pred == 'F':
                counts[0][1] += 1  # F-fp
            elif pred == 'E':
                counts[1][1] += 1  # E-fp

    report = '{:>12s}{:>11s}{:>10s}{:>10s}{:>10s}{:>10s}{:>10s}\n'.format(
        '', 'precision', 'recall', 'f1-score', 'tp', 'tp+fp', 'tp+fn')
    f1s = []
    for name, (tp, fp, fn) in zip(label_name_list, counts):
        p = tp / (tp + fp) if (tp + fp) else 0
        r = tp / (tp + fn) if (tp + fn) else 0
        f1 = 2 * p * r / (p + r) if (p + r) else 0
        f1s.append(f1)
        report += (f"{name:>12s}{p:>11.4f}{r:>10.4f}{f1:>10.4f}"
                   f"{tp:>10d}{tp + fp:>10d}{tp + fn:>10d}\n")

    return report, f1s



if __name__ == '__main__':
    # Smoke-test the fused metric on a tiny hand-made example.
    gold_tags = [['E',  'E',  'F', 'O', 'O', 'O', 'O', 'E', 'F',]]
    pred_tags = [['E',  'F',  'E', 'O', 'O', 'O', 'O', 'O', 'E',]]
    fuse_report, fuse_f1s = compute_metric_with_fuse(gold_tags, pred_tags)
    print(fuse_report)
    print(fuse_f1s)