import functools
import os
import time
from enum import Enum, unique

import numpy as np
import torch
from colorama import Fore
from sklearn import metrics

from utils.instant_tools import get_current_time


@unique
class PrintLevel(Enum):
    # Console verbosity levels consumed by Logger.print().
    # NOTE(review): "DETIAL" is a typo for "DETAIL"; kept as-is because the
    # member name is referenced by callers (e.g. Logger.print) and renaming
    # it would be a breaking interface change.
    DETIAL = 0    # full output: classification report + confusion matrix
    SIMPLIFY = 1  # summary metrics only
    NONE = 2      # print nothing

# decorator
def timing(func):
    '''
    Decorator that measures and prints the wall-clock time of each call.

    Fixes over the naive wrapper:
    - propagates ``func``'s return value (it was previously discarded);
    - preserves ``func``'s metadata via ``functools.wraps``;
    - only queries the CUDA device id when CUDA is available, so the
      decorator also works on CPU-only hosts instead of raising.

    :param func: the callable to time.
    :return: a wrapped callable with identical signature and return value.
    '''
    @functools.wraps(func)
    def time_click_wrap(*args, **kwargs):
        since = time.time()

        result = func(*args, **kwargs)

        time_diff = time.time() - since
        # torch.cuda.current_device() raises when no CUDA device is present.
        device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'
        print('epoch complete in {:0.2f} seconds on gpu {}'.format(time_diff, device))
        return result

    return time_click_wrap


class Logger(object):
    '''
    Compute, print and persist classification metrics for a model.

    Metrics are computed with sklearn, optionally printed to the console
    (colored with colorama) and written to ``<save_path>/train.txt`` /
    ``<save_path>/test.txt``.  For the test phase the metric file and the
    model checkpoint are only overwritten when the weighted-average F1
    score beats the previously recorded one.
    '''
    def __init__(self, model, save_path, show_train=False, print_level=PrintLevel.SIMPLIFY):
        '''
        :param model: network under evaluation; its class name is used for
                      the checkpoint file name and stored in the metric dict.
        :param save_path: directory for metric/checkpoint files, or None to
                          disable saving entirely.
        :param show_train: when False, only the loss is printed for the
                           training phase.
        :param print_level: console verbosity, a PrintLevel member.
        '''
        super(Logger, self).__init__()
        self.print_level = print_level
        self.model = model
        self.net_name = model.__class__.__name__
        self.save_path = save_path
        if self.save_path is not None:
            # A metric directory was requested but does not exist -- create it.
            if not os.path.exists(self.save_path):
                os.makedirs(self.save_path)
                print("metrics saving path %s doesn't exist, created." % self.save_path)
            # Directory already exists: remove any stale test-metric file so
            # best-score tracking starts from scratch for this run.
            else:
                files = os.listdir(self.save_path)
                if "test.txt" in files:
                    os.remove(os.path.join(self.save_path, "test.txt"))

        self.show_train = show_train
        # Selector for the score that decides whether a result is a new best.
        self.save_dic_mf = lambda dic: dic['average_f1_score']
        self.init_metrics()

    def init_metrics(self):
        '''Reset every cached metric attribute to None.'''
        self.classify_report            = None
        # Kept in sync with calculate_metrics, which also fills this attribute.
        self.classify_report_dict       = None
        self.classify_report_pursed     = None
        self.confusion_matrix           = None

        self.overall_accuracy           = None
        self.top1_error_rate            = None

        self.precision_for_each_class   = None
        self.average_precision          = None

        self.recall_rate_for_each_class = None
        self.average_recall_rate        = None

        self.f1_score_for_each_class    = None
        self.average_f1_score           = None

        self.auc                        = None

        self.loss                       = None

        self.dic                        = None

    def calculate_metrics(self, y_true, y_pred, max_prob):
        '''
        Compute all classification metrics from true/predicted labels.

        :param y_true: ground-truth labels.
        :param y_pred: predicted labels.
        :param max_prob: predicted probabilities; currently unused because
                         the AUC computation below is disabled.
        :return: dict of all computed metrics (also stored as self.dic).
        '''
        self.classify_report            = metrics.classification_report(y_true, y_pred, digits=3)
        self.classify_report_dict       = metrics.classification_report(y_true, y_pred, digits=3, output_dict=True)
        self.classify_report_pursed     = self.purse_classify_report_str(self.classify_report)
        self.confusion_matrix           = metrics.confusion_matrix(y_true, y_pred)

        self.overall_accuracy           = metrics.accuracy_score(y_true, y_pred)
        self.top1_error_rate            = 1 - self.overall_accuracy

        self.precision_for_each_class   = metrics.precision_score(y_true, y_pred, average=None)
        self.average_precision          = self.classify_report_dict["weighted avg"]["precision"]

        self.recall_rate_for_each_class = metrics.recall_score(y_true, y_pred, average=None)
        self.average_recall_rate        = self.classify_report_dict["weighted avg"]["recall"]

        self.f1_score_for_each_class    = metrics.f1_score(y_true, y_pred, average=None)
        self.average_f1_score           = self.classify_report_dict["weighted avg"]["f1-score"]

        # self.auc                        = self.calculate_auc(y_true, y_pred, max_prob)

        self.dic = {
                'net_name': self.net_name,

               'classify_report_str': self.classify_report,
               'classify_report_pursed': self.classify_report_pursed,
               'confusion_matrix': self.confusion_matrix,
               'precision_for_each_class': self.precision_for_each_class,
               'recall_rate_for_each_class': self.recall_rate_for_each_class,
               'f1_score_for_each_class': self.f1_score_for_each_class,

               'overall_accuracy': self.overall_accuracy,
               # 'auc': self.auc,
               'top1_error_rate': self.top1_error_rate,
               'average_precision': self.average_precision,
               'average_recall_rate': self.average_recall_rate,
               'average_f1_score': self.average_f1_score,

               'loss': self.loss
                }
        return self.dic


    def init_from_dic(self, dic):
        '''
        Restore all metric attributes from a previously produced metric dict.

        :param dic: a dict shaped like the one returned by calculate_metrics.
        '''
        self.classify_report            = dic['classify_report_str']
        self.classify_report_pursed     = dic['classify_report_pursed']
        self.confusion_matrix           = dic['confusion_matrix']

        # calculate_metrics currently does not emit 'auc' (the computation is
        # commented out), so a plain dic['auc'] would raise KeyError on a
        # round-trip; .get() tolerates its absence.
        self.auc                        = dic.get('auc')
        self.overall_accuracy           = dic['overall_accuracy']
        self.top1_error_rate            = dic['top1_error_rate']

        self.precision_for_each_class   = dic['precision_for_each_class']
        self.average_precision          = dic['average_precision']

        self.recall_rate_for_each_class = dic['recall_rate_for_each_class']
        self.average_recall_rate        = dic['average_recall_rate']

        self.f1_score_for_each_class    = dic['f1_score_for_each_class']
        self.average_f1_score           = dic['average_f1_score']

        self.loss                       = dic['loss']

    def print(self, is_train):
        '''
        Print current metrics to the console according to self.print_level.

        :param is_train: True for the training phase, False for test.
        '''
        if self.print_level == PrintLevel.NONE:
            return
        # Training phase prints only the loss unless show_train is set.
        if is_train and not self.show_train:
            # Fore.RESET (not Fore.BLACK) restores the terminal's default
            # foreground color after the colored segment.
            print(Fore.YELLOW, 'train loss:', self.loss, Fore.RESET)
            return

        get_current_time()
        if self.print_level == PrintLevel.DETIAL:
            print('classify_report : \n', self.classify_report)
            print('confusion_matrix : \n', self.confusion_matrix)

        train_str = 'train' if is_train else 'test'
        print(Fore.GREEN, '%s loss:' % (train_str), self.loss, Fore.RESET)
        # print('auc: {0:f}'.format(self.auc))
        print('overall_accuracy: {0:f}'.format(self.overall_accuracy))
        print('top-1 error rate: {0:f}'.format(self.top1_error_rate))

        print('average_precision: {0:f}'.format(self.average_precision))
        print('average_recall_rate: {0:f}'.format(self.average_recall_rate))
        print(Fore.RED, 'average_f1_score: {0:f}'.format(self.average_f1_score), Fore.RESET)

        print()

    def save_dic2txt(self, txt_path):
        '''
        Serialize self.dic to a text file, one "key: value" entry per line.

        Per-class arrays and the parsed report are skipped; floats are
        written with 5 decimal places.

        :param txt_path: destination file path (overwritten if it exists).
        '''
        np.set_printoptions(precision=5)
        with open(txt_path, 'w') as f:
            current_time = get_current_time(is_print=False) + '\n'
            f.write(current_time)
            for key in self.dic.keys():
                if key == "classify_report_pursed" or "for_each_class" in key:
                    continue
                f.write(key + ": ")
                # Multi-line values start on their own line for readability.
                if key in ("classify_report_str", "confusion_matrix"):
                    f.write('\n')
                if isinstance(self.dic[key], float) or isinstance(self.dic[key], np.float64):
                    f.write("{:.5f}".format(self.dic[key]))
                else:
                    f.write(str(self.dic[key]))
                f.write('\n')

    def get_metrics_from_txt(self, txt_path, metrics):
        '''
        Read one named metric back from a saved metric txt file.

        :param txt_path: path to a file written by save_dic2txt.
        :param metrics: substring identifying the metric line (e.g.
                        "average_f1_score").
        :return: the metric as float, or None if no matching line exists.
        '''
        with open(txt_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                if metrics in line:
                    return float(line.split(':')[-1])

    def save(self, is_train):
        '''
        Persist the current metrics (and, on a new best test score, the model).

        :param is_train: True for the training phase, False for test.
        '''
        phase = 'train' if is_train else 'test'
        best_metrics = -1
        txt_path = os.path.join(self.save_path, phase + ".txt")
        if os.path.exists(txt_path):
            previous = self.get_metrics_from_txt(txt_path, "average_f1_score")
            # The file may exist yet lack the metric line, in which case
            # get_metrics_from_txt returns None; comparing None with a float
            # would raise, so keep the -1 sentinel in that case.
            if previous is not None:
                best_metrics = previous
        # Training phase: always overwrite with the latest metrics.
        if phase == 'train':
            self.save_dic2txt(txt_path)
        # Test phase: only record metrics and checkpoint on a new best score.
        else:
            if self.save_dic_mf(self.dic) > best_metrics:
                self.save_dic2txt(txt_path)
                torch.save(self.model, os.path.join(self.save_path, self.net_name + '.pkl'))

    def purse_classify_report_str(self, classify_report_str):
        '''
        Parse sklearn's classification_report string into per-class rows.

        :param classify_report_str: the plain-text report.
        :return: list of rows of floats, [label, precision, recall, f1,
                 support] per class; header and summary rows ("accuracy",
                 "macro avg", ...) are skipped.
        '''
        valid_rows = []
        for line in classify_report_str.split('\n'):
            tokens = line.split()
            if len(tokens) == 0:
                continue
            # float() (rather than isnumeric) also accepts non-integer class
            # labels such as "1.5"; rows whose label is not numeric (headers,
            # "accuracy", "macro avg", ...) fail to parse and are skipped.
            try:
                valid_rows.append([float(t) for t in tokens])
            except ValueError:
                continue
        return valid_rows

    def log_metrics(self, y_true, y_pred, max_prob, is_train):
        '''
        One-stop entry point: compute, print and (optionally) save metrics.

        :param y_true: ground-truth labels.
        :param y_pred: predicted labels.
        :param max_prob: predicted probabilities (currently unused, see
                         calculate_metrics).
        :param is_train: True for the training phase, False for test.
        :return: the metric dict produced by calculate_metrics.
        '''
        # Clear metrics
        # self.init_metrics()
        # Compute metrics
        self.calculate_metrics(y_true, y_pred, max_prob)
        # Print metrics
        self.print(is_train=is_train)
        # Save metrics
        if self.save_path is not None:
            self.save(is_train)

        return self.dic

