import functools

import torch
import torch.optim as optim
from torch.nn import CrossEntropyLoss

from utils.logger import Logger, PrintLevel

def grad_disable(test_function):
    '''
    Decorator that disables gradient tracking while `test_function` runs,
    so no autograd graph is built and no GPU memory accumulates during
    evaluation.

    :param test_function: callable to execute without gradient tracking
    :return: wrapped callable with the same signature and metadata
    '''
    # functools.wraps preserves __name__/__doc__ of the wrapped function,
    # which the original version lost.
    @functools.wraps(test_function)
    def grad_disabled_test(*args, **kwargs):
        # torch.no_grad() is the idiomatic context for inference;
        # equivalent to set_grad_enabled(False) used as a context manager.
        with torch.no_grad():
            return test_function(*args, **kwargs)

    return grad_disabled_test

class Classifier():
    '''
    Deep-learning classifier wrapper.

    Bundles a model with a cross-entropy criterion, an SGD optimizer,
    a step learning-rate schedule and metric logging.
    '''
    def __init__(self, model, num_category, criterion=CrossEntropyLoss(),
                 metrics_save_folder=None, lr=0.05):
        '''
        :param model: classifier network
        :param num_category: number of target categories
        :param criterion: loss function (defaults to cross-entropy)
        :param metrics_save_folder: folder where metrics are saved
        :param lr: initial learning rate
        '''
        self.model = model
        self.num_category = num_category
        self.criterion = criterion
        self.logger = Logger(self.model, metrics_save_folder, show_train=False, print_level=PrintLevel.DETIAL)
        self.optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=0.8)
        # Decay the learning rate by 10x every 10 epochs.
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.1)

    @staticmethod
    def _split_batch(batch):
        '''
        Split one batch into (inputs, label).

        Supports multi-input single-output networks: every element but the
        last is a model input (cast to float); the last element is the label.
        NOTE(review): to run on GPU, append .cuda() to each element here.
        '''
        batch = list(batch)
        inputs = [tensor.float() for tensor in batch[:-1]]
        return inputs, batch[-1]

    @staticmethod
    def _accumulate(output, label, log_y_predictions, log_y_trues, log_probs):
        '''
        Append this batch's predicted classes, true labels and raw class
        scores to the epoch-level logs (mutated in place).
        '''
        # Argmax over class dimension gives the predicted category per sample.
        _, y_predictions = torch.max(output, 1)
        log_y_predictions.extend(int(p) for p in y_predictions)
        log_y_trues.extend(int(l) for l in label)
        log_probs.extend(output.cpu().detach().numpy())

    def train(self, loader):
        '''
        Run one training epoch over `loader` and return the logged metrics.

        :param loader: iterable of batches; each batch is a sequence whose
                       last element is the label tensor
        :return: whatever `Logger.log_metrics` returns for the epoch
        '''
        # Training mode: dropout (and similar layers) active.
        self.model.train()
        log_y_predictions = []
        log_y_trues = []
        log_probs = []
        epoch_loss = 0
        for batch in loader:
            inputs, label = self._split_batch(batch)

            self.optimizer.zero_grad()
            # Forward pass.
            output = self.model(*inputs)
            loss = self.criterion(output, label)
            # Backprop gradients through the graph; the graph is freed
            # automatically once backward() completes.
            loss.backward()
            # Gradient-descent step updating model parameters.
            self.optimizer.step()

            loss_cpu = loss.item()
            print("Batch loss:", loss_cpu)

            self._accumulate(output, label, log_y_predictions, log_y_trues, log_probs)
            epoch_loss += loss_cpu

        self.scheduler.step()

        self.logger.loss = epoch_loss
        # Compute and return the classification metrics for this epoch.
        return self.logger.log_metrics(log_y_trues, log_y_predictions, log_probs, is_train=True)

    @grad_disable
    def test(self, loader):
        '''
        Evaluate the model over `loader` (gradients disabled by the
        decorator) and return the logged metrics.

        :param loader: iterable of batches; each batch is a sequence whose
                       last element is the label tensor
        :return: whatever `Logger.log_metrics` returns for the evaluation
        '''
        # Evaluation mode: dropout (and similar layers) disabled.
        self.model.eval()
        log_y_predictions = []
        log_y_trues = []
        log_probs = []
        epoch_loss = 0
        for batch in loader:
            inputs, label = self._split_batch(batch)
            output = self.model(*inputs)
            loss = self.criterion(output, label)

            self._accumulate(output, label, log_y_predictions, log_y_trues, log_probs)
            epoch_loss += loss.item()

        self.logger.loss = epoch_loss
        return self.logger.log_metrics(log_y_trues, log_y_predictions, log_probs, is_train=False)
