import time
import utils
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import math
from loss import *

from checkpoint import *

class Trainer(object):
    """Drives training and evaluation of an inpainting-style model.

    The model is expected to take ``(inputs, masks)`` and return a
    reconstructed image tensor; the total loss is a weighted sum of
    hole / valid / perceptual (VGG) / style / total-variation terms,
    with weights read from ``opt``.
    """

    def __init__(self, model, opt):
        """Build the Adam optimizer and LR scheduler for *model*.

        Args:
            model: network with a standard ``parameters()`` method.
            opt:   options object providing ``adam_lr``, ``weight_decay``,
                   loss weights, ``ratio``, ``decay_rate`` and ``log_file``.
        """
        self.model = model
        self.opt = opt

        # BUG FIX: original called self.model.paramters() (typo), which
        # raised AttributeError on construction.
        self.adam_optimizer = optim.Adam(params=self.model.parameters(),
                                         lr=self.opt.adam_lr,
                                         weight_decay=self.opt.weight_decay)

        self._scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.adam_optimizer, mode='max', factor=0.5,
                                                               patience=5, verbose=True, threshold=1e-4)

    # control lr by ourselves
    def adjust_learning_rate(self, epoch):
        """Multiply the LR by ``opt.decay_rate`` at the milestone epochs
        listed in ``opt.ratio[0..2]``."""
        if epoch in (self.opt.ratio[0], self.opt.ratio[1], self.opt.ratio[2]):
            print("==>change the learning rate\n")
            decay_rate = self.opt.decay_rate
            for param in self.adam_optimizer.param_groups:
                param['lr'] = param['lr'] * decay_rate
                print('Learning rate: %.5f' % (param['lr']))

    def _compute_loss(self, inputs, masks, outputs):
        """Weighted sum of all loss terms for one batch (shared by
        train/evaluate so the two paths cannot drift apart)."""
        # Composite image: ground truth in valid region, prediction in holes.
        comp = inputs * masks + outputs * (1 - masks)

        loss_hole = Hole()(outputs, inputs, 0.5)
        loss_valid = Valid()(outputs, inputs, 0.5)
        loss_style = Style()(outputs, inputs)
        loss_vgg = VGG()(outputs, inputs)
        loss_tv = TotalVariation()(comp, masks)

        return self.opt.valid * loss_valid + self.opt.hole * loss_hole + \
            self.opt.perceptual * loss_vgg + self.opt.style * loss_style + \
            self.opt.total_variation * loss_tv

    def train(self, train_loader, epoch):
        """Run ONE full training epoch and return the mean batch loss.

        BUG FIX: the original ``return`` sat inside the batch loop, so
        every epoch stopped after the first batch.
        """
        train_loss = 0.0
        num_batches = 0
        log_file = self.opt.log_file

        self.model.train()

        for i, data in enumerate(train_loader):
            inputs, masks = data
            inputs, masks = inputs.cuda(), masks.cuda()

            outputs = self.model(inputs, masks)
            total_loss = self._compute_loss(inputs, masks, outputs)

            self.adam_optimizer.zero_grad()
            total_loss.backward()
            self.adam_optimizer.step()

            # .item() detaches the scalar; accumulating the raw tensor
            # (as the original did) retains every batch's autograd graph.
            train_loss += total_loss.item()
            num_batches = i + 1
            if math.isnan(train_loss):
                exit()
            print("[%d, %5d] train_loss: %.5f" %
                    (epoch + 1, i + 1, train_loss / (i + 1)))
            log_str = "Epoch: %d\tIteration: %5d\tTrain_loss:%.4f\n" % (
                epoch + 1, i + 1, train_loss / (i + 1))
            utils.writelog(log_str, log_file)

        # max(..., 1) guards against an empty loader (original would have
        # raised NameError on out_loss in that case).
        return train_loss / max(num_batches, 1)

    def evaluate(self, test_loader, epoch):
        """Run ONE full evaluation pass and return the mean batch loss.

        BUG FIXES vs. original: ``return`` was inside the loop (only the
        first batch was evaluated), and the forward pass built gradients;
        evaluation now runs under ``torch.no_grad()``.
        """
        test_loss = 0.0
        num_batches = 0
        log_file = self.opt.log_file

        self.model.eval()

        with torch.no_grad():
            for i, data in enumerate(test_loader):
                inputs, masks = data
                inputs, masks = inputs.cuda(), masks.cuda()

                outputs = self.model(inputs, masks)
                total_loss = self._compute_loss(inputs, masks, outputs)

                test_loss += total_loss.item()
                num_batches = i + 1

                print("[%d, %5d] test_loss: %.5f " %
                      (epoch + 1, i + 1, test_loss / (i + 1)))

                # write and print result
                log_str = "Epoch: %d\tIteration: %5d\tTest_loss:%.4f\n" % (
                    epoch + 1, i + 1, test_loss / (i + 1))
                utils.writelog(log_str, log_file)

        return test_loss / max(num_batches, 1)
