import os
import logging
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import torch.backends.cudnn as cudnn
from KD.models import Discriminator, Teacher, Student, UNet
from KD.criterion import CriterionDSN,CriterionCWD, CriterionKD, CriterionAdv, CriterionAdvForG, CriterionAdditionalGP, CriterionConsis
from segmentation.utils import visualize
from torchvision.utils import save_image
from KD.utils import apply_trans_batch


class NetModel():
    """Knowledge-distillation trainer for segmentation.

    A frozen ``Teacher`` (GAN generator + segmentation net) synthesizes
    image/label pairs on the fly; a ``Student`` U-Net is trained to mimic it.
    Optional loss terms — plain KD, adversarial (with a ``Discriminator``),
    channel-wise distillation (CWD) and feature consistency — are toggled by
    boolean flags on ``args``.

    NOTE(review): the original code left ``self.G_solver`` commented out even
    though ``G_scheduler``, ``optimize_parameters()`` and ``print_info()`` all
    use it, so ``__init__`` crashed with AttributeError. The Adam student
    optimizer is restored below.
    """

    def name(self):
        """Return a short identifier for this model."""
        return 'kd_seg'

    def __init__(self, args):
        """Build student/teacher/discriminator, optimizers and criteria.

        Args:
            args: argparse-style namespace; expected fields include
                ``in_ch, num_classes, batch_size, G_pkl_dir, SegNet_pkl_dir,
                consistency, lr_g, lr_d, preprocess_GAN_mode, imsize_for_adv,
                adv_conv_dim, kd, adv, adv_loss_type, lambda_gp, cwd,
                norm_type, divergence, temperature`` and the ``lambda_*``
                loss weights.
        """
        self.args = args
        self.student = UNet(args.in_ch, args.num_classes).cuda()
        # Teacher is frozen: eval mode + no grad; it only produces
        # (image, prediction, representation) triples for distillation.
        self.teacher = Teacher(args.batch_size, args.G_pkl_dir, args.SegNet_pkl_dir, args.consistency).eval().requires_grad_(False).cuda()

        # Student optimizer. 'initial_lr' is stored so schedulers that resume
        # from a checkpoint can recover the base learning rate.
        self.G_solver = optim.Adam(
            [{'params': filter(lambda p: p.requires_grad, self.student.parameters()),
              'initial_lr': args.lr_g}],
            args.lr_g)

        self.D_model = Discriminator(args.preprocess_GAN_mode, args.num_classes, args.batch_size, args.imsize_for_adv, args.adv_conv_dim).cuda()
        self.D_solver = optim.Adam(filter(lambda p: p.requires_grad, self.D_model.parameters()), args.lr_d, [0.9, 0.99])
        logging.info("------------")

        # Plateau scheduler driven externally (mode='max': expects a metric
        # such as validation IoU to maximize). Only the student is scheduled.
        self.G_scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.G_solver, mode='max', factor=0.5, patience=5)

        # Main cross-entropy criterion plus optional distillation terms.
        self.criterion_dsn = CriterionDSN().cuda()
        if args.kd:
            self.criterion_kd = CriterionKD().cuda()
        if args.adv:
            self.criterion_adv = CriterionAdv(args.adv_loss_type).cuda()
            if args.adv_loss_type == 'wgan-gp':
                self.criterion_AdditionalGP = CriterionAdditionalGP(self.D_model, args.lambda_gp).cuda()
            self.criterion_adv_for_G = CriterionAdvForG(args.adv_loss_type).cuda()
        if args.cwd:
            self.criterion_cwd = CriterionCWD(args.norm_type, args.divergence, args.temperature).cuda()
        if args.consistency:
            self.criterion_consis = CriterionConsis().cuda()

        # Scalar loss trackers exposed via print_info(). consis_G_loss is
        # initialized here too, so print_info() is safe to call before the
        # first optimize_parameters() step.
        self.G_loss, self.D_loss = 0.0, 0.0
        self.mc_G_loss, self.kd_G_loss, self.adv_G_loss, self.cwd_G_loss = 0.0, 0.0, 0.0, 0.0
        self.consis_G_loss = 0.0

        # Deterministic cuDNN for reproducibility (at some speed cost).
        cudnn.deterministic = True
        cudnn.benchmark = False

    def lr_poly(self, base_lr, iter, max_iter, power):
        """Polynomial learning-rate decay: base_lr * (1 - iter/max_iter)^power."""
        return base_lr*((1-float(iter)/max_iter)**(power))

    def adjust_learning_rate(self, base_lr, optimizer, i_iter):
        """Apply poly decay to the optimizer's first param group; return the new lr."""
        args = self.args
        lr = self.lr_poly(base_lr, i_iter, args.num_steps, args.power)
        # Only param_groups[0] is updated — all trainable params live there.
        optimizer.param_groups[0]['lr'] = lr
        return lr

    def segmentation_forward(self):
        """Sample a teacher batch and run the student forward pass.

        Side effects: sets ``self.images / preds_T / labels / repre1`` from the
        teacher (no grad), and ``self.preds_S`` (+ ``preds_aug``/``repre2`` as
        configured) from the student; dumps a visualization grid to
        ``visual.png`` in the run folder.
        """
        with torch.no_grad():
            # Teacher synthesizes images and their soft predictions; argmax
            # gives the pseudo-labels used as ground truth for the student.
            self.images, self.preds_T, self.repre1 = self.teacher()
            self.images = torch.clamp(self.images, -1, 1)
            self.labels = self.preds_T.argmax(1)
            if self.args.aug:
                self.images_aug, self.labels_aug = apply_trans_batch(self.images, self.labels, self.images.shape[1])
        if self.args.aug:
            # One forward pass over [clean; augmented] concatenated batches.
            self.preds, self.repre = self.student.train()(torch.cat([self.images, self.images_aug], dim=0))

            self.preds_S, self.preds_aug = torch.split(self.preds, self.args.batch_size)
            del self.preds
            if self.args.consistency:
                # Consistency compares student representations of the CLEAN
                # half against teacher representations.
                self.repre2 = [i[:self.args.batch_size] for i in self.repre]
            del self.repre

            save_image(torch.cat([(self.images_aug.repeat(1,3,1,1)+1)/2, visualize(self.labels_aug), visualize(self.preds_aug.argmax(1))]), os.path.join(self.args.folder_dir, 'visual.png'), nrow=self.args.batch_size)
        else:
            self.preds_S, self.repre2 = self.student.train()(self.images)
            save_image(torch.cat([(self.images.repeat(1,3,1,1)+1)/2, visualize(self.labels), visualize(self.preds_S.argmax(1))]), os.path.join(self.args.folder_dir, 'visual.png'), nrow=self.args.batch_size)

    def segmentation_backward(self):
        """Accumulate all enabled student losses and backpropagate.

        Updates the per-term scalar trackers (``mc/kd/adv/cwd/consis_G_loss``)
        and ``self.G_loss`` with the total.
        """
        args = self.args
        if self.args.aug:
            # Average CE over the clean and augmented halves.
            temp = (self.criterion_dsn(self.preds_aug, self.labels_aug) + self.criterion_dsn(self.preds_S, self.labels))/2
        else:
            temp = self.criterion_dsn(self.preds_S, self.labels)
        self.mc_G_loss = temp.item()
        g_loss = temp
        if args.kd:
            # use reduction=batchmean in KLDivLoss for not showing UserWarning
            # MeanResult * BCHW = BatchMeanResult * B = SumResult
            # NOTE(review): the 256*256 divisor assumes 256x256 inputs — verify
            # against args.imsize if image size ever changes.
            temp = args.lambda_kd*self.criterion_kd(self.preds_S, self.preds_T)/(args.num_classes*256*256)
            self.kd_G_loss = temp.item()
            g_loss = g_loss + temp
        if args.adv:
            temp = args.lambda_adv*self.criterion_adv_for_G(self.D_model(self.preds_S))
            self.adv_G_loss = temp.item()
            g_loss = g_loss + temp
        if args.cwd:
            temp = args.lambda_cwd*self.criterion_cwd(self.preds_S, self.preds_T)
            self.cwd_G_loss = temp.item()
            g_loss = g_loss + temp
        self.consis_G_loss = 0
        if args.consistency:
            temp = args.lambda_consis*self.criterion_consis(self.repre1, self.repre2)
            self.consis_G_loss = temp.item()
            g_loss = g_loss + temp
        g_loss.backward()
        self.G_loss = g_loss.item()

    def discriminator_forward_backward(self):
        """Compute the discriminator loss on detached predictions and backprop."""
        args = self.args
        # Detach so no gradients flow into the student/teacher here.
        d_loss = args.lambda_d*self.criterion_adv(self.D_model(self.preds_S.detach()), self.D_model(self.preds_T.detach()))
        if args.adv_loss_type == 'wgan-gp':
            d_loss += args.lambda_d*self.criterion_AdditionalGP(self.preds_S, self.preds_T)
        d_loss.backward()
        self.D_loss = d_loss.item()

    def optimize_parameters(self):
        """Run one full training step: student update, then (optionally) D update."""
        self.segmentation_forward()
        self.G_solver.zero_grad()
        self.segmentation_backward()
        self.G_solver.step()
        if self.args.consistency:
            # Free large representation tensors before the discriminator pass.
            del self.repre1
            del self.repre2
        if self.args.adv:
            self.D_solver.zero_grad()
            self.discriminator_forward_backward()
            self.D_solver.step()

    def print_info(self):
        """Return (G lr, total G loss, per-term G losses..., D lr, D loss) for logging."""
        return self.G_solver.param_groups[-1]['lr'], self.G_loss, \
                        self.mc_G_loss, self.kd_G_loss, self.adv_G_loss, self.cwd_G_loss, self.consis_G_loss, \
                        self.D_solver.param_groups[-1]['lr'], self.D_loss

    def save_ckpt(self, step):
        """Save student (and discriminator, if adversarial) state dicts for ``step``."""
        args = self.args
        logging.info('saving ckpt: '+args.save_path+'/'+args.data_set+'_'+str(step)+'_G.pth')
        torch.save(self.student.state_dict(), args.save_path+'/'+args.data_set+'_'+str(step)+'_G.pth')
        if self.args.adv:
            logging.info('saving ckpt: '+args.save_path+'/'+args.data_set+'_'+str(step)+'_D.pth')
            torch.save(self.D_model.state_dict(), args.save_path+'/'+args.data_set+'_'+str(step)+'_D.pth')

    def __del__(self):
        # Nothing to release explicitly; CUDA tensors are freed by refcounting.
        pass
