from __future__ import print_function

from CUB import CUBDataSet
from config import root_dir, resnet_path
from torch.utils.data.dataloader import DataLoader
from sef import LocalMaxGlobalMin, resnet50
import torch
import torch.nn as nn


def train(nb_epoch, batch_size, nparts=4):
    """Train the SEF model on the CUB dataset and evaluate after every epoch.

    Args:
        nb_epoch: number of training epochs.
        batch_size: mini-batch size for the training loader.
        nparts: number of semantic parts; used both to build the backbone
            and to configure the LocalMaxGlobalMin regularizer.

    Side effects: loads pretrained weights from ``resnet_path``, moves the
    model to GPU, and prints per-epoch train/test accuracy.
    """
    # Data
    print('==> Preparing data..')
    trainset = CUBDataSet(root_dir, train=True)
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)

    # The test split is fixed; build it once instead of once per epoch.
    testset = CUBDataSet(root_dir, train=False)
    testloader = DataLoader(testset, batch_size=3, shuffle=False, num_workers=4)

    # Model init
    ##################################################################################
    # Bug fix: nparts was hard-coded to 4 here, silently ignoring the
    # function argument and desynchronizing the model from the lmgm
    # regularizer below whenever nparts != 4.
    model = resnet50(pretrained=False, nparts=nparts)
    state_params = torch.load(resnet_path)

    # pop redundant params from loaded states: the pretrained fc head is
    # replaced below, so its weights would not match the new layer shape
    state_params.pop('fc.weight')
    state_params.pop('fc.bias')

    # modify output layer for the 200 CUB bird classes
    in_channels = model.fc.in_features
    model.fc = nn.Linear(in_channels, 200, bias=True)

    # initializing model using pretrained params except the modified layers
    # (strict=False tolerates the missing fc.* keys popped above)
    model.load_state_dict(state_params, strict=False)
    ####################################################################################

    # GPU
    model = model.cuda()
    # cudnn.benchmark = True

    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    # optimization scheduler: decay lr by 10x every 20 epochs
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)

    # Loss components are stateless across epochs; create them once instead
    # of rebuilding them every epoch (and CrossEntropyLoss every batch).
    criterion = nn.CrossEntropyLoss()
    softmax = nn.Softmax(dim=-1).cuda()
    logsoftmax = nn.LogSoftmax(dim=-1).cuda()
    lmgm = LocalMaxGlobalMin(rho=1, nchannels=512*4, nparts=nparts).cuda()

    for epoch in range(nb_epoch):
        print('\nEpoch: %d' % epoch)
        model.train()

        train_acc_sum = 0
        train_cnt = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            #####################################################
            inputs = inputs.cuda()
            targets = targets.cuda()
            train_cnt += inputs.shape[0]

            xglobal, xlocal, xcosin = model(inputs)

            _, pred = torch.max(xglobal, dim=-1)
            train_acc_sum += torch.eq(pred, targets).sum().item()

            # global classification loss
            cls_loss = criterion(xglobal, targets)

            # part distillation: per-part probabilities and log-probabilities
            probl, predl, logprobl = [], [], []
            for i in range(nparts):
                probl.append(softmax(xlocal[i]))
                predl.append(torch.max(probl[i], 1)[-1])
                logprobl.append(logsoftmax(xlocal[i]))

            probs = softmax(xglobal)
            logprobs = logsoftmax(xglobal)

            # NOTE(review): this is sum(p*log p)/batch, i.e. negative entropy;
            # minimizing it encourages high-entropy (soft) global predictions.
            entropy_loss = torch.mul(probs, logprobs).sum().div(inputs.size(0))

            # cross-entropy between global prediction and each part head,
            # averaged over parts and down-weighted by 0.05
            soft_loss_list = []
            for i in range(nparts):
                soft_loss_list.append(torch.mul(torch.neg(probs), logprobl[i]).sum().div(inputs.size(0)))
            soft_loss = 0.05 * sum(soft_loss_list).div(nparts)

            # regularization loss
            lmgm_reg_loss = lmgm(xcosin)
            reg_loss = lmgm_reg_loss + entropy_loss + soft_loss

            loss = reg_loss + cls_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print("Epoch {}=>train acc: [{}/{}]:{}".format(epoch, train_acc_sum, train_cnt, train_acc_sum/train_cnt))
        ######################################################
        ########### test phase ###############################
        ######################################################

        acc_sum = 0
        cnt = 0
        with torch.no_grad():
            model.eval()
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs = inputs.cuda()
                targets = targets.cuda()
                output = model(inputs, train=False)

                cnt += inputs.shape[0]

                _, pred = torch.max(output, dim=-1)
                acc_sum += torch.eq(pred, targets).sum().item()
            print("test acc: [{}/{}]:{}".format(acc_sum, cnt, acc_sum / cnt))
        #####################
        ####################
        scheduler.step()

if __name__ == '__main__':
    # Default run: 50 epochs with mini-batches of 16 images.
    train(nb_epoch=50, batch_size=16)