from __future__ import print_function
import os
from PIL import Image

import logging
import random
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn

from utils import *
from CUB import CUBDataSet
from CAR import CARDataSet
from AIR import AIRDateset
from DOG import DOGDateSet
from config import root_dir, data_name
from torch.utils.data.dataloader import DataLoader

class WeakCrossEntropy(nn.Module):
    """Cross-entropy variant that normalizes the target probability against
    only the top-k highest-scoring *non-target* classes instead of the full
    class set:

        loss_i = -log( p_t / (sum(topk(p without t)) + p_t) )

    Args:
        topk (int): number of competing (non-target) classes kept in the
            denominator; must be <= num_classes - 1.
    """

    def __init__(self, topk):
        super(WeakCrossEntropy, self).__init__()
        self.soft = nn.Softmax(dim=-1)
        self.topk = topk

    def forward(self, x, tgt):
        """Compute the mean weak cross-entropy over the batch.

        Args:
            x (Tensor): raw logits, shape (batch, num_classes).
            tgt (Tensor): integer class indices, shape (batch,).

        Returns:
            Tensor: scalar mean loss.
        """
        probs = self.soft(x)
        tgt_col = tgt.view(-1, 1)
        # Probability assigned to the target class of each sample.
        target_p = probs.gather(1, tgt_col).squeeze(1)
        # Mask out the target column so topk only sees competitor classes,
        # then take the k largest non-target probabilities per sample.
        # (Vectorized replacement for the original per-sample Python loop.)
        masked = probs.scatter(1, tgt_col, float('-inf'))
        candidates, _ = masked.topk(self.topk, dim=-1)
        losses = -torch.log(target_p / (candidates.sum(dim=-1) + target_p))
        return losses.mean()


def train(nb_epoch, batch_size, store_name, resume=False, start_epoch=0, model_path=None):
    """Train a PMG (resnet50 backbone) model on the dataset selected by
    ``config.data_name`` and keep the best checkpoint by combined test accuracy.

    Args:
        nb_epoch (int): total number of training epochs.
        batch_size (int): mini-batch size.
        store_name (str): output directory for logs and the best checkpoint.
        resume (bool): if True, load a full saved model from ``model_path``.
        start_epoch (int): epoch index to resume counting from.
        model_path (str): path to the saved model; used only when resume=True.
    """
    # setup output directory (replaces the old os.stat probe guarded by a
    # bare `except:`, which silently swallowed every exception)
    exp_dir = store_name
    os.makedirs(exp_dir, exist_ok=True)

    use_cuda = torch.cuda.is_available()
    print(use_cuda)

    # Data: select the dataset class by config name; 'dog' is the fallback,
    # matching the original if/elif chain.
    print('==> Preparing data..')
    dataset_cls = {
        'bird': CUBDataSet,
        'car': CARDataSet,
        'craft': AIRDateset,
    }.get(data_name, DOGDateSet)
    trainset = dataset_cls(root_dir, train=True)
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True,
                             num_workers=4, pin_memory=True)

    # Model: either a fully pickled checkpoint or a fresh pretrained backbone.
    if resume:
        net = torch.load(model_path)
    else:
        net = load_model(model_name='resnet50_pmg', pretrain=True, require_grad=True)
    netp = net.cuda()

    # GPU
    # cudnn.benchmark = True

    CELoss = nn.CrossEntropyLoss()

    # Per-group base learning rates; the pretrained backbone ('features') gets
    # a 10x smaller lr than the freshly-initialized heads. Built once so the
    # optimizer groups and the cosine schedule below cannot drift apart.
    lr = [0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.0002]
    optimizer = optim.SGD([
        {'params': net.classifier_concat.parameters(), 'lr': lr[0]},
        {'params': net.conv_block1.parameters(), 'lr': lr[1]},
        {'params': net.classifier1.parameters(), 'lr': lr[2]},
        {'params': net.conv_block2.parameters(), 'lr': lr[3]},
        {'params': net.classifier2.parameters(), 'lr': lr[4]},
        {'params': net.conv_block3.parameters(), 'lr': lr[5]},
        {'params': net.classifier3.parameters(), 'lr': lr[6]},
        {'params': net.features.parameters(), 'lr': lr[7]},
    ], momentum=0.9, weight_decay=5e-4)

    max_val_acc = 0
    for epoch in range(start_epoch, nb_epoch):
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        train_loss3 = 0
        train_loss4 = 0
        correct = 0
        total = 0
        idx = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            idx = batch_idx
            inputs, targets = inputs.cuda(), targets.cuda()

            # cosine-annealed learning rate, per parameter group
            for nlr in range(len(optimizer.param_groups)):
                optimizer.param_groups[nlr]['lr'] = cosine_anneal_schedule(epoch, nb_epoch, lr[nlr])

            # Forward: three intermediate heads, a concat head, and an
            # auxiliary loss (rkloss) computed inside the model itself.
            output_1, output_2, output_3, output_concat, rkloss = netp(inputs)
            loss1 = CELoss(output_1, targets)
            loss2 = CELoss(output_2, targets)
            loss3 = CELoss(output_3, targets)
            concat_loss = CELoss(output_concat, targets)
            loss = loss1 + loss2 + loss3 + concat_loss + rkloss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # training log: accuracy is tracked on the third head's output
            _, predicted = torch.max(output_3.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()

            train_loss += (loss1.item() + loss2.item() + loss3.item() + concat_loss.item())
            train_loss1 += loss1.item()
            train_loss2 += loss2.item()
            train_loss3 += loss3.item()
            train_loss4 += concat_loss.item()

            if batch_idx % 50 == 0:
                print(
                    'Step: %d | Loss1: %.3f | Loss2: %.5f | Loss3: %.5f | Loss_concat: %.5f | Loss: %.3f | Acc: %.3f%% (%d/%d)' % (
                    batch_idx, train_loss1 / (batch_idx + 1), train_loss2 / (batch_idx + 1),
                    train_loss3 / (batch_idx + 1), train_loss4 / (batch_idx + 1), train_loss / (batch_idx + 1),
                    100. * float(correct) / total, correct, total))

        train_acc = 100. * float(correct) / total
        train_loss = train_loss / (idx + 1)
        with open(exp_dir + '/results_train.txt', 'a') as file:
            file.write(
                'Iteration %d | train_acc = %.5f | train_loss = %.5f | Loss1: %.3f | Loss2: %.5f | Loss3: %.5f | Loss_concat: %.5f |\n' % (
                epoch, train_acc, train_loss, train_loss1 / (idx + 1), train_loss2 / (idx + 1), train_loss3 / (idx + 1),
                train_loss4 / (idx + 1)))

        # Evaluate every epoch; keep the model with the best combined accuracy.
        # NOTE(review): `test` comes from `utils` via star-import — TODO confirm.
        val_acc_glo, val_acc_cat, val_acc_com, val_loss = test(net, CELoss, 3)
        if val_acc_com > max_val_acc:
            max_val_acc = val_acc_com
            torch.save(net, './' + store_name + '/model.pth')
        with open(exp_dir + '/results_test.txt', 'a') as file:
            file.write(
                'Iteration %d, test_acc_glo = %.5f, test_acc_cat = %.5f, test_acc_combined = %.5f, test_loss = %.6f\n' % (
                    epoch, val_acc_glo, val_acc_cat, val_acc_com, val_loss))

if __name__ == '__main__':
    # `os` is already imported at the top of the file; the duplicate import
    # that used to sit here was removed.
    # NOTE(review): CUDA_VISIBLE_DEVICES must be set before the first CUDA
    # context is created; torch is imported above, so this relies on no CUDA
    # call having run yet — confirm if device selection misbehaves.
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    train(nb_epoch=200,           # number of epochs
          batch_size=16,          # batch size
          store_name=data_name,   # folder for output
          resume=False,           # resume training from checkpoint
          start_epoch=0,          # the start epoch number when you resume the training
          model_path='')          # the saved model where you want to resume the training
