import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
import os
from torch.autograd import Variable
import sys
import time
from utils.util import *
from utils.save import *
from torchvision import datasets, transforms, utils
import torchvision.models as models
# import torchsnooper
import numpy as np
# @torchsnooper.snoop()
# def train(args, train_loader, model, criterion, optimizer, epoch):
#     batch_time = AverageMeter()
#     data_time = AverageMeter()
#     losses = AverageMeter()
#     top1 = AverageMeter()
#     top2 = AverageMeter()
#     log = Log()
#
#     losses1 = AverageMeter()
#     losses2 = AverageMeter()
#     losses3 = AverageMeter()
#     # switch to train mode
#     model.train()
#
#     for i, (data, target, paths) in enumerate(train_loader):
#         if args.gpu is not None:
#             data = data.cuda()
#             target = target.cuda()
#
#         out1, out2, out3, _ = model(data)
#         out = out1 + out2 + 0.1 * out3
#
#         # loss1 = criterion(out1, target)
#         # loss2 = criterion(out2, target)
#         # loss3 = criterion(out3, target)
#         N = data.size(0)
#         C = args.nclass
#         # C is the number of classes.
#         alpha = 0.1
#         gamma = 2
#         P = torch.nn.functional.softmax(out1, dim=1)
#         class_mask = out1.data.new(N, C).fill_(0)
#         class_mask = Variable(class_mask)
#         ids = target.view(-1, 1)
#         class_mask.scatter_(1, ids.data, 1.)
#         probs = (P * class_mask).sum(1).view(-1, 1)
#         log_p = probs.log()
#         batch_loss = -alpha * (torch.pow((1 - probs), gamma)) * log_p
#         loss1 = batch_loss.sum()
#
#         P = torch.nn.functional.softmax(out2, dim=1)
#         class_mask = out2.data.new(N, C).fill_(0)
#         class_mask = Variable(class_mask)
#         ids = target.view(-1, 1)
#         class_mask.scatter_(1, ids.data, 1.)
#         probs = (P * class_mask).sum(1).view(-1, 1)
#         log_p = probs.log()
#         batch_loss = -alpha * (torch.pow((1 - probs), gamma)) * log_p
#         loss2 = batch_loss.sum()
#
#         P = torch.nn.functional.softmax(out3, dim=1)
#         class_mask = out3.data.new(N, C).fill_(0)
#         class_mask = Variable(class_mask)
#         ids = target.view(-1, 1)
#         class_mask.scatter_(1, ids.data, 1.)
#         probs = (P * class_mask).sum(1).view(-1, 1)
#         log_p = probs.log()
#         batch_loss = -alpha * (torch.pow((1 - probs), gamma)) * log_p
#         loss3 = batch_loss.sum()
#         # smoothed_labels = torch.full(size=(N, C), fill_value=0.1 / (C - 1)).cuda()
#         # smoothed_labels.scatter_(dim=1, index=torch.unsqueeze(target, dim=1), value=0.9)
#         # log_prob1 = torch.nn.functional.log_softmax(out1, dim=1)
#         # loss1 = -torch.sum(log_prob1 * smoothed_labels) / N
#         #
#         # log_prob2 = torch.nn.functional.log_softmax(out1, dim=1)
#         # loss2 = -torch.sum(log_prob2 * smoothed_labels) / N
#         #
#         # log_prob3 = torch.nn.functional.log_softmax(out1, dim=1)
#         # loss3 = -torch.sum(log_prob3 * smoothed_labels) / N
#
#         loss = loss1 + loss2 + 0.1 * loss3
#
#         # measure accuracy and record loss
#         prec1, prec2 = accuracy(out, target, topk=(1, 2))  # this is metric on trainset
#         batchsize = data.size(0)
#         losses.update(loss.item()  , batchsize)
#
#         if np.isnan(losses.val):
#             sys.exit('Loss diverged')
#
#         losses1.update(loss1.item(), batchsize)
#         losses2.update(loss2.item(), batchsize)
#         losses3.update(loss3.item(), batchsize)
#         top1.update(prec1[0], batchsize)
#         top2.update(prec2[0], batchsize)
#
#         # compute gradient and do SGD step
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#
#         if i % args.print_freq == 0:
#             print('DFL-CNN <==> Train Epoch: [{0}][{1}/{2}]\n'
#                 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
#                 'Loss1 {loss1.val:.4f} ({loss1.avg:.4f})\t'
#                 'Loss2 {loss2.val:.4f} ({loss2.avg:.4f})\t'
#                 'Loss3 {loss3.val:.4f} ({loss3.avg:.4f})\n'
#                 'Top1 {top1.val:.3f} ({top1.avg:.3f})\t'
#                 'Top2 {top2.val:.3f} ({top2.avg:.3f})'.format(
#                 epoch, i, len(train_loader), loss=losses, loss1=losses1, loss2=losses2, loss3=losses3, top1=top1, top2=top2))
#
#             totalloss = [losses, losses1, losses2, losses3]
#             log.save_train_info(epoch, i, len(train_loader), totalloss, top1, top2)

def train(args, train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch for the single-head classifier.

    Args:
        args: namespace providing at least ``gpu`` (None for CPU) and
            ``print_freq`` (batches between console reports).
        train_loader: iterable yielding ``(data, target, paths)`` batches.
        model: network mapping a batch of images to class logits.
        criterion: loss function applied to ``(logits, target)``.
        optimizer: optimizer stepping the model parameters.
        epoch: current epoch index, used only for logging.

    Side effects: updates model weights in place, prints per-batch metrics,
    and exits the process if the running loss becomes NaN.
    """
    # Hoisted out of the batch loop: one import per call, not per batch.
    from sklearn import metrics

    losses = AverageMeter()
    top1 = AverageMeter()
    log = Log()  # kept: Log() may open/prepare log files on construction

    # switch to train mode (enables dropout / batch-norm updates)
    model.train()

    for i, (data, target, paths) in enumerate(train_loader):
        if args.gpu is not None:
            data = data.cuda()
            target = target.cuda()

        out = model(data)

        # Bug fix: the original bound predictions to `target_list` and
        # ground truth to `predict_list`, then passed them to sklearn in
        # that order — sklearn expects (y_true, y_pred), so precision and
        # recall were computed with the roles swapped.
        predict_list = list(F.softmax(out, dim=1).data.cpu().numpy().argmax(axis=1))
        target_list = list(target.cpu().numpy())

        pre_score = metrics.precision_score(target_list, predict_list, labels=[0], average='macro')
        rec_score = metrics.recall_score(target_list, predict_list, labels=[0], average='macro')
        print('train_pre_score:', pre_score)
        print('train_rec_score:', rec_score)

        loss = criterion(out, target)

        # measure accuracy and record loss (metric on the training set)
        prec = accuracy(out, target)
        batchsize = data.size(0)

        losses.update(loss.item(), batchsize)

        if np.isnan(losses.val):
            sys.exit('Loss diverged')

        top1.update(prec[0].item(), batchsize)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % args.print_freq == 0:
            print('DFL-CNN <==> Train Epoch: [{0}][{1}/{2}]\n'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Top1 {top1.val:.3f} ({top1.avg:.3f})'
                 .format(
                epoch, i, len(train_loader), loss=losses, top1=top1
                ))

def muti_train(args, train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch for the multi-head (per-digit) classifier.

    The dataset's class names appear to be strings of digits: character ``j``
    of a sample's class name is the label for head ``j`` (``args.nclass``
    heads total), and the model returns one logit tensor per head.
    NOTE(review): this assumes class-name strings are at least ``args.nclass``
    characters of digits — confirm against the dataset layout.

    Args:
        args: namespace providing ``gpu``, ``print_freq`` and ``nclass``.
        train_loader: iterable yielding ``(data, target, paths)`` batches;
            its dataset must expose ``class_to_idx``.
        model: network returning a sequence of ``args.nclass`` logit tensors.
        criterion: loss applied independently to each head.
        optimizer: optimizer stepping the model parameters.
        epoch: current epoch index, used only for logging.
    """
    losses = AverageMeter()
    top1 = AverageMeter()
    log = Log()  # kept: Log() may open/prepare log files on construction

    # switch to train mode
    model.train()

    class_to_idx = train_loader.dataset.class_to_idx
    # Perf fix: invert the mapping once. The original rebuilt
    # list(keys())/list(values()) and ran a linear .index() for every
    # element of every batch.
    idx_to_name = {idx: name for name, idx in class_to_idx.items()}

    for i, (data, target, paths) in enumerate(train_loader):
        # Row j = the j-th digit of each sample's class name, i.e. the
        # per-head label matrix of shape (nclass, batch).
        class_list = torch.tensor(
            [[int(idx_to_name[int(t)][j]) for t in target]
             for j in range(args.nclass)])
        if args.gpu is not None:
            data = data.cuda()
            target = target.cuda()
            class_list = class_list.cuda()
        out = model(data)
        batchsize = data.size(0)
        loss = 0

        for k in range(len(class_list)):
            loss += criterion(out[k], class_list[k])
            # measure accuracy and record loss per head (trainset metric)
            prec = accuracy(out[k], class_list[k])
            top1.update(prec[0].item(), batchsize)

        losses.update(loss.item(), batchsize)

        if np.isnan(losses.val):
            sys.exit('Loss diverged')

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % args.print_freq == 0:
            print('DFL-CNN <==> Train Epoch: [{0}][{1}/{2}]\n'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Top1 {top1.val:.3f} ({top1.avg:.3f})'
                 .format(
                epoch, i, len(train_loader), loss=losses, top1=top1
                ))
#