from __future__ import print_function, absolute_import
import time

import torch
from torch.autograd import Variable
import torch.nn.functional as F

from .evaluation_metrics import accuracy
from .utils.meters import AverageMeter
from .utils import Bar
from torch.nn import functional as F
from config import Experiment_config
import reid.triplet
# from triplet import TripletLoss
# import ipdb
# Local alias so this module can refer to TripletLoss directly
# (equivalent to `from reid.triplet import TripletLoss`).
TripletLoss = reid.triplet.TripletLoss

# Module-level experiment configuration shared by every trainer below.
config = Experiment_config()
class BaseTrainer(object):
    """Skeleton trainer running one epoch of combined cross-entropy +
    triplet-loss optimisation.

    Subclasses must implement ``_parse_data`` and ``_forward``.

    Args:
        model: network under training.
        criterion_c: cross-entropy criterion applied to each classifier head.
        criterion_t: triplet criterion applied to the embedding outputs.
        X, Y, SMLoss_mode: accepted but unused; kept in the signature so
            existing call sites continue to work.
    """

    def __init__(self, model, criterion_c, criterion_t, X, Y, SMLoss_mode=0):
        super(BaseTrainer, self).__init__()
        self.model = model
        self.criterion_c = criterion_c
        self.criterion_t = criterion_t
        # NOTE(review): X, Y and SMLoss_mode are ignored by this class.
        self.config = config

    def train(self, epoch, data_loader, optimizer, print_freq=1, num_iters=202):
        """Run one training epoch of ``num_iters`` optimisation steps.

        Args:
            epoch: epoch index, used only for the progress-bar label.
            data_loader: iterable yielding triplet batches.
            optimizer: optimizer stepping the model parameters.
            print_freq: unused; kept for interface compatibility.
            num_iters: number of steps per epoch.  Previously a hard-coded
                202 repeated in three places; the default preserves that
                behaviour.
        """
        self.model.train()

        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        precisions = AverageMeter()
        end = time.time()

        # The gradient seeds passed to torch.autograd.backward are
        # loop-invariant: build them once instead of allocating seven new
        # CUDA tensors on every step.
        unit_grads = [torch.ones(1).cuda() for _ in range(6)]
        trip_grad = torch.ones(int(self.config.param_triplet_loss)).cuda()

        bar = Bar('Processing', max=num_iters)
        for i in range(num_iters):
            data_time.update(time.time() - end)

            # NOTE(review): a fresh iterator is created every step, so only
            # the *first* batch of each iterator is consumed.  With a
            # shuffling loader this amounts to random batch sampling; with a
            # non-shuffling loader it would repeat the same batch every
            # step — confirm the loader shuffles.
            data_tri = next(iter(data_loader))
            inputs_tri, labels_tri = self._parse_data(data_tri)

            loss0, loss1, loss2, loss3, loss4, loss5, cross_prec, trip_loss, trip_prec = self._forward(
                inputs_tri, labels_tri)

            # Combined scalar is used only for logging; the actual backward
            # pass is driven through torch.autograd.backward below.
            loss = (loss0 + loss1 + loss2 + loss3 + loss4 + loss5)/6 +   trip_loss /3
            losses.update(loss.data, labels_tri.size(0))
            precisions.update(cross_prec, labels_tri.size(0))

            optimizer.zero_grad()
            torch.autograd.backward(
                [loss0, loss1, loss2, loss3, loss4, loss5, trip_loss],
                unit_grads + [trip_grad])
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            # Plot progress.
            bar.suffix = '[{N_epoch}][{N_batch}/{N_size}] | Loss {N_loss:.3f} {N_lossa:.3f} | Prec {N_prec:.2f} {N_preca:.2f} | {model}'.format(
                N_epoch=epoch,
                N_batch=i + 1,
                N_size=num_iters,
                N_loss=losses.val,
                N_lossa=losses.avg,
                N_prec=precisions.val,
                N_preca=precisions.avg,
                model=self.config.logs_dir.split('/')[-1]
            )
            bar.next()
        bar.finish()

    def _parse_data(self, inputs):
        """Split a raw loader batch into (inputs, targets).  Subclass hook."""
        raise NotImplementedError

    def _forward(self, inputs, targets):
        """Run the model and return per-head losses/precisions.  Subclass hook."""
        raise NotImplementedError


class Trainer(BaseTrainer):
    """Concrete trainer: six classifier heads (cross-entropy) plus one
    triplet loss on the embedding outputs."""

    def _parse_data(self, inputs):
        """Unpack a loader batch.

        The batch is a 4-tuple whose first element is the image tensor and
        whose third element is the person-id labels (the other two fields
        are ignored).

        Returns:
            inputs: single-element list holding the image tensor.
            targets: person-id labels moved to the GPU.
        """
        imgs, _, pids, _ = inputs
        inputs = [Variable(imgs)]
        targets = Variable(pids.cuda())
        return inputs, targets

    def _forward(self, inputs, targets):
        """Forward pass producing six head losses and the triplet loss.

        Assumes the model returns a tuple where ``outputs[1]`` is a list of
        at least six classifier logits and ``outputs[2]``/``outputs[3]``
        feed the triplet criterion — TODO confirm against the model
        definition.

        Returns:
            (loss0..loss5, cross_prec, trip_loss, trip_prec)
        """
        outputs = self.model(*inputs, targets=targets)

        # One cross-entropy loss per classifier head.
        # (Removed dead code: an `index` tensor built from `targets - 751`
        # was computed here but never used.)
        loss0, loss1, loss2, loss3, loss4, loss5 = (
            self.criterion_c(logits, targets) for logits in outputs[1][:6])

        # Precision is reported from the third head only.
        cross_prec, = accuracy(outputs[1][2].data, targets.data)

        trip_loss, trip_prec = self.criterion_t(outputs[2], outputs[3])

        return loss0, loss1, loss2, loss3, loss4, loss5, cross_prec, trip_loss, trip_prec

# def triplet_trainer(object):
#     def __init__(self, model, criterion_c, criterion_t, beta):
