from __future__ import print_function, absolute_import

import time

import torch

from .utils.meters import AverageMeter


class PDLTrainerUDA(object):
    """Unsupervised-domain-adaptation trainer.

    Jointly optimizes an encoder on interleaved source/target batches using a
    hybrid memory loss, with an optional center loss. Source and target
    sub-batches are interleaved per device so domain-specific BN statistics
    stay separated during the forward pass.
    """

    # Default weight applied to the center loss. The center-parameter
    # gradients are rescaled by its inverse before the center optimizer step,
    # so the same value must be used in both places — hence one constant.
    CENTER_LOSS_WEIGHT = 0.003

    def __init__(self, encoder, source_classes, memory=None):
        """
        Args:
            encoder: feature-extraction network; callable on a batch tensor.
            source_classes: number of source-domain classes. Target labels
                are offset by this amount so both domains share one memory.
            memory: hybrid memory module mapping (features, targets) -> loss.
        """
        super(PDLTrainerUDA, self).__init__()
        self.encoder = encoder
        self.memory = memory
        # Center-loss module; must be assigned externally before calling
        # train() with a non-None optimizer_center.
        self.center = None
        self.source_classes = source_classes

    def train(self, epoch, data_loader_source, data_loader_target, optimizer, optimizer_center, print_freq=10,
              train_iters=400, cen_weight=CENTER_LOSS_WEIGHT):
        """Run one epoch (``train_iters`` iterations) of adaptation.

        Args:
            epoch: current epoch index (used for logging only).
            data_loader_source: iterator exposing ``.next()`` that yields
                (imgs, _, pids, _, indexes) source batches.
            data_loader_target: same, for target batches.
            optimizer: optimizer over the encoder parameters.
            optimizer_center: optimizer over the center-loss parameters, or
                None to disable the center loss entirely.
            print_freq: log every ``print_freq`` iterations.
            train_iters: number of iterations to run.
            cen_weight: scaling factor for the center loss (default 0.003;
                previously hard-coded in three separate places).
        """
        self.encoder.train()

        batch_time = AverageMeter()
        data_time = AverageMeter()

        losses_s = AverageMeter()
        losses_t = AverageMeter()
        use_center = optimizer_center is not None
        if use_center:
            losses_cen_s = AverageMeter()
            losses_cen_t = AverageMeter()

        end = time.time()
        for i in range(train_iters):
            # load data
            source_inputs = data_loader_source.next()
            target_inputs = data_loader_target.next()
            data_time.update(time.time() - end)

            # process inputs
            s_inputs, s_targets, _ = self._parse_data(source_inputs)
            t_inputs, t_targets, _ = self._parse_data(target_inputs)

            # Arrange the batch for domain-specific BN: split each domain's
            # batch across devices, then interleave along dim 1 so every
            # device receives equal source/target sub-batches. Assumes the
            # batch size is divisible by the device count — TODO confirm the
            # sampler guarantees this.
            # Guard against CPU-only machines, where device_count() == 0
            # would make view(0, -1, C, H, W) fail.
            device_num = max(torch.cuda.device_count(), 1)
            B, C, H, W = s_inputs.size()

            def reshape(inputs):
                return inputs.view(device_num, -1, C, H, W)
            s_inputs, t_inputs = reshape(s_inputs), reshape(t_inputs)
            inputs = torch.cat((s_inputs, t_inputs), 1).view(-1, C, H, W)

            # forward
            f_out = self._forward(inputs)

            # De-arrange: undo the interleave to recover per-domain features.
            f_out = f_out.view(device_num, -1, f_out.size(-1))
            f_out_s, f_out_t = f_out.split(f_out.size(1) // 2, dim=1)
            f_out_s = f_out_s.contiguous().view(-1, f_out.size(-1))
            f_out_t = f_out_t.contiguous().view(-1, f_out.size(-1))

            # Compute loss with the hybrid memory; target class ids are
            # offset by source_classes to index the shared memory bank.
            loss_s = self.memory(f_out_s, s_targets)
            loss_t = self.memory(f_out_t, t_targets + self.source_classes)

            if not use_center:
                loss = loss_s + loss_t
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            else:
                loss_cen_s = self.center(f_out_s, s_targets) * cen_weight
                loss_cen_t = self.center(f_out_t, t_targets + self.source_classes) * cen_weight
                loss = loss_s + loss_t + loss_cen_s + loss_cen_t
                optimizer.zero_grad()
                loss.backward()
                optimizer_center.zero_grad()
                # Undo the cen_weight scaling on the center parameters so
                # the centers themselves are updated by unscaled gradients.
                # Grad may be None for parameters not reached by this graph.
                for param in self.center.parameters():
                    if param.grad is not None:
                        param.grad.data *= (1. / cen_weight)
                optimizer_center.step()
                optimizer.step()
                losses_cen_s.update(loss_cen_s.item())
                losses_cen_t.update(loss_cen_t.item())

            losses_s.update(loss_s.item())
            losses_t.update(loss_t.item())

            # print log
            batch_time.update(time.time() - end)
            end = time.time()

            if (i + 1) % print_freq == 0:
                # One format source for both logging variants; the center
                # terms are appended only when the center loss is active.
                msg = ('Epoch: [{}][{}/{}]\t'
                       'Time {:.3f} ({:.3f})\t'
                       'Data {:.3f} ({:.3f})\t'
                       'Loss_s {:.3f} ({:.3f})\t'
                       'Loss_t {:.3f} ({:.3f})'
                       .format(epoch, i + 1, len(data_loader_target),
                               batch_time.val, batch_time.avg,
                               data_time.val, data_time.avg,
                               losses_s.val, losses_s.avg,
                               losses_t.val, losses_t.avg))
                if use_center:
                    msg += ('\tLoss_cen_s {:.3f} ({:.3f})\t'
                            'Loss_cen_t {:.3f} ({:.3f})'
                            .format(losses_cen_s.val, losses_cen_s.avg,
                                    losses_cen_t.val, losses_cen_t.avg))
                print(msg)

    def _parse_data(self, inputs):
        """Unpack a loader batch and move its tensors onto the GPU.

        Expects ``inputs`` = (imgs, _, pids, _, indexes).
        """
        imgs, _, pids, _, indexes = inputs
        return imgs.cuda(), pids.cuda(), indexes.cuda()

    def _forward(self, inputs):
        """Encode a batch of images into feature vectors."""
        return self.encoder(inputs)


