from data import *
from layers.modules import RefineDetMuliBoxLoss
from model.refinedet_vgg import build_refinedet

import os
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np

# Training hyper-parameters.
BATCH_SIZE = 12    # images per mini-batch
NUM_WORKERS = 4    # DataLoader worker processes
LR = 1e-3          # initial SGD learning rate (decayed by adjust_learning_rate)
# Fall back to CPU when CUDA is unavailable instead of crashing at the first
# .to(DEVICE) call — mirrors the availability check done for the default
# tensor type below in this file.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Make newly created float tensors live on the GPU by default when one is
# present, so modules built later land on the GPU without explicit .cuda().
# NOTE(review): torch.set_default_tensor_type is legacy API (newer code uses
# torch.set_default_dtype / set_default_device); kept for compatibility with
# the rest of this script.
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    print("default tensor type: torch.cuda.FloatTensor")
else:
    torch.set_default_tensor_type('torch.FloatTensor')
    print("default tensor type: torch.FloatTensor")


def _save_checkpoint(state_dict, iteration):
    """Write *state_dict* to ./weights/RefineDet512_COCO_<iteration>.pth."""
    torch.save(state_dict, './weights'
               + '/RefineDet{}_{}_{}.pth'.format(512, "COCO",
                                                 repr(iteration)))


def train():
    """Train RefineDet-512 on COCO train2014.

    Loads the reduced-fc VGG-16 backbone weights, Xavier-initialises all
    remaining layers, then runs SGD for cfg['max_iter'] iterations over an
    endlessly-recycled DataLoader, checkpointing every 5000 iterations and
    once more at the end.
    """
    cfg = prior_box_config
    dataset = COCODetectionDataset('./data/coco', 'train2014',
                                   transform=BaseTransform(512, MEANS))
    refinedet_net = build_refinedet('train')

    net = torch.nn.DataParallel(refinedet_net)
    cudnn.benchmark = True

    # Load the pretrained (reduced-fc) VGG-16 backbone weights.
    vgg_weights = torch.load('./weights/vgg16_reducedfc.pth')
    print("loading base network")
    refinedet_net.vgg.load_state_dict(vgg_weights)
    # Xavier-initialise every head that is not covered by the backbone.
    for module in (refinedet_net.extras,
                   refinedet_net.arm_conf, refinedet_net.arm_loc,
                   refinedet_net.odm_conf, refinedet_net.odm_loc,
                   refinedet_net.tcb_fpred, refinedet_net.tcb_fscale,
                   refinedet_net.tcb_fupsample):
        module.apply(weights_init)

    optimizer = optim.SGD(net.parameters(), LR, 0.8, weight_decay=5e-4)

    # ARM: binary objectness (2 classes); ODM: full classification refined
    # by the ARM output (use_ARM=True).
    arm_criterion = RefineDetMuliBoxLoss(2, 0.5, True, 0, True, 3, 0.5, False)
    odm_criterion = RefineDetMuliBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3,
                                         0.5, False, use_ARM=True)
    net.train()
    # Running per-epoch sums of the four loss components.
    arm_loc_loss = 0
    arm_conf_loss = 0
    odm_loc_loss = 0
    odm_conf_loss = 0
    epoch = 0

    # max(1, ...) guards against ZeroDivisionError when the dataset holds
    # fewer than BATCH_SIZE samples.
    epoch_size = max(1, len(dataset) // BATCH_SIZE)
    print('training RefineDet on:', dataset.name)

    step_index = 0

    data_loader = data.DataLoader(dataset, BATCH_SIZE,
                                  num_workers=NUM_WORKERS,
                                  shuffle=True,
                                  collate_fn=detection_collate,
                                  pin_memory=True)

    # Create a batch iterator that is re-created whenever it is exhausted,
    # so training runs for max_iter iterations regardless of dataset size.
    batch_iterator = iter(data_loader)
    for iteration in range(0, cfg['max_iter']):
        if iteration % epoch_size == 0:
            # New epoch: reset the running loss sums.
            arm_conf_loss = 0
            arm_loc_loss = 0
            odm_conf_loss = 0
            odm_loc_loss = 0
            epoch += 1
        if iteration in cfg['lr_steps']:
            step_index += 1
            adjust_learning_rate(optimizer, 0.1, step_index)

        try:
            images, targets = next(batch_iterator)
        except StopIteration:
            batch_iterator = iter(data_loader)
            images, targets = next(batch_iterator)

        # Move the batch to the training device.
        images = images.to(DEVICE)
        targets = [ann.to(DEVICE) for ann in targets]
        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        arm_loss_l, arm_loss_c = arm_criterion(out, targets)
        odm_loss_l, odm_loss_c = odm_criterion(out, targets)
        arm_loss = arm_loss_c + arm_loss_l
        odm_loss = odm_loss_c + odm_loss_l
        loss = arm_loss + odm_loss
        loss.backward()
        optimizer.step()
        t1 = time.time()
        arm_loc_loss += arm_loss_l.item()
        arm_conf_loss += arm_loss_c.item()
        odm_loc_loss += odm_loss_l.item()
        odm_conf_loss += odm_loss_c.item()

        if iteration % 10 == 0:
            print('timer: %.4f sec.' % (t1 - t0))
            print('iter ' + repr(iteration) + ' || ARM_L Loss: %.4f ARM_C Loss: %.4f ODM_L Loss: %.4f ODM_C Loss: %.4f ||' \
            % (arm_loss_l.item(), arm_loss_c.item(), odm_loss_l.item(), odm_loss_c.item()), end=' ')
            # BUG FIX: the remaining-iterations * seconds product is a float;
            # '{:d}' raises ValueError on floats, so truncate to int first.
            print('eta:{:d} s'.format(int((cfg['max_iter'] - iteration) * (t1 - t0))))
        if iteration != 0 and iteration % 5000 == 0:
            print('Saving state, iter:', iteration)
            _save_checkpoint(refinedet_net.state_dict(), iteration)
    print('Saving state, iter:', iteration)
    _save_checkpoint(refinedet_net.state_dict(), iteration)



def adjust_learning_rate(optimizer, gamma, step):
    """Set every param group's learning rate to ``LR * gamma ** step``.

    Implements step decay of the initial module-level LR (decayed by a
    factor of *gamma* at each scheduled *step*).
    # Adapted from PyTorch ImageNet example:
    # https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """
    decayed_lr = LR * gamma ** step
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr

def xavier(param):
    """Fill *param* in place with Xavier (Glorot) uniform values."""
    nn.init.xavier_uniform_(param)

def weights_init(m):
    """Xavier-initialise conv weights and zero their biases.

    Intended for use with ``module.apply(weights_init)``; only Conv2d and
    ConvTranspose2d layers are touched, everything else passes through.

    BUG FIX: the original unconditionally called ``m.bias.data.zero_()``,
    which raises AttributeError for any conv built with ``bias=False``
    (``m.bias`` is ``None``); the guard below handles that case.
    """
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.zero_()

if __name__ == "__main__":
    train()