# coding: utf-8
from data import *
from data.voc0712 import *
from utils.augmentations import SSDAugmentation
from layers.modules import RefineMultiBoxLoss
import os
import random
import time
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.utils.data as data
from refinedetvgg16 import *
from utils.torchutil import SaveCheckPoint

from utils import Logger

# --- Logging setup: one timestamped log file per run under ./log/ ---
os.makedirs("./log/", exist_ok=True)
# NOTE(review): the original passed `time.localtime` (the function object)
# as a stray third argument to str.format; the format string has only two
# placeholders, so it was silently ignored.  It was presumably meant for
# strftime, which already defaults to local time, so it is dropped.
log = Logger.Logger("./log/{}_{}.log".format(__file__.split('/')[-1],
                                             time.strftime("%Y%m%d-%H%M%S")), level='debug').logger

# --- Device selection ---
USE_CUDA = True
GPU_ID = [2, 3]
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in GPU_ID])
device = torch.device("cuda" if torch.cuda.is_available() and USE_CUDA else "cpu")

# Checkpoint to warm-start from (None = train from scratch); when `resume`
# is True the optimizer/scheduler/epoch state is restored as well.
pre_checkpoint = None
resume = False

train_batch = 24  # images per iteration (split across GPUs by DataParallel)
display = 50      # log every N iterations

# --- SGD hyper-parameters ---
base_lr = 0.01
clip_grad = 120.0  # max gradient norm for clip_grad_norm_
momentum = 0.9
gamma = 0.1        # LR decay factor applied at each milestone
weight_decay = 0.0005
stepsize = [200, 500, 650, 750]  # LR decay milestones, in epochs
max_epoch = 900

save_interval = 10000  # checkpoint every N iterations

# --- Output / dataset paths ---
save_dir = "./models"
os.makedirs(save_dir, exist_ok=True)  # race-safe vs. exists()+mkdir()
save_prefix = save_dir + "/refinedet_vgg16_320_20181211"
DATASET_ROOT = os.path.expanduser("~/dataset/VOCdevkit")


# data loader
def _worker_init_fn_():
    torch_seed = torch.initial_seed()
    np_seed = torch_seed // 2 ** 32 - 1
    random.seed(torch_seed)
    np.random.seed(np_seed)


def train():
    """Run the full RefineDet (VGG16, 320 input) training loop on VOC.

    Builds the dataset/loader, network, ARM/ODM losses, SGD optimizer and
    MultiStep LR scheduler, optionally restores a checkpoint, then trains
    for ``max_epoch`` epochs with periodic logging and checkpointing.

    Side effects: reads images under ``DATASET_ROOT`` and writes
    checkpoints under ``save_prefix`` and messages to the module logger.
    """
    start_epoch = 0
    cfg = voc320

    # dataset / loader
    dataset = VOCDetection(root=DATASET_ROOT,
                           transform=SSDAugmentation(cfg['min_dim'],
                                                     MEANS))
    # BUGFIX: the original passed `worker_init_fn=_worker_init_fn_()` -- the
    # return value (None) of a single call in the parent process -- so the
    # workers were never re-seeded.  DataLoader calls worker_init_fn(worker_id)
    # inside each worker; wrap the zero-argument seeder in a lambda.
    data_loader = data.DataLoader(dataset, train_batch,
                                  num_workers=len(GPU_ID),
                                  shuffle=True, collate_fn=detection_collate,
                                  worker_init_fn=lambda worker_id: _worker_init_fn_(),
                                  pin_memory=True)

    # base net
    ssd_net = RefineDetVGG16("train", cfg["min_dim"], cfg["num_classes"])
    net = ssd_net

    # prior (anchor) boxes are fixed, so no gradient tracking is needed
    net_priorbox = PriorBox(cfg)
    with torch.no_grad():
        priorboxes = net_priorbox.forward()

    # criteria: ARM is binary (object vs background, 2 classes), ODM is the
    # full multi-class head; last ODM arg 0.01 matches the project's
    # RefineMultiBoxLoss signature -- see its definition for the meaning.
    arm_criterion = RefineMultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5, False, cfg['variance'], device)
    odm_criterion = RefineMultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5, False, cfg['variance'], device,
                                       0.01)

    # optimizer and scheduler
    optimizer = optim.SGD(net.parameters(), lr=base_lr, momentum=momentum, weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, stepsize, gamma)

    # Wrap in DataParallel *before* loading the checkpoint so the
    # "module."-prefixed keys saved from a wrapped net line up.
    if USE_CUDA:
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    if pre_checkpoint:
        cp = torch.load(pre_checkpoint)
        net.load_state_dict(cp['weights'])
        log.info("=> load state dict from {}...".format(pre_checkpoint))
        if resume:
            optimizer.load_state_dict(cp['optimizer'])
            scheduler.load_state_dict(cp['scheduler'])
            start_epoch = cp['epoch']
            log.info("=> resume from epoch: {}, now the lr is: {}".format(start_epoch, optimizer.param_groups[0]['lr']))

    net.to(device)
    priorboxes = priorboxes.to(device)
    arm_criterion.to(device)
    odm_criterion.to(device)

    k = 0     # global iteration counter (drives display/save intervals)
    # NOTE(review): stays the int 0 if the loader yields no batches, in
    # which case loss.item() in the per-epoch log below would raise.
    loss = 0
    for epoch in range(start_epoch, max_epoch + 1):
        # one epoch
        for batch_idx, (images, targets) in enumerate(data_loader):
            net.train()
            images = images.to(device)
            targets = [i.to(device) for i in targets]

            # forward
            t0 = time.time()
            out = net(images)

            # backward
            optimizer.zero_grad()
            arm_loc, arm_conf, odm_loc, odm_conf = out
            # arm branch loss
            arm_loss_l, arm_loss_c = arm_criterion((arm_loc, arm_conf), priorboxes, targets)
            # odm branch loss (conditioned on the ARM predictions)
            odm_loss_l, odm_loss_c = odm_criterion((odm_loc, odm_conf), priorboxes, targets, (arm_loc, arm_conf), False)
            loss = arm_loss_l + arm_loss_c + odm_loss_l + odm_loss_c
            loss.backward()
            # clip grad
            torch.nn.utils.clip_grad_norm_(net.parameters(), clip_grad)
            optimizer.step()

            t1 = time.time()
            if k % display == 0:
                log.info(
                    "iter/epoch: {}/{}, lr: {}, loss: {:.4f},arm_loc: {:.4f}, arm_conf: {:.4f},odm_loc: {:.4f}, odm_conf: {:.4f},time/iter: {:.3f} s".format(
                        k,
                        epoch,
                        optimizer.param_groups[0]['lr'],
                        loss.item(),
                        arm_loss_l.item(),
                        arm_loss_c.item(),
                        odm_loss_l.item(),
                        odm_loss_c.item(),
                        t1 - t0))
            if k % save_interval == 0:
                path = save_prefix + "_iter_{}.pkl".format(k)
                SaveCheckPoint(path, net, optimizer, scheduler, epoch)
                log.info("=> save model: {}".format(path))
            k += 1
        log.info('epoch: {}, lr: {}, loss is: {}'.format(epoch, optimizer.param_groups[0]['lr'], loss.item()))
        scheduler.step()

    log.info("optimize done...")
    path = save_prefix + "_final.pkl"
    SaveCheckPoint(path, net, optimizer, scheduler, max_epoch)
    log.info("=> save model: {} ...".format(path))


# Script entry point -- start training when executed directly.
if __name__ == "__main__":
    train()
