"""
training ssd object detection model
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../'))

import time
import torch
import torch.optim
import torch.utils.data
import torch.backends.cudnn as cudnn
from vortex.models.ssd300 import SSD300
from vortex.network.head.ssd import MultiBoxLoss

from vortex.data.voc import PascalVOCDataset
from vortex.data import get_transforms
from vortex.utils.engine import adjust_learning_rate, save_checkpoint, clip_gradient, save_state_dict
from vortex.utils.misc import AverageMeter
from vortex.data.labels import get_label_map
from vortex.utils.config import get_config
from vortex.utils.engine import check_weights


# Run on GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class SSDTrainer(object):
    """Trainer for the SSD300 object detector on Pascal VOC data.

    Builds the model, optimizer (optionally restored from a checkpoint),
    MultiBox loss and training data loader from the ``opts`` config
    namespace, then runs SGD training with step learning-rate decay and
    periodic checkpointing.
    """

    def __init__(self, opts):
        """Set up model, loss, optimizer, data loader and schedule from ``opts``."""
        self.labels, self.label_map = get_label_map(opts.LABELS)
        self.n_classes = len(self.labels) + 1  # +1 for the background class

        self.start_epoch, self.model, self.optimizer = self.load_model_and_weights(opts)
        self.model = self.model.to(device)
        self.criterion = MultiBoxLoss(priors=self.model.priors_cxcy).to(device)

        # Build the training data pipeline.
        transforms = get_transforms('train')
        train_dataset = PascalVOCDataset(opts.DATA_ROOT, split='train', transform=transforms)
        self.train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=opts.BATCH_SIZE,
                                                        shuffle=True, collate_fn=train_dataset.collate_fn,
                                                        num_workers=opts.N_WORKERS, pin_memory=True)

        # The config expresses the schedule in iterations; convert to epochs.
        batches_per_epoch = len(train_dataset) // opts.BATCH_SIZE
        self.epochs = opts.ITERATIONS // batches_per_epoch
        self.decay_lr_at = [it // batches_per_epoch for it in opts.DECAY_LR_AT]
        self.decay_lr_to = opts.DECAY_LR_TO
        # Optional gradient-clipping threshold; stays disabled (None) unless
        # the config provides a GRAD_CLIP value.
        self.grad_clip = getattr(opts, 'GRAD_CLIP', None)
        self.print_freq = opts.PRINT_FREQ
        self.checkpoint_freq = opts.CHECKPOINT_FREQ

        # Input size is fixed at 300x300, so let cuDNN autotune kernels.
        cudnn.benchmark = True

    def load_model_and_weights(self, opts):
        """Create (or restore) the model and optimizer.

        Three cases, decided by ``opts.CHECKPOINT``:
          * ends with ``pth.tar`` — resume: full model + optimizer + epoch
            are unpickled from the checkpoint;
          * ends with ``.pth``    — fresh model initialised from a saved
            ``state_dict``;
          * anything else         — fresh model, random/default init.

        Returns:
            (start_epoch, model, optimizer) — epoch to resume from (0 for a
            fresh run), the SSD300 model (still on CPU) and its SGD optimizer.
        """
        start_epoch = 0
        if opts.CHECKPOINT.endswith('pth.tar'):
            # Resume training: the checkpoint holds the whole pickled model
            # and optimizer, not just weights.
            checkpoint = torch.load(opts.CHECKPOINT, map_location='cpu')
            start_epoch = checkpoint['epoch'] + 1
            model = checkpoint['model']
            optimizer = checkpoint['optimizer']
        else:
            model = SSD300(n_classes=self.n_classes)
            if opts.CHECKPOINT.endswith('.pth'):
                # Initialise from pretrained weights only.
                state_dict = torch.load(opts.CHECKPOINT, map_location='cpu')
                model.load_state_dict(state_dict)
            # Biases get twice the base learning rate (standard SSD recipe).
            biases = []
            not_biases = []
            for param_name, param in model.named_parameters():
                if param.requires_grad:
                    if param_name.endswith('.bias'):
                        biases.append(param)
                    else:
                        not_biases.append(param)

            optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * opts.LR},
                                                {'params': not_biases, 'lr': opts.LR}],
                                        lr=opts.LR, momentum=opts.MOMENTUM, weight_decay=opts.WEIGHT_DECAY)
        return start_epoch, model, optimizer

    def run(self):
        """Train from ``start_epoch`` to ``epochs``, decaying LR and checkpointing."""
        for epoch in range(self.start_epoch, self.epochs):
            if epoch in self.decay_lr_at:
                adjust_learning_rate(self.optimizer, self.decay_lr_to)

            self.train_epoch(epoch)

            if (epoch + 1) % self.checkpoint_freq == 0:
                # save_checkpoint(epoch, self.model, self.optimizer)      # save all
                save_state_dict(epoch, self.model)                      # save only weights

    def train_epoch(self, epoch):
        """Run one full pass over the training set.

        Args:
            epoch (int): current epoch index, used only for logging.
        """
        self.model.train()  # training mode enables dropout
        print('train for epoch {}'.format(epoch))
        batch_time = AverageMeter()  # forward prop. + back prop. time
        data_time = AverageMeter()  # data loading time
        losses = AverageMeter()  # loss

        start = time.time()

        # Batches
        for i, (images, boxes, labels, paths) in enumerate(self.train_loader):
            data_time.update(time.time() - start)

            # Move to default device
            images = images.to(device)  # (batch_size (N), 3, 300, 300)
            boxes = [b.to(device) for b in boxes]
            labels = [l.to(device) for l in labels]

            # Forward prop.
            predicted_locs, predicted_scores = self.model(images)  # (N, 8732, 4), (N, 8732, n_classes)

            # Loss
            loss = self.criterion(predicted_locs, predicted_scores, boxes, labels)  # scalar

            # Backward prop.
            self.optimizer.zero_grad()
            loss.backward()

            # Clip gradients, if necessary
            if self.grad_clip is not None:
                clip_gradient(self.optimizer, self.grad_clip)

            # Update model
            self.optimizer.step()

            losses.update(loss.item(), images.size(0))
            batch_time.update(time.time() - start)

            start = time.time()

            # Print status
            if i % self.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(self.train_loader),
                                                                      batch_time=batch_time,
                                                                      data_time=data_time, loss=losses))
        del predicted_locs, predicted_scores, images, boxes, labels  # free some memory since their histories may be stored


if __name__ == '__main__':
    # Load the SSD300 section of the training config and start training.
    config = get_config('./configs/ssd300.yaml')
    SSDTrainer(config.SSD300).run()
