"""
make it a general trainer for object detection models
"""
import os
import time
import numpy as np
import torch
import torch.optim
import torch.backends.cudnn as cudnn

from vortex.data import get_data
from vortex.models import get_model

from vortex.engine.misc import adjust_learning_rate, save_state_dict
from vortex.utils.misc import AverageMeter, Logger


class Trainer(object):
    """General-purpose trainer for object detection models.

    Builds the model, data loaders, SGD optimizer, and loggers from a config
    object ``opts`` and runs the train / validate / checkpoint loop. Bias
    parameters are optimized at twice the base learning rate, a common
    detection-training setup.
    """

    def __init__(self, opts):
        """Set up model, data, optimizer, LR schedule, and logging from ``opts``."""
        self.opts = opts

        # Use the GPU only if requested in the config AND actually available.
        if opts.TRAIN.CUDA and torch.cuda.is_available():
            self.device = 'cuda'
        else:
            self.device = 'cpu'

        # Load the model and move it to the chosen device.
        self.model_name = opts.MODEL.NAME
        self.model = get_model(opts.MODEL)
        self.model.to(self.device)

        # Data loaders use model-specific transforms (each architecture may
        # expect different preprocessing / augmentation).
        train_transform = self.model.get_data_transform('trainval')
        val_transform = self.model.get_data_transform('test')
        self.train_loader, self.val_loader = get_data(opts.DATA, train_transform, val_transform)

        # Split trainable parameters so biases can use a doubled learning rate.
        biases = []
        not_biases = []
        for param_name, param in self.model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)

        self.optimizer = torch.optim.SGD(params=[{'params': not_biases, 'lr': opts.TRAIN.LR},
                                                 {'params': biases, 'lr': opts.TRAIN.LR * 2}],
                                         lr=opts.TRAIN.LR,
                                         momentum=opts.TRAIN.MOMENTUM,
                                         weight_decay=opts.TRAIN.WEIGHT_DECAY)

        # Schedule / logging configuration.
        self.num_epochs = opts.TRAIN.EPOCHS
        self.lr = opts.TRAIN.LR
        self.lr_decay_steps = opts.TRAIN.LR_DECAY_STEPS
        self.lr_decay = opts.TRAIN.LR_DECAY
        self.checkpoint_freq = opts.TRAIN.CHECKPOINT_FREQ
        self.print_freq = opts.TRAIN.PRINT_FREQ
        self.log_dir = os.path.join(opts.TRAIN.LOG_DIR, opts.TRAIN.EXP_NAME)
        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(self.log_dir, exist_ok=True)

        self.train_logger = Logger(os.path.join(self.log_dir, 'train.txt'), ['epoch', 'lr', 'loss'])
        self.val_logger = Logger(os.path.join(self.log_dir, 'val.txt'), ['epoch', 'loss'])

        # Let cuDNN auto-tune the fastest convolution algorithms; this helps
        # when input sizes are fixed across iterations.
        cudnn.benchmark = True

    def _print_progress(self, epoch, i, num_batches, batch_time, data_time, losses):
        """Print one progress line in the standard epoch/batch/timing/loss format."""
        print('Epoch: [{0}][{1}/{2}]\t'
              'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, num_batches,
                                                              batch_time=batch_time,
                                                              data_time=data_time, loss=losses))

    def train_epoch(self, epoch):
        """Run one full training pass and return the average loss over the epoch."""
        self.model.train()
        print('train for epoch {}'.format(epoch))

        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        start = time.time()

        # NOTE(review): the train loader yields 4-tuples (with image paths)
        # while the val loader yields 3-tuples — confirm against get_data().
        for i, (images, boxes, labels, paths) in enumerate(self.train_loader):
            data_time.update(time.time() - start)

            images = images.to(self.device)
            boxes = [b.to(self.device) for b in boxes]
            labels = [l.to(self.device) for l in labels]

            # The model's forward pass returns the scalar training loss directly.
            loss = self.model(images, boxes, labels)

            # Standard backprop step.
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            losses.update(loss.item(), images.size(0))
            batch_time.update(time.time() - start)

            start = time.time()

            if i % self.print_freq == 0:
                self._print_progress(epoch, i, len(self.train_loader),
                                     batch_time, data_time, losses)

        return losses.avg

    def val_epoch(self, epoch):
        """Run one validation pass (no gradients) and return the average loss."""
        self.model.eval()
        print('validation for epoch {}'.format(epoch))

        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        start = time.time()

        for i, (images, boxes, labels) in enumerate(self.val_loader):
            data_time.update(time.time() - start)

            images = images.to(self.device)
            boxes = [b.to(self.device) for b in boxes]
            labels = [l.to(self.device) for l in labels]

            # Gradients are not needed for validation.
            with torch.no_grad():
                loss = self.model(images, boxes, labels)

            losses.update(loss.item(), images.size(0))
            batch_time.update(time.time() - start)

            start = time.time()

            if i % self.print_freq == 0:
                # Bug fix: the batch count must come from the *validation*
                # loader (the original printed len(self.train_loader) here).
                self._print_progress(epoch, i, len(self.val_loader),
                                     batch_time, data_time, losses)

        return losses.avg

    def train(self):
        """Full training loop: LR step-decay, train/val epochs, logging, checkpoints."""
        current_lr = self.lr
        for epoch in range(self.num_epochs):
            # Step-decay the learning rate at the configured epochs.
            if epoch in self.lr_decay_steps:
                current_lr = adjust_learning_rate(self.optimizer, self.lr_decay)

            train_loss = self.train_epoch(epoch)
            self.train_logger.log({'epoch': epoch, 'lr': current_lr, 'loss': train_loss})
            val_loss = self.val_epoch(epoch)
            self.val_logger.log({'epoch': epoch, 'loss': val_loss})

            # Periodically save weights; checkpoints are numbered 1-based.
            if (epoch + 1) % self.checkpoint_freq == 0:
                save_state_dict(epoch + 1, self.model, self.log_dir, prefix=self.model_name)