from __future__ import division
import torch
import os,time,datetime
from torch.autograd import Variable
import logging
import torch.nn.functional as F
import numpy as np
from math import ceil
import copy
from logs import *
from utils.preprocessing import *
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from utils.plotting import *
from utils.metrix import *


def train(model,
          epoch_num,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          data_set,
          data_loader,
          save_dir,
          print_inter=200,
          val_inter=3500,
          ):
    """Train a landmark-heatmap model and periodically validate it.

    Runs the standard train loop (forward, loss, backward, step), logging a
    running loss, plotting sample heatmaps to TensorBoard, and every
    ``val_inter`` steps evaluating Normalized Error (NE) on the validation
    set. Checkpoints are written after each validation; the weights with the
    lowest validation NE are saved at the end.

    Args:
        model: network returning ``(outputs, aux_out)`` heatmap logits in
            train mode. NOTE(review): in eval mode it is called as a single
            output (see the val loop) — confirm the model switches its return
            type on ``model.train(False)``.
        epoch_num: exclusive upper bound on epoch index.
        start_epoch: epoch index to resume from.
        optimizer: torch optimizer over ``model``'s parameters.
        criterion: loss callable ``criterion(outputs, lm_masks, vis_masks)``.
        exp_lr_scheduler: LR scheduler supporting ``step(epoch)`` and
            ``get_lr()``.
        data_set: dict with a ``'val'`` dataset (only ``len`` is used).
        data_loader: dict with ``'train'`` and ``'val'`` DataLoaders yielding
            ``(imgs, lm_masks, vis_masks, ori_sizes, flms, cate_idxs)``.
        save_dir: directory for TensorBoard logs and checkpoints.
        print_inter: steps between loss log lines (plots every 2x this).
        val_inter: steps between validation passes (also fires at step 0).

    Returns:
        (best_NE, best_model_wts): lowest validation NE seen and a deep copy
        of the corresponding ``state_dict``.
    """

    writer = SummaryWriter(save_dir)
    # BUGFIX: deepcopy here. state_dict() returns tensors that alias the live
    # parameters, so without the copy these "best" weights would silently
    # track the model and end up equal to the *final* weights if validation
    # never improves. This also matches the deepcopy used on improvement below.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_NE = float('inf')

    running_loss = 9999  # EMA of the loss; seeded high so it decays downward
    step = -1
    for epoch in range(start_epoch,epoch_num):
        # train phase
        exp_lr_scheduler.step(epoch)
        model.train(True)  # Set model to training mode


        for batch_cnt, data in enumerate(data_loader['train']):

            step+=1
            # Re-enable train mode: the validation pass below flips the model
            # to eval mode mid-epoch.
            model.train(True)
            # print data
            # imgs (N,3,336,336) Tensor
            # lm_masks (N,24,42,42) Tensor, Gaussian smoothed
            # vis_masks (N,24) Tensor
            # ori_size (N,2) numpy  ori img.shape[0], img.shape[1]
            # fms (N,24,3)  numpy  %
            # cate_idxs (24,) numpy
            imgs, lm_masks, vis_masks, ori_sizes, flms, cate_idxs = data


            imgs = Variable(imgs.cuda())
            lm_masks = Variable(lm_masks.cuda())
            vis_masks = Variable(vis_masks.cuda())

            # zero the parameter gradients
            optimizer.zero_grad()

            outputs, aux_out = model(imgs)

            # Resize predicted heatmaps to the target mask resolution if the
            # network outputs a different spatial size.
            if outputs.size() != lm_masks.size():
                outputs = F.upsample(outputs, size=lm_masks.size()[-2:], mode='bilinear')

            # Main-head loss plus auxiliary (deep-supervision) head loss.
            loss = criterion(outputs, lm_masks, vis_masks) + criterion(aux_out, lm_masks, vis_masks)

            loss.backward()
            optimizer.step()
            # Exponential moving average of the batch loss (alpha = 0.05).
            running_loss = running_loss*0.95 + 0.05*loss.data[0]

            if step % print_inter == 0:
                logging.info('%s [%d-%d] | batch-loss: %.3f | running-loss: %.3f'
                             % (dt(), epoch, batch_cnt, loss.data[0], running_loss))

            # plot image
            if step % (2*print_inter) == 0:
                smp_img = imgs[0]  # (3, H, W)
                flm = flms[0]
                true_hm = lm_masks[0]
                pred_hm = F.sigmoid(outputs[0])  # logits -> probabilities for display

                imgs_to_plot = getPlotImg(smp_img, pred_hm, true_hm, flm)

                # for TensorBoard: HWC uint8 [0,255] -> CHW float [0,1]
                imgs_to_plot = torch.from_numpy(imgs_to_plot.transpose((0,3,1,2))/255.0)
                grid_image = make_grid(imgs_to_plot, 2)
                writer.add_image('plotting',grid_image, step)
                writer.add_scalar('loss', loss.data[0],step)

            if step % val_inter == 0:
                # val phase (note: also triggers at step 0, before any update)
                model.train(False)
                num_vals = len(data_set['val'])

                preds = np.empty((num_vals, 24, 2))  # (N,24,2) x y
                labels = np.empty((num_vals, 24, 3))  # (N,24,3) x y vis
                val_ori_sizes = np.empty((num_vals, 2))
                val_true_flms = np.empty((num_vals, 24,3))
                cates = np.empty(num_vals)

                t0 = time.time()
                idx = 0

                for batch_cnt_val, data_val in enumerate(data_loader['val']):
                    imgs, lm_masks, vis_masks, ori_sizes, flms, cate_idxs = data_val
                    imgs = Variable(imgs.cuda())
                    # forward
                    outputs = F.sigmoid(model(imgs))

                    # post process and find peak points   (bs, C, 2)  x,y
                    batch_peaks = batch_postprocess(outputs.data.cpu().numpy(), ori_sizes, do_blur=False)

                    preds[idx: idx+imgs.size(0)] = batch_peaks
                    val_ori_sizes[idx: idx+imgs.size(0)] = ori_sizes
                    val_true_flms[idx: idx+imgs.size(0)] = flms
                    cates[idx: idx+imgs.size(0)] = cate_idxs

                    idx += imgs.size(0)

                # Ground-truth landmarks are stored as fractions of the original
                # image; scale back to pixel coordinates (width = ori_sizes[:,1],
                # height = ori_sizes[:,0]).
                labels[:, :, 0] = val_true_flms[:, :, 0] * val_ori_sizes[:, 1][:, np.newaxis]  # ori x coord
                labels[:, :, 1] = val_true_flms[:, :, 1] * val_ori_sizes[:, 0][:, np.newaxis]  # ori y coord
                labels[:, :, 2] = val_true_flms[:, :, 2]

                score, detail = cal_NEscore(preds.astype(int), labels.astype(int), cates)

                t1 = time.time()
                since = t1-t0
                logging.info('--'*30)
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                logging.info('%s epoch[%d] | val-NE@1: %.3f%% | time: %d'
                             % (dt(), epoch, score*100, since))

                # Per-category NE breakdown.
                for key in detail.keys():
                    logging.info('NE: %.3f%% support: %d  %s'%(detail[key][0]*100,detail[key][1], key))

                # Lower NE is better; snapshot the weights on improvement.
                if score < best_NE:
                    best_NE = score
                    best_model_wts = copy.deepcopy(model.state_dict())


                # save model
                save_path1 = os.path.join(save_dir,
                        'weights-%d-%d-[%.4f].pth'%(epoch,batch_cnt,score))
                torch.save(model.state_dict(), save_path1)
                save_path2 = os.path.join(save_dir,
                        'optimizer-state.pth')
                torch.save(optimizer.state_dict(), save_path2)

                logging.info('saved model to %s' % (save_path1))
                logging.info('--' * 30)

    # save best model
    save_path = os.path.join(save_dir,
                             'bestweights-[%.4f].pth' % (best_NE))
    torch.save(best_model_wts, save_path)
    logging.info('saved model to %s' % (save_path))

    return best_NE, best_model_wts