from __future__ import division
import torch
import os,time,datetime
from torch.autograd import Variable
import logging
import torch.nn.functional as F
import numpy as np
from math import ceil
import copy
from logs import *
from utils.preprocessing import *
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from utils.plotting import *
from utils.metrix import *

def dt():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS' for log lines."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')

def trainlog(logfilepath, head='%(message)s'):
    """Configure root logging to write to a file and echo records to the console.

    Args:
        logfilepath: path of the log file handed to ``logging.basicConfig``.
        head: logging format string applied to both the file and console output.
    """
    # basicConfig attaches a FileHandler to the root logger; the previous
    # `logging.getLogger('mylogger')` call was dead code (result unused) and
    # has been removed.
    logging.basicConfig(filename=logfilepath, level=logging.INFO, format=head)

    # Mirror INFO-level records to the console with the same format.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(head)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)


def train(model,
          epoch_num,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          multi_loader,
          val_data_set,
          val_data_loader,
          save_dir,
          print_inter=200,
          val_inter=3500
          ):
    """Train a landmark-heatmap model with a multi-dataset loader, periodically
    validating with the NE (normalized error) score and checkpointing.

    Args:
        model: network producing heatmap outputs for input images (CUDA is used).
        epoch_num: virtual-epoch count at which training stops.
        start_epoch: virtual epoch to resume from (sets multi_loader.VirEpoch).
        optimizer: torch optimizer stepped once per batch.
        criterion: loss callable taking (outputs, lm_masks, vis_masks).
        exp_lr_scheduler: LR scheduler stepped with the current virtual epoch.
        multi_loader: project loader exposing VirEpoch, Step, num_datasets and
            next(i); presumably .next() advances Step/VirEpoch internally —
            TODO confirm, otherwise this loop never terminates.
        val_data_set: validation dataset (only len() is used here).
        val_data_loader: iterable of validation batches.
        save_dir: directory for TensorBoard logs and checkpoints.
        print_inter: batch-loss logging interval (in steps).
        val_inter: NOTE(review): dead parameter — it is unconditionally
            overwritten by the Step-based schedule below before first use.

    Returns:
        (best_acc, best_model_wts).
        NOTE(review): best_acc is initialized to 0.0 and never updated; only
        best_NE drives best-model selection. Callers should not rely on it.
    """
    writer = SummaryWriter(save_dir)
    best_model_wts = model.state_dict()
    best_acc = 0.0
    best_NE = float('inf')  # lower NE is better; track the minimum seen


    # Resume the loader's virtual-epoch counter; the loop runs one batch per
    # iteration until the loader reports epoch_num virtual epochs.
    multi_loader.VirEpoch = start_epoch-1
    while multi_loader.VirEpoch < epoch_num:
        exp_lr_scheduler.step(multi_loader.VirEpoch)
        model.train()  # re-enable train mode (val phase below switches to eval)

        # which loader to provide data
        i = multi_loader.Step % multi_loader.num_datasets
        batch = multi_loader.next(i)
        imgs, lm_masks, vis_masks, ori_sizes, flms, cate_idxs = batch

        # Legacy PyTorch (<0.4) Variable wrappers; training requires CUDA.
        imgs = Variable(imgs.cuda())
        lm_masks = Variable(lm_masks.cuda())
        vis_masks = Variable(vis_masks.cuda())

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward
        outputs = model(imgs)

        # Resize predictions to the target-mask resolution when they differ.
        # NOTE(review): the size check reads outputs[0] but upsample is applied
        # to `outputs` itself — this only works if outputs is a single tensor
        # (outputs[0] being its first sample); confirm model's return type.
        # F.upsample is the legacy name of F.interpolate.
        if outputs[0].size() != lm_masks.size():
            outputs = F.upsample(outputs, size=lm_masks.size()[-2:], mode='bilinear')

        loss = criterion(outputs, lm_masks, vis_masks)

        loss.backward()
        optimizer.step()

        # batch loss
        if multi_loader.Step % print_inter == 0:

            # loss.data[0] is the legacy (<0.4) scalar accessor (now .item()).
            logging.info('%s [%d-%d] | batch-loss: %.3f '
                        % (dt(), multi_loader.VirEpoch, multi_loader.Step,
                           loss.data[0]))

        # # plot image
        # if multi_loader.Step % (2 * print_inter) == 0:
        #     smp_img = imgs[0]  # (3, H, W)
        #     flm = flms[0]
        #     true_hm = lm_masks[0]
        #     pred_hm = F.sigmoid(outputs[0])
        #
        #     imgs_to_plot = getPlotImg(smp_img, pred_hm, true_hm, flm)
        #
        #     # for TensorBoard
        #     imgs_to_plot = torch.from_numpy(imgs_to_plot.transpose((0, 3, 1, 2)) / 255.0)
        #     grid_image = make_grid(imgs_to_plot, 2)
        #     writer.add_image('plotting', grid_image, multi_loader.Step)
        #     writer.add_scalar('loss', loss.data[0], multi_loader.Step)

        # do eval
        # Validation frequency increases as training progresses; this schedule
        # always overrides the val_inter argument (see docstring note).
        if multi_loader.Step <= 50000:
            val_inter = 6000
        elif 50000 < multi_loader.Step < 70000:
            val_inter = 4000
        else:
            val_inter = 2500
        if multi_loader.Step % val_inter == 0:
            # val phase
            model.train(False)  # eval mode; restored by model.train() next loop
            num_vals = len(val_data_set)

            # Pre-allocated result buffers; 24 landmarks per sample with
            # (x, y) predictions and (x, y, vis) ground truth.
            preds = np.empty((num_vals, 24, 2))  # (N,24,2) x y
            labels = np.empty((num_vals, 24, 3))  # (N,24,3) x y vis
            val_ori_sizes = np.empty((num_vals, 2))
            val_true_flms = np.empty((num_vals, 24, 3))
            cates = np.empty(num_vals)

            t0 = time.time()
            idx = 0
            for batch_cnt_val, data_val in enumerate(val_data_loader):
                imgs, lm_masks, vis_masks, ori_sizes, flms, cate_idxs = data_val
                imgs = Variable(imgs.cuda())
                # forward
                outputs = F.sigmoid(model(imgs))

                # post process and find peak points   (bs, C, 2)  x,y
                batch_peaks = batch_postprocess(outputs.data.cpu().numpy(), ori_sizes, do_blur=False)

                # Fill this batch's slice of each result buffer.
                preds[idx: idx + imgs.size(0)] = batch_peaks
                val_ori_sizes[idx: idx + imgs.size(0)] = ori_sizes
                val_true_flms[idx: idx + imgs.size(0)] = flms
                cates[idx: idx + imgs.size(0)] = cate_idxs

                idx += imgs.size(0)

            # Scale normalized landmark coords back to original image pixels;
            # ori_sizes appears to be (H, W): column 1 scales x, column 0
            # scales y — TODO confirm against the dataset code.
            labels[:, :, 0] = val_true_flms[:, :, 0] * val_ori_sizes[:, 1][:, np.newaxis]  # ori x coord
            labels[:, :, 1] = val_true_flms[:, :, 1] * val_ori_sizes[:, 0][:, np.newaxis]  # ori y coord
            labels[:, :, 2] = val_true_flms[:, :, 2]

            # Normalized-error score plus a per-category breakdown dict.
            score, detail = cal_NEscore(preds.astype(int), labels.astype(int), cates)

            t1 = time.time()
            since = t1 - t0
            logging.info('--' * 30)
            logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
            logging.info('%s epoch[%d] | val-NE@1: %.3f%% | time: %d'
                         % (dt(), multi_loader.VirEpoch, score * 100, since))

            # detail maps category key -> (NE score, support count).
            for key in detail.keys():
                logging.info('NE: %.3f%% support: %d  %s' % (detail[key][0] * 100, detail[key][1], key))

            # Keep the weights with the lowest NE seen so far.
            if score < best_NE:
                best_NE = score
                best_model_wts = copy.deepcopy(model.state_dict())

            # save model
            # Checkpoint every validation regardless of improvement, plus the
            # optimizer state (overwritten each time).
            save_path1 = os.path.join(save_dir,
                                      'weights-%d-%d-[%.4f].pth' % (multi_loader.VirEpoch,
                                                                    multi_loader.Step, score))
            torch.save(model.state_dict(), save_path1)
            save_path2 = os.path.join(save_dir,
                                      'optimizer-state.pth')
            torch.save(optimizer.state_dict(), save_path2)

            logging.info('saved model to %s' % (save_path1))
            logging.info('--' * 30)

    # save best model
    save_path = os.path.join(save_dir,
                             'bestweights-[%.4f].pth' % (best_NE))
    torch.save(best_model_wts, save_path)
    logging.info('saved model to %s' % (save_path))


    # NOTE(review): best_acc is always 0.0 here (never assigned after init).
    return best_acc,best_model_wts