# coding=utf8
from __future__ import division
import torch
import os, time, datetime
from torch.autograd import Variable
import logging
import torch.nn.functional as F
import numpy as np
from math import ceil
import copy
from metrics import cal_mAP
from predicting import predict


def dt():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')


def trainlog(logfilepath, head='%(message)s'):
    """Configure root logging to write to `logfilepath` and echo to stderr.

    Args:
        logfilepath: path of the log file handed to ``logging.basicConfig``.
        head: logging format string applied to both file and console output.

    Side effects: mutates the root logger. ``basicConfig`` is a no-op if the
    root logger already has handlers, and the console handler is appended
    unconditionally, so calling this twice duplicates console output.
    NOTE(review): left as-is to preserve existing behavior.
    """
    # Fix: the original bound logging.getLogger('mylogger') to an unused
    # local; all actual logging in this module goes through the root logger.
    logging.basicConfig(filename=logfilepath, level=logging.INFO, format=head)

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(head))
    logging.getLogger('').addHandler(console)


def train(model,
          epoch_num,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          data_set,
          data_loader,
          save_dir,
          augloss=False,
          print_inter=200,
          val_inter=3500
          ):
    """Run the training loop with periodic validation and checkpointing.

    Args:
        model: network moved to CUDA by the caller; called as ``model(inputs)``.
            With ``augloss=True`` it must return ``(outputs, aux)``.
        epoch_num: exclusive upper bound of the epoch range.
        start_epoch: first epoch index (supports resuming).
        optimizer: torch optimizer over ``model``'s parameters.
        criterion: loss callable returning ``(loss, hard_idx)`` — a scalar loss
            plus indices of hard examples within the batch (project-defined;
            inferred from the unpacking below — confirm against its definition).
        exp_lr_scheduler: LR scheduler stepped once per epoch (old-style
            ``step(epoch)`` call, kept for compatibility).
        data_set: dict with a ``'val'`` dataset, forwarded to ``predict``.
        data_loader: dict with ``'train'`` and ``'val'`` loaders. Train batches
            yield ``(inputs, attr, attr_mask, labels, labels_str)``; ``labels``
            appears to be one-hot (argmax is taken before comparing) — verify.
        save_dir: directory for checkpoint files.
        augloss: if True, also penalize the auxiliary head's output.
        print_inter: log batch loss/accuracy every this many steps.
        val_inter: validate and checkpoint every this many steps.

    Returns:
        (best_acc, best_model_wts): accuracy at the best-mAP validation and a
        deep copy of the corresponding state dict.
    """
    best_model_wts = model.state_dict()
    best_acc = 0.0
    best_mAP = 0.0

    step = -1

    for epoch in range(start_epoch, epoch_num):
        # train phase
        exp_lr_scheduler.step(epoch)
        model.train(True)  # Set model to training mode
        # Hard-example replay buffers, flushed every 8 collected batches.
        hard_examples = []
        hard_labels = []

        for batch_cnt, data in enumerate(data_loader['train']):

            step += 1
            model.train(True)
            inputs, attr, attr_mask, labels, labels_str = data

            inputs = Variable(inputs.cuda())
            labels = Variable(labels.cuda())

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward
            if augloss:
                outputs, aux = model(inputs)
                # BUG FIX: the original wrote
                #   loss, hard_idx = criterion(outputs, labels) + criterion(aux, labels)
                # which CONCATENATES the two (loss, hard_idx) tuples into a
                # 4-tuple and makes the 2-value unpack raise ValueError.
                # Unpack each call and sum the loss tensors instead.
                loss, hard_idx = criterion(outputs, labels)
                aux_loss, _ = criterion(aux, labels)
                loss = loss + aux_loss
                # NOTE(review): hard examples are intentionally not collected
                # in this branch (matches original behavior).
            else:
                outputs = model(inputs)
                loss, hard_idx = criterion(outputs, labels)
                hard_examples.append(inputs[hard_idx])
                hard_labels.append(labels[hard_idx])
            loss.backward()
            optimizer.step()

            # batch loss
            if step % print_inter == 0:
                # Mask out attributes not applicable to this sample before
                # taking the top-1 prediction.
                attr_mask = Variable(attr_mask.cuda())
                _, preds = torch.max(outputs * attr_mask, 1)
                # labels look one-hot here; argmax recovers the class index.
                _, labels = torch.max(labels, 1)

                batch_corrects = torch.sum((preds == labels)).data[0]
                batch_acc = batch_corrects / (labels.size(0))

                logging.info('%s [%d-%d] | batch-loss: %.3f | acc@1: %.3f'
                             % (dt(), epoch, batch_cnt, loss.data[0], batch_acc))

            # Once 8 batches of hard examples accumulate, run one extra
            # optimization step on them, then reset the buffers.
            if len(hard_examples) == 8:
                optimizer.zero_grad()
                hard_examples = torch.cat(hard_examples, dim=0)
                hard_labels = torch.cat(hard_labels, dim=0)
                outputs = model(hard_examples)
                loss, _ = criterion(outputs, hard_labels)
                loss.backward()
                optimizer.step()
                hard_examples = []
                hard_labels = []

            if step % val_inter == 0:
                logging.info('--' * 30)
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                # `whileTraing` is the (misspelled) keyword of the project's
                # predict(); renaming it here would break the call.
                val_mAP, val_acc = predict(model, data_set['val'], data_loader['val'], whileTraing=True)

                # Model selection is by mAP; accuracy is recorded alongside.
                if val_mAP > best_mAP:
                    best_mAP = val_mAP
                    best_acc = val_acc
                    best_model_wts = copy.deepcopy(model.state_dict())

                # save model
                save_path = os.path.join(save_dir,
                                         'weights-%d-%d-[%.4f]-[%.4f].pth' % (epoch, batch_cnt, val_acc, val_mAP))
                torch.save(model.state_dict(), save_path)
                logging.info('saved model to %s' % (save_path))
                logging.info('--' * 30)

    # save best model
    save_path = os.path.join(save_dir,
                             'bestweights-[%.4f]-[%.4f].pth' % (best_acc, best_mAP))
    torch.save(best_model_wts, save_path)
    logging.info('saved model to %s' % (save_path))

    return best_acc, best_model_wts
