#coding=utf8
from __future__ import division
import torch
import os,time,datetime
from torch.autograd import Variable
import logging
import torch.nn.functional as F
import numpy as np
from math import ceil
import copy
from metrics import cal_mAP

def dt():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    now = datetime.datetime.now()
    return '{:%Y-%m-%d %H:%M:%S}'.format(now)



def trainlog(logfilepath, head='%(message)s'):
    """Configure root logging to write to a file and echo to the console.

    Args:
        logfilepath: path of the log file; ``logging.basicConfig`` creates
            or appends to it at INFO level.
        head: record format string, in ``logging.Formatter`` %-style syntax.

    Notes:
        ``basicConfig`` is a no-op when the root logger already has
        handlers, and every call here adds one more console handler, so
        call this at most once per process.
    """
    # (fix) dropped an unused local: the original fetched a 'mylogger'
    # logger and never used it — all output goes through the root logger.
    logging.basicConfig(filename=logfilepath, level=logging.INFO, format=head)

    # Mirror the file log onto stderr with the same format.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(head)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)


def train(model,
          epoch_num,
          start_epoch,
          optimizer,
          criterion,
          aux_criterion,
          exp_lr_scheduler,
          data_set,
          data_loader,
          save_dir,
          augloss=False,
          print_inter=200,
          val_inter=3500
          ):
    """Train/validate a model that has a main and an auxiliary classification head.

    Each batch unpacks to ``(inputs, attr, attr_mask, labels, labels_str,
    aux_label)`` and the model returns ``(outputs, aux_output)``; the total
    loss is ``criterion(outputs, labels) + aux_criterion(aux_output,
    aux_label)``. Every ``val_inter`` steps the full validation set is scored
    (top-1 accuracy plus mAP via ``cal_mAP``) and a checkpoint is saved to
    ``save_dir``; the weights with the best validation mAP are kept and saved
    again at the end.

    NOTE(review): this uses legacy PyTorch (<0.4) idioms — ``Variable`` and
    ``loss.data[0]`` — and unconditionally calls ``.cuda()``, so it only runs
    on a GPU with an old torch version; confirm before reuse.

    Args:
        model: network returning ``(outputs, aux_output)``; inputs are moved
            to CUDA, so the model is presumably already on GPU — confirm.
        epoch_num: exclusive upper bound of the epoch counter.
        start_epoch: epoch index to resume from.
        optimizer: torch optimizer over the model's parameters.
        criterion: main-head loss; ``labels`` appear to be one-hot style,
            judging by the ``torch.max(labels, 1)`` decoding below.
        aux_criterion: auxiliary-head loss.
        exp_lr_scheduler: LR scheduler; ``.step(epoch)`` is called per epoch.
        data_set: dict with a 'val' dataset exposing ``catlen`` and
            ``catidx_map`` (project-specific — not visible here).
        data_loader: dict with 'train' and 'val' DataLoaders.
        save_dir: directory where checkpoint ``.pth`` files are written.
        augloss: unsupported; passing True raises ``NotImplementedError``.
        print_inter: steps between training-batch log lines.
        val_inter: steps between validation passes.

    Returns:
        tuple: ``(best_acc, best_model_wts)`` — validation accuracy at the
        best-mAP checkpoint and a deep copy of that ``state_dict``.
    """

    # Fallback: if validation never improves, the initial weights are "best".
    best_model_wts = model.state_dict()
    best_acc = 0.0
    best_mAP = 0.0

    step = -1  # global batch counter across all epochs
    for epoch in range(start_epoch,epoch_num):
        # train phase
        exp_lr_scheduler.step(epoch)
        model.train(True)  # Set model to training mode


        for batch_cnt, data in enumerate(data_loader['train']):

            step+=1
            # Re-enter train mode: the validation branch below flips it off.
            model.train(True)
            inputs, attr, attr_mask, labels, labels_str, aux_label = data

            inputs = Variable(inputs.cuda())
            labels = Variable(labels.cuda())
            aux_label = Variable(aux_label.cuda())


            # zero the parameter gradients
            optimizer.zero_grad()

            # forward
            if augloss:
                # Augmented-loss variant was never implemented.
                raise NotImplementedError()
            else:
                outputs, aux_output = model(inputs)
                loss1 = criterion(outputs, labels)
                loss2 = aux_criterion(aux_output, aux_label)
                # Total loss = main head + auxiliary head (unweighted sum).
                loss = loss1 + loss2

            loss.backward()
            optimizer.step()

            # Periodic training-batch metrics (logging only, no state change).
            if step % print_inter == 0:
                # Mask out categories not valid for this attribute before argmax.
                attr_mask = Variable(attr_mask.cuda())
                _, preds = torch.max(outputs * attr_mask, 1)
                # NOTE(review): `labels` is rebound here from one-hot to class
                # indices — safe only because it is reloaded next batch.
                _, labels = torch.max(labels, 1)

                # aux_mask: rows whose aux one-hot sums to 0 are unlabeled and
                # excluded from the aux accuracy — presumably; confirm.
                aux_mask = torch.sum(aux_label, 1)
                _, aux_preds = torch.max(aux_output, 1)
                _, aux_label = torch.max(aux_label, 1)


                batch_corrects = torch.sum((preds == labels)).data[0]
                batch_acc = batch_corrects / (labels.size(0))

                batch_aux_corrects = (aux_preds == aux_label).float() * aux_mask
                batch_aux_acc = (torch.sum(batch_aux_corrects) / aux_mask.sum())

                logging.info('%s [%d-%d] | main-loss: %.3f | acc@1: %.3f | aux-loss: %.3f | aux-acc %.3f'
                             % (dt(), epoch, batch_cnt, loss1.data[0], batch_acc,loss2.data[0],batch_aux_acc.data[0]))


            # Periodic full validation pass + checkpoint.
            if step % val_inter == 0:
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                # val phase
                model.train(False)  # Set model to evaluate mode

                val_loss = 0
                val_corrects = 0
                # Number of val batches (last one may be partial).
                val_size = ceil(len(data_set['val']) / data_loader['val'].batch_size)

                # Preallocate per-sample buffers for the whole val set, filled
                # batch-by-batch via the running index `idx`.
                val_preds = np.zeros((len(data_set['val']),data_set['val'].catlen),dtype=np.float32)
                val_true = np.zeros((len(data_set['val']),data_set['val'].catlen),dtype=np.float32)
                val_attr = np.zeros(len(data_set['val']),dtype=np.int)
                val_attr_mask = np.zeros((len(data_set['val']),data_set['val'].catlen),dtype=np.int)
                # NOTE(review): dtype '|S1' keeps only the first byte of each
                # string — confirm cal_mAP expects single-character labels.
                val_labels_str = np.empty((len(data_set['val']),data_set['val'].catlen),dtype='|S1')
                val_aux_preds = np.zeros(len(data_set['val']),dtype=np.float32)
                val_aux_true = np.zeros(len(data_set['val']), dtype=np.float32)
                val_aux_mask = np.zeros(len(data_set['val']), dtype=np.float32)

                t0 = time.time()
                idx = 0  # write cursor into the preallocated buffers


                for batch_cnt_val, data_val in enumerate(data_loader['val']):
                    inputs, attr, attr_mask, labels, labels_str, aux_label = data_val

                    # NOTE(review): no `volatile=True`/`no_grad` here, so the
                    # val forward pass still builds autograd graphs — confirm
                    # whether that is intended (memory cost only).
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                    attr_mask = Variable(attr_mask.cuda())
                    aux_label = Variable(aux_label.cuda())



                    # forward
                    outputs, aux_output = model(inputs)

                    loss = criterion(outputs, labels)

                    # Decode masked main predictions and one-hot targets;
                    # `labels_yonly` keeps `labels` intact for the buffer copy.
                    _, preds = torch.max(outputs * attr_mask, 1)
                    _, labels_yonly = torch.max(labels, 1)
                    aux_mask = torch.sum(aux_label,1)
                    _, aux_preds = torch.max(aux_output, 1)
                    _, aux_label = torch.max(aux_label, 1)

                    # statistics
                    val_loss += loss.data[0]
                    batch_corrects = torch.sum((preds == labels_yonly)).data[0]

                    # Copy this batch's results into the full-set buffers.
                    val_preds[idx:(idx+labels.size(0))] = outputs.data.cpu().numpy()
                    val_true[idx:(idx + labels.size(0))] = labels.data.cpu().numpy()
                    val_attr[idx:(idx + labels.size(0))] = attr
                    val_attr_mask[idx:(idx + labels.size(0))] = attr_mask.data.cpu().numpy()
                    val_labels_str[idx:(idx + labels.size(0))] = labels_str

                    val_aux_preds[idx:(idx + labels.size(0))] = aux_preds.data.cpu().numpy()
                    val_aux_true[idx:(idx + labels.size(0))] = aux_label.data.cpu().numpy()
                    val_aux_mask[idx:(idx + labels.size(0))] = aux_mask.data.cpu().numpy()

                    val_corrects += batch_corrects

                    idx += labels.size(0)

                # Mean loss per batch; accuracy over the whole val set.
                val_loss = val_loss / val_size
                val_acc = 1.0 * val_corrects / len(data_set['val'])
                val_mAP,APs, accs = cal_mAP(val_labels_str,val_preds,val_attr, data_set['val'].catidx_map)
                # Aux accuracy restricted to samples with a valid aux label.
                val_aux_acc = ((val_aux_preds==val_aux_true) * val_aux_mask).sum() / val_aux_mask.sum()

                t1 = time.time()
                since = t1-t0
                logging.info('--'*30)
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())

                logging.info('%s epoch[%d]-val-loss: %.4f ||val-acc@1: %.4f ||val-mAP: %.4f val-aux-acc: %.4f||time: %d'
                             % (dt(), epoch, val_loss, val_acc,val_mAP, val_aux_acc,since))
                # Per-category AP/accuracy breakdown.
                for key in APs.keys():
                    logging.info('acc: %.4f, AP: %.4f %s'%(accs[key], APs[key],key))

                # Track the best checkpoint by validation mAP (not accuracy).
                if val_mAP > best_mAP:
                    best_mAP = val_mAP
                    best_acc = val_acc
                    best_model_wts = copy.deepcopy(model.state_dict())


                # save model
                save_path = os.path.join(save_dir,
                        'weights-%d-%d-[%.4f]-[%.4f].pth'%(epoch,batch_cnt,val_acc,val_mAP))
                torch.save(model.state_dict(), save_path)
                logging.info('saved model to %s' % (save_path))
                logging.info('--' * 30)

    # save best model
    save_path = os.path.join(save_dir,
                             'bestweights-[%.4f]-[%.4f].pth' % (best_acc,best_mAP))
    torch.save(best_model_wts, save_path)
    logging.info('saved model to %s' % (save_path))


    return best_acc,best_model_wts

