#coding=utf8
from __future__ import division
import torch
import os,time,datetime
from torch.autograd import Variable
import logging
import torch.nn.functional as F
import numpy as np
from math import ceil
import copy
from metrics import cal_mAP

def dt():
    """Return the current local time as a ``YYYY-MM-DD HH:MM:SS`` string.

    Used as a timestamp prefix in training log lines.
    """
    now = datetime.datetime.now()
    return '{:%Y-%m-%d %H:%M:%S}'.format(now)



def trainlog(logfilepath, head='%(message)s'):
    """Configure logging to write to *logfilepath* and echo records to the console.

    Args:
        logfilepath: path of the log file handed to ``logging.basicConfig``.
        head: log record format string (default: the bare message).

    Note:
        ``logging.basicConfig`` is a no-op once the root logger already has
        handlers, so call this exactly once, early in the program.  Each call
        still attaches one more console handler to the root logger.
    """
    logging.basicConfig(filename=logfilepath, level=logging.INFO, format=head)

    # Mirror INFO+ records to the console with the same format.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(head)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)


def train(model,
          epoch_num,
          start_epoch,
          FSoptimizer,
          LMoptimizer,
          FScriterion,
          LMcriterion,
          FSexp_lr_scheduler,
          LMexp_lr_scheduler,
          data_provider,
          save_dir,
          augloss=False,
          print_inter=200,
          val_inter=3500
          ):
    """Alternating two-task training loop over one shared model.

    Every pass of the ``while`` loop runs one "FS" classification step
    (``model(inputs, isClf=True)``); every second pass additionally runs one
    "LM" mask-prediction step first (``model(imgs, isClf=False)``).  Batch
    stats are logged every ``print_inter`` steps, and the FS ``'val'`` split
    is evaluated (loss, acc@1, and mAP via ``cal_mAP``) every ``val_inter``
    FS steps.  The loop stops once ``data_provider.FSepoch`` reaches
    ``epoch_num``.

    NOTE(review): written against a pre-0.4 PyTorch API (``Variable``,
    ``loss.data[0]``, ``F.upsample``) and unconditionally calls ``.cuda()``,
    so a GPU is required.

    NOTE(review): ``start_epoch``, ``save_dir`` and ``augloss`` are accepted
    but never used in this body, and ``best_model_wts`` / ``best_acc`` /
    ``best_mAP`` are initialised but never updated or saved -- checkpointing
    appears to be missing or to live elsewhere; confirm.

    Args:
        model: network exposing ``model(x, isClf=bool)``; ``isClf=True``
            yields classification logits, ``isClf=False`` mask predictions.
        epoch_num: stop once ``data_provider.FSepoch`` reaches this value.
        start_epoch: unused here (see note above).
        FSoptimizer: optimizer for the FS (classification) step.
        LMoptimizer: optimizer for the LM (mask) step.
        FScriterion: loss called as ``FScriterion(outputs, labels)``.
        LMcriterion: loss called as ``LMcriterion(outputs, lm_masks, vis_masks)``.
        FSexp_lr_scheduler: LR scheduler stepped with ``data_provider.FSepoch``.
        LMexp_lr_scheduler: LR scheduler stepped with ``data_provider.LMepoch``.
        data_provider: supplies ``FSnext()`` / ``LMnext()`` batches, the
            epoch / iteration counters read below, and the FS val
            dataset/dataloader used during validation.
        save_dir: unused here (see note above).
        augloss: unused here (see note above).
        print_inter: log a batch summary every this many steps (per task).
        val_inter: run FS validation every this many FS steps.
    """

    best_model_wts = model.state_dict()  # NOTE(review): never updated below
    best_acc = 0.0
    best_mAP = 0.0

    FSstep = -1
    LMstep = -1
    # Seed for the exponential moving average of the LM loss; the large
    # initial value decays quickly under the 0.95/0.05 update below.
    running_loss = 9999
    while data_provider.FSepoch < epoch_num:

        # LM step: runs on every second pass.  FSstep is -1 on the first
        # pass (-1 % 2 == 1 in Python), so the very first iteration is
        # FS-only.
        if FSstep % 2 == 0:
            LMstep += 1
            LMexp_lr_scheduler.step(data_provider.LMepoch)
            model.train(True)  # Set model to training mode
            batch_data = data_provider.LMnext()
            imgs, lm_masks, vis_masks, ori_sizes, flms, cate_idxs = batch_data

            imgs = Variable(imgs.cuda())
            lm_masks = Variable(lm_masks.cuda())
            vis_masks = Variable(vis_masks.cuda())

            # zero the parameter gradients
            LMoptimizer.zero_grad()

            # isClf=False -> mask-prediction output
            outputs = model(imgs, isClf=False)

            # NOTE(review): ``outputs[0].size()`` is the size of the first
            # element of ``outputs`` while ``lm_masks.size()`` includes the
            # batch dim, so if ``outputs`` is a plain tensor this condition
            # is always true and upsampling always runs.  Presumably that is
            # tolerated (or ``outputs`` can be non-batched here) -- confirm
            # against the model definition.
            if outputs[0].size() != lm_masks.size():
                outputs = F.upsample(outputs, size=lm_masks.size()[-2:], mode='bilinear')
            loss = LMcriterion(outputs, lm_masks, vis_masks)
            loss.backward()
            LMoptimizer.step()

            # Exponential moving average of the LM batch loss.
            running_loss = running_loss * 0.95 + 0.05 * loss.data[0]


            if LMstep % print_inter == 0:
                logging.info('%s LM[%d-%d] | batch-loss: %.3f | running-loss: %.3f'
                             % (dt(), data_provider.LMepoch, data_provider.LMiteration,
                                loss.data[0], running_loss))


        # FS step: runs every pass of the loop.
        FSstep += 1
        FSexp_lr_scheduler.step(data_provider.FSepoch)
        model.train(True)  # Set model to training mode

        batch_data = data_provider.FSnext()
        inputs, attr, attr_mask, labels, labels_str = batch_data

        inputs = Variable(inputs.cuda())
        labels = Variable(labels.cuda())

        # zero the parameter gradients
        FSoptimizer.zero_grad()

        outputs = model(inputs, isClf=True)   # isClf=True -> classification logits
        loss = FScriterion(outputs, labels)


        # batch loss
        if FSstep % print_inter == 0:
            # Mask out categories not allowed for this attribute before the
            # argmax; labels are one-hot, so argmax recovers the class index.
            attr_mask = Variable(attr_mask.cuda())
            _, preds = torch.max(outputs * attr_mask, 1)
            _, labels = torch.max(labels, 1)

            batch_corrects = torch.sum((preds == labels)).data[0]
            batch_acc = batch_corrects / (labels.size(0))

            logging.info('%s FS[%d-%d] | batch-loss: %.3f | acc@1: %.3f'
                         % (dt(), data_provider.FSepoch, data_provider.FSiteration,
                            loss.data[0], batch_acc))


        # do val
        if FSstep % val_inter == 0:

            # FS dataset
            logging.info('current lr:%s' % FSexp_lr_scheduler.get_lr())
            # val phase
            model.eval()  # Set model to evaluate mode

            val_loss = 0
            val_corrects = 0
            val_len = len(data_provider.FSdataset['val'])
            val_bs = data_provider.FSdataloader['val'].batch_size
            val_catlen = data_provider.FSdataset['val'].catlen
            val_catidx_map =  data_provider.FSdataset['val'].catidx_map

            # Number of val batches (last one may be partial).
            val_size = ceil(val_len / val_bs)

            # Preallocate per-sample buffers for the whole val split.
            # NOTE(review): ``np.int`` was removed in NumPy >= 1.24; this
            # only works on the older stack this script targets.
            val_preds = np.zeros((val_len, val_catlen), dtype=np.float32)
            val_true = np.zeros((val_len, val_catlen), dtype=np.float32)
            val_attr = np.zeros(val_len, dtype=np.int)
            val_attr_mask = np.zeros((val_len, val_catlen), dtype=np.int)
            val_labels_str = np.empty((val_len, val_catlen),dtype='|S1')



            t0 = time.time()
            idx = 0  # write cursor into the preallocated buffers

            for batch_cnt_val, data_val in enumerate(data_provider.FSdataloader['val']):

                # print data
                inputs, attr, attr_mask, labels, labels_str = data_val

                inputs = Variable(inputs.cuda())
                labels = Variable(labels.cuda())
                attr_mask = Variable(attr_mask.cuda())


                # forward
                outputs = model(inputs)

                loss = FScriterion(outputs, labels)

                # Masked argmax for predictions; one-hot labels -> class index.
                _, preds = torch.max(outputs * attr_mask, 1)
                _, labels_yonly = torch.max(labels, 1)

                # statistics
                val_loss += loss.data[0]
                batch_corrects = torch.sum((preds == labels_yonly)).data[0]

                val_preds[idx:(idx+labels.size(0))] = outputs.data.cpu().numpy()
                val_true[idx:(idx + labels.size(0))] = labels.data.cpu().numpy()
                val_attr[idx:(idx + labels.size(0))] = attr
                val_attr_mask[idx:(idx + labels.size(0))] = attr_mask.data.cpu().numpy()
                val_labels_str[idx:(idx + labels.size(0))] = labels_str

                val_corrects += batch_corrects

                idx += labels.size(0)

            # Mean loss over batches; accuracy over samples.
            val_loss = val_loss / val_size
            val_acc = 1.0 * val_corrects / val_len
            val_mAP, APs, accs = cal_mAP(val_labels_str,val_preds,val_attr, val_catidx_map)

            t1 = time.time()
            since = t1-t0
            logging.info('--'*30)
            logging.info('current lr:%s' % FSexp_lr_scheduler.get_lr())

            logging.info('%s FSepoch[%d]-val-loss: %.4f ||val-acc@1: %.4f ||val-mAP: %.4f ||time: %d'
                         % (dt(), data_provider.FSepoch, val_loss, val_acc,val_mAP, since))
            # Per-category accuracy and average precision.
            for key in APs.keys():
                logging.info('acc: %.4f, AP: %.4f %s'%(accs[key], APs[key],key))


