#coding=utf8
from __future__ import division
import torch
import os,time,datetime
from torch.autograd import Variable
import logging
import torch.nn.functional as F
import numpy as np
from math import ceil
from sklearn.metrics import f1_score, precision_recall_fscore_support, classification_report
import copy

def dt():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')

def onehot(labels, num_class=4):
    """Convert a 1-D tensor of class indices into a one-hot FloatTensor.

    Args:
        labels: 1-D integer tensor of class indices, shape (N,).
        num_class: width of the one-hot encoding.

    Returns:
        FloatTensor of shape (N, num_class): 1.0 at each row's label
        column, 0.0 everywhere else.
    """
    index = labels.unsqueeze(1)
    encoded = torch.zeros(labels.size(0), num_class)
    encoded.scatter_(1, index, 1.)
    return encoded


def Mixup(data1, data2,  alpha, num_class):
    """Blend two (inputs, labels) batches with a Beta(alpha, alpha) weight.

    Args:
        data1: (inputs, labels) pair; labels is a 1-D integer tensor.
        data2: second (inputs, labels) pair of the same shapes.
        alpha: concentration parameter of the Beta distribution.
        num_class: number of classes for one-hot encoding the labels.

    Returns:
        (mixed_inputs, mixed_onehot_targets): convex combinations of the
        two batches using a single lambda drawn per call.
    """
    inputs_a, targets_a = data1
    inputs_b, targets_b = data2

    idx_a = targets_a.unsqueeze(1)
    idx_b = targets_b.unsqueeze(1)

    # One-hot encode both label batches.
    oh_a = torch.zeros(idx_a.size(0), num_class)
    oh_a.scatter_(1, idx_a, 1.)

    oh_b = torch.zeros(idx_b.size(0), num_class)
    oh_b.scatter_(1, idx_b, 1.)

    # Single mixing coefficient shared by inputs and targets.
    lam = np.random.beta(alpha, alpha)

    mixed_x = lam * inputs_a + (1. - lam) * inputs_b
    mixed_y = lam * oh_a + (1. - lam) * oh_b

    return mixed_x, mixed_y


def trainlog(logfilepath, head='%(message)s'):
    """Route logging output to a file and mirror it to the console.

    Args:
        logfilepath: path of the log file passed to logging.basicConfig.
        head: logging format string applied to both handlers.

    Side effects:
        Configures the root logger (basicConfig is a no-op if the root
        logger already has handlers) and appends a StreamHandler to it,
        so repeated calls add duplicate console handlers.
    """
    # Fixed: the original fetched logging.getLogger('mylogger') into a
    # local that was never used; the dead assignment is removed.
    logging.basicConfig(filename=logfilepath, level=logging.INFO, format=head)

    # Echo every INFO+ record to the console so progress is visible live.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(head)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)


def train(model,
          epoch_num,
          batch_size,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          data_set,
          data_loader,
          usecuda,
          save_inter,
          save_dir,
          augloss=False,
          mixup_inter=1,
          mixup_max=30
          ):
    """Train `model` with optional per-epoch mixup, validating and
    checkpointing every 3500 training steps.

    Args:
        model: network called as model(inputs); when `augloss` is True it
            must return (outputs, aux) instead of a single tensor.
        epoch_num: exclusive upper bound for the epoch counter.
        batch_size: not referenced in this body (kept for the caller's API).
        start_epoch: epoch index to resume from.
        optimizer: torch optimizer, stepped once per training batch.
        criterion: loss taking (outputs, one_hot_targets).
        exp_lr_scheduler: LR scheduler driven as step(epoch) — the legacy
            (pre-1.x) scheduler calling convention.
        data_set: dict; data_set['val'] must expose __len__ and .classes
            (used below for sizing buffers and classification_report).
        data_loader: dict; data_loader['train'] must yield PAIRS of
            (inputs, labels) batches per iteration (unpacked as
            data1, data2 below); data_loader['val'] yields plain
            (inputs, labels) batches.
        usecuda: move inputs/targets to GPU when True.
        save_inter: not referenced in this body.
        save_dir: directory where checkpoint .pth files are written.
        augloss: if True, adds an auxiliary-head loss term.
        mixup_inter: mixup runs on epochs divisible by this interval.
        mixup_max: last epoch (inclusive) on which mixup may run.

    Returns:
        (best_acc, best_model_wts) — best validation macro-F1 seen (despite
        the name, this is F1, not accuracy) and a deepcopy of the matching
        state_dict.

    NOTE(review): this body uses pre-0.4 PyTorch idioms — `Variable`,
    scalar access via `loss.data[0]`, and `F.softmax(outputs)` with no
    `dim` argument. On modern torch the `.data[0]` indexing raises and
    `softmax` without `dim` warns/changes meaning — confirm the intended
    torch version before touching this.
    """

    best_model_wts = model.state_dict()
    best_acc = 0.0  # tracks best validation macro-F1 (see val_f1 below)

    step = -1  # global batch counter across all epochs
    for epoch in range(start_epoch,epoch_num):
        # Mixup is enabled on epochs that hit the interval and have not
        # passed mixup_max.
        if (epoch % mixup_inter == 0) and (epoch<=mixup_max):
            do_mixup = 1
        else:
            do_mixup = 0



        # train phase
        # Legacy scheduler API: epoch passed explicitly to step().
        exp_lr_scheduler.step(epoch)
        model.train(True)  # Set model to training mode

        # for data1, data2 in zip(data_loader['train1'],data_loader['train2']):
        for batch_cnt,data in enumerate(data_loader['train']):
            step+=1
            # The train loader yields two batches at once so mixup always
            # has a partner batch available.
            data1, data2 = data

            if do_mixup == 1:
                # Mixed soft targets; alpha/num_class are hard-coded here.
                inputs, labels_oh = Mixup(data1, data2,alpha=0.1,num_class=4)
            else:
                # No mixup: use the first batch only, one-hot its labels.
                inputs, labels = data1
                labels_oh = onehot(labels, num_class=4)

            # Re-assert training mode: the validation pass further down
            # (every 3500 steps) switches the model to eval.
            model.train(True)
            if usecuda:
                inputs = Variable(inputs.cuda())
                labels_oh = Variable(labels_oh.cuda())

            else:
                inputs = Variable(inputs)
                labels_oh = Variable(labels_oh)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward
            if augloss:
                # Auxiliary head: both heads trained against the same targets.
                outputs,aux = model(inputs)
                loss = criterion(outputs, labels_oh) + criterion(aux, labels_oh)
            else:
                outputs = model(inputs)

                loss = criterion(outputs, labels_oh)

            # Hard labels recovered via argmax — for mixup batches this
            # picks the dominant class, used only for the F1 logging below.
            _, labels = torch.max(labels_oh, 1)
            _, preds = torch.max(F.softmax(outputs), 1)
            loss.backward()
            optimizer.step()

            # batch loss
            # Log training progress every 200 global steps.
            if step % 200 == 0:
                # batch_corrects = torch.sum((preds == labels)).data[0]
                # batch_acc = batch_corrects / (labels.size(0))
                batch_f1 = f1_score(labels.data.cpu().numpy(), preds.data.cpu().numpy(), average='macro')

                # Per-class counts of this batch's (possibly mixup-argmax) labels.
                unique, counts = np.unique(labels.data.cpu().numpy(), return_counts=True)

                logging.info('%s [%d-%d] | batch-loss: %.3f | f1: %.3f | %s | mixup = %d'
                             % (dt(), epoch, batch_cnt, loss.data[0], batch_f1, counts, do_mixup))


            # Full validation pass (and checkpoint) every 3500 global steps.
            if step % 3500 == 0:
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                # val phase
                model.train(False)  # Set model to evaluate mode

                val_loss = 0
                val_corrects = 0
                # Number of validation batches, used to average val_loss.
                val_size = ceil(len(data_set['val']) / data_loader['val'].batch_size)
                # +100 is a sentinel: any slot never written stands out as
                # class 100 instead of silently counting as class 0.
                val_preds = np.zeros(len(data_set['val']))+100
                val_true = np.zeros(len(data_set['val']))+100
                t0 = time.time()
                idx = 0  # write cursor into val_preds/val_true


                for batch_cnt_val, data_val in enumerate(data_loader['val']):
                    # print data
                    inputs, labels = data_val
                    labels_oh = onehot(labels, num_class=4)

                    if usecuda:
                        inputs = Variable(inputs.cuda())
                        labels_oh = Variable(labels_oh.cuda())
                        labels = Variable(labels.cuda())

                    else:
                        inputs = Variable(inputs)
                        labels_oh = Variable(labels_oh)
                        labels = Variable(labels)

                    # forward
                    # NOTE(review): no torch.no_grad()/volatile guard here,
                    # so the forward pass builds graphs during validation.
                    outputs = model(inputs)


                    loss = criterion(outputs, labels_oh)

                    _, preds = torch.max(F.softmax(outputs), 1)

                    # statistics
                    val_loss += loss.data[0]
                    batch_corrects = torch.sum((preds == labels)).data[0]

                    # Accumulate per-sample predictions/targets for the
                    # epoch-level F1 and classification report.
                    val_preds[idx:(idx+labels.size(0))] = preds.data.cpu().numpy()
                    val_true[idx:(idx + labels.size(0))] = labels.data.cpu().numpy()

                    val_corrects += batch_corrects

                    idx += labels.size(0)

                val_loss = val_loss / val_size
                val_f1 = f1_score(val_true, val_preds,average='macro')
                val_report = classification_report(val_true, val_preds, target_names=data_set['val'].classes)
                # Distribution of predicted classes (reveals collapsed predictions).
                unique, counts = np.unique(val_preds, return_counts=True)

                t1 = time.time()
                since = t1-t0
                logging.info('--'*30)
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                logging.info('%s'%val_report)
                logging.info('pred unique: %s' % unique)
                logging.info('pred count: %s'%counts)
                logging.info('%s epoch[%d]-val-loss: %.4f ||val-f1@1 : %.4f||time: %d'
                             % (dt(), epoch, val_loss, val_f1, since))
                logging.info('--' * 30)

                # Keep a snapshot of the best-by-F1 weights for the return value.
                if val_f1 > best_acc:
                    best_acc = val_f1
                    best_model_wts = copy.deepcopy(model.state_dict())


                # save model
                # NOTE: `batch_cnt` in the filename is the current training
                # batch index leaking in from the outer loop.
                save_path = os.path.join(save_dir,
                        'weights-%d-%d-[%.4f].pth'%(epoch,batch_cnt,val_f1))
                torch.save(model.state_dict(), save_path)
                logging.info('saved model to %s' % (save_path))

    # save best model
    save_path = os.path.join(save_dir,
                             'bestweights-[%.4f].pth' % (best_acc))
    torch.save(best_model_wts, save_path)
    logging.info('saved model to %s' % (save_path))


    return best_acc,best_model_wts

