# coding=utf8
from __future__ import division
import torch
import os, time
from torch.autograd import Variable
import logging
import torch.nn.functional as F
import numpy as np
from logs import configlog
from torch import nn
from metrics import  *
import datetime

def trainlog(logfilepath, head='%(asctime)-15s %(message)s'):
    """Configure file logging via the project's ``configlog`` helper.

    Args:
        logfilepath: path of the log file to write to.
        head: logging format string (see ``logging.Formatter``).

    BUGFIX: ``head`` was previously ignored — a hard-coded copy of the
    default format was always forwarded, so callers could not customize it.
    """
    configlog(logfilepath, head=head)

def dt():
    """Return the current wall-clock time formatted as ``HH:MM:SS``."""
    now = datetime.datetime.now()
    return now.strftime('%H:%M:%S')

def _loss_scalar(loss):
    # torch >= 0.4 exposes Tensor.item(); fall back to the legacy
    # 0-dim indexing used by torch 0.3 so both API generations work.
    return loss.item() if hasattr(loss, 'item') else loss.data[0]


def _save_checkpoint(model, usecuda, save_path):
    # DataParallel (usecuda >= 2) wraps the real network in ``.module``;
    # unwrap it so the checkpoint loads into a plain model later.
    if usecuda >= 2:
        torch.save(model.module.state_dict(), save_path)
    else:
        torch.save(model.state_dict(), save_path)
    logging.info('saved model to %s' % (save_path))


def train(model,
          epoch_num,
          batch_size,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          dataset,
          data_loader,
          usecuda,
          save_inter,
          save_dir
          ):
    """Train a classifier, evaluating once per epoch and checkpointing.

    Args:
        model: network mapping an image batch to class logits.
        epoch_num: exclusive upper bound of the epoch counter.
        batch_size: samples per batch; only used to derive batches/epoch.
        start_epoch: epoch index to resume from.
        optimizer: torch optimizer over ``model``'s parameters.
        criterion: loss taking ``(logits, labels)``.
        exp_lr_scheduler: LR scheduler stepped at the start of each epoch.
        dataset: dict with 'train'/'val' datasets (used for sizes only).
        data_loader: dict with 'train'/'val' loaders yielding
            ``(imgs, labels)`` batches.
        usecuda: 0 = CPU, >= 1 = single GPU, >= 2 = DataParallel multi-GPU.
        save_inter: checkpoint every ``save_inter`` epochs (in the val phase).
        save_dir: directory receiving ``weights-<epoch>-[acc].pth`` files.

    BUGFIX: on the CPU path (``usecuda < 1``) labels were still moved to
    GPU via ``.cuda()``, crashing CPU-only runs.
    """
    for epoch in range(start_epoch, epoch_num):
        t_s = time.time()

        for phase in ['train', 'val']:
            if phase == 'train':
                exp_lr_scheduler.step(epoch)
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                model.train(True)  # training mode (dropout/BN active)
            else:
                model.train(False)  # evaluation mode

            epoch_loss = 0
            epoch_corrects = 0
            # number of full batches per epoch; guard against datasets
            # smaller than one batch (would divide by zero below)
            epoch_size = max(len(dataset[phase]) // batch_size, 1)

            t0 = time.time()
            for batch_cnt, data in enumerate(data_loader[phase]):
                t1 = time.time()
                since = t1 - t0  # seconds spent on the previous batch
                t0 = t1
                imgs, labels = data

                if usecuda >= 1:
                    imgs = Variable(imgs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    # CPU path: keep both tensors on the host
                    imgs = Variable(imgs)
                    labels = Variable(labels)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                outputs = model(imgs)
                loss = criterion(outputs, labels)
                _, preds = torch.max(outputs.data, 1)

                # backward + optimize only in the training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()

                # statistics
                loss_val = _loss_scalar(loss)
                epoch_loss += loss_val

                batch_corrects = torch.sum(preds == labels.data)
                batch_acc = batch_corrects / preds.size(0)
                epoch_corrects += batch_corrects

                # periodic batch-level progress log
                if (batch_cnt % 5 == 0) and phase == 'train':
                    logging.info(
                        'epoch[%d]-batch[%d] ||batch-loss: %.4f ||acc @1: %.3f ||%.3f sec/batch '
                        % (epoch, batch_cnt, loss_val, batch_acc, since))

            epoch_acc = epoch_corrects / len(dataset[phase])
            epoch_loss = epoch_loss / epoch_size

            if phase == 'train':
                logging.info('epoch[%d]-train-loss: %.4f||train-acc@1: %.4f '
                             % (epoch, epoch_loss, epoch_acc))

            if phase == 'val':
                logging.info('epoch[%d]-val-loss: %.4f ||val-acc@1 : %.4f'
                             % (epoch, epoch_loss, epoch_acc))
                # periodic checkpoint
                if epoch % save_inter == 0:
                    save_path = os.path.join(save_dir,
                                             'weights-%d-[%.4f].pth' % (epoch, epoch_acc))
                    _save_checkpoint(model, usecuda, save_path)
                t_e = time.time()
                logging.info('----time cost: %d sec' % (t_e - t_s))
                logging.info('===' * 20)

    # final checkpoint after the last epoch
    save_path = os.path.join(save_dir, 'weights-%d-[%.4f].pth' % (epoch, epoch_acc))
    _save_checkpoint(model, usecuda, save_path)


def train_eval_epoch(model,
                    optimizer,
                    criterion,
                    epoch,
                    exp_lr_scheduler,
                    dataset,
                    data_loader,
                    usecuda,
                    save_inter,
                    save_dir
                    ):
    """Run one training epoch for a metric-learning model, interleaving a
    pairwise verification evaluation every 200 training batches.

    ``model(imgs)`` is expected to return ``(embedding, logits)``.
    ``criterion`` is a pair: ``criterion[0]`` is the softmax loss on the
    logits, ``criterion[1]`` a regularizer on the embedding ("loose r").
    The val loader yields ``(imgs1, imgs2, pair_labels)``; cosine
    similarity between the two embeddings (``cal_cos`` from ``metrics``)
    is scored against the pair label with ROC-AUC and a best-threshold
    accuracy (``find_best_acc``).

    Args:
        model: network returning ``(embedding, logits)``.
        optimizer: torch optimizer over ``model``'s parameters.
        criterion: ``(softmax_loss, embedding_loss)`` pair.
        epoch: current epoch index (for the scheduler, logs, filenames).
        exp_lr_scheduler: LR scheduler stepped once at epoch start.
        dataset: dict with 'train'/'val' datasets (used for sizes).
        data_loader: dict with 'train'/'val' loaders.
        usecuda: truthy to run on GPU; <= 1 plain model, >= 2 DataParallel.
        save_inter: checkpoint every ``save_inter`` epochs.
        save_dir: directory receiving checkpoint files.

    BUGFIX: on the CPU path labels/images were still moved to GPU via
    ``.cuda()``, crashing CPU-only runs.
    """
    t_s = time.time()

    # ---- train phase ----
    exp_lr_scheduler.step(epoch)
    logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
    model.train(True)  # training mode

    epoch_loss = 0
    epoch_corrects = 0
    # defensive defaults so the checkpoint filename is well-defined even
    # if the val loader is empty and no eval ever runs
    best_acc, best_thresh = 0.0, 0.0

    t0 = time.time()
    for batch_cnt, data in enumerate(data_loader['train']):
        if batch_cnt >= 4000:
            break  # cap the epoch at 4000 batches
        t1 = time.time()
        since = t1 - t0  # seconds spent on the previous batch
        t0 = t1
        imgs, labels = data

        if usecuda:
            imgs = Variable(imgs.cuda())
            labels = Variable(labels.cuda())
        else:
            # CPU path: keep both tensors on the host
            imgs = Variable(imgs)
            labels = Variable(labels)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward: model returns (embedding, logits)
        emd, logits = model(imgs)
        loss1 = criterion[0](logits, labels)  # softmax loss
        loss2 = criterion[1](emd)  # loose r loss
        loss = loss1 + loss2
        _, preds = torch.max(logits.data, 1)

        # backward + optimize (always training here)
        loss.backward()
        optimizer.step()

        # statistics
        epoch_loss += loss.data[0]
        batch_corrects = torch.sum(preds == labels.data)
        batch_acc = batch_corrects / preds.size(0)
        epoch_corrects += batch_corrects

        if (batch_cnt % 200 == 0):
            logging.info(
                'epoch[%d]-batch[%d] ||batch-loss: %.4f ||acc @1: %.3f ||%.3f sec/batch '
                % (epoch, batch_cnt, loss.data[0], batch_acc, since))

            # ---- eval phase: pairwise verification on the val set ----
            model.eval()

            scores_all = np.zeros(len(dataset['val']))
            labels_all = np.zeros(len(dataset['val']))

            idx = 0
            # distinct names so the outer loop's `data`/`labels` are not
            # shadowed (the original reused them)
            for batch_cnt_val, data_val in enumerate(data_loader['val']):
                imgs1, imgs2, pair_labels = data_val

                if usecuda:
                    imgs1 = Variable(imgs1.cuda())
                    imgs2 = Variable(imgs2.cuda())
                else:
                    imgs1 = Variable(imgs1)
                    imgs2 = Variable(imgs2)

                # forward both sides of the pair; only embeddings needed
                emd1, _ = model(imgs1)
                emd2, _ = model(imgs2)

                emd1 = emd1.data.cpu().numpy()
                emd2 = emd2.data.cpu().numpy()
                # labels are never fed to the model, so no Variable/.cuda()
                pair_labels = pair_labels.cpu().numpy()

                scores = cal_cos(emd1, emd2)

                scores_all[idx:idx + pair_labels.shape[0]] = scores
                labels_all[idx:idx + pair_labels.shape[0]] = pair_labels
                idx = idx + pair_labels.shape[0]

            fpr, tpr, thresh = roc_curve(labels_all, scores_all)
            auc_score = auc(fpr, tpr)
            best_acc, best_thresh = find_best_acc(labels_all, scores_all)

            logging.info('---' * 20)
            logging.info('epoch[%d]-batch[%d] eval:' % (epoch, batch_cnt))
            logging.info('auc_score:\t %.5f' % auc_score)
            logging.info('best_acc:\t %.5f' % best_acc)
            logging.info('best_thresh\t %.5f' % best_thresh)
            logging.info('---' * 20)
            model.train()  # back to training mode

    epoch_acc = epoch_corrects / len(dataset['train'])
    logging.info('epoch[%d]||train-acc@1: %.4f '
                 % (epoch, epoch_acc))

    # save model; the most recent eval's metrics go in the filename
    if epoch % save_inter == 0:
        save_path = os.path.join(save_dir,
                                 'weights-%d-acc[%.4f]-thresh[%.4f].pth' % (
                                     epoch, best_acc, best_thresh))
        if usecuda >= 2:
            # DataParallel wraps the real network in .module
            torch.save(model.module.state_dict(), save_path)
        else:
            torch.save(model.state_dict(), save_path)
        logging.info('saved model to %s' % (save_path))
    t_e = time.time()
    logging.info('----time cost: %d sec' % (t_e - t_s))
    logging.info('===' * 20)


def train_eval_epoch_sphere(model,
                            optimizer,
                            criterion,
                            epoch,
                            exp_lr_scheduler,
                            dataset,
                            data_loader,
                            usecuda,
                            ):
    """One training epoch for a SphereFace-style angular-margin model.

    Batches are pulled from ``data_loader['train'].get()`` as numpy
    arrays until it returns ``None``. ``model`` returns a tuple whose
    first element is ``cos_theta`` (used for accuracy); ``criterion``
    exposes ``lamb`` and ``it``, which are included in the progress log
    every 500 batches. ``dataset`` is unused here; the parameter is kept
    for signature parity with the sibling epoch functions.
    """
    t_start = time.time()  # kept for parity with sibling loops

    # advance the LR schedule before consuming the feed
    exp_lr_scheduler.step(epoch)
    logging.info('current lr:%s' % exp_lr_scheduler.get_lr())

    running_loss = 0
    n_correct = 0
    n_seen = 0
    step = 0

    t_prev = time.time()

    # train: iterate until the feed signals exhaustion with None
    while True:
        img, label = data_loader['train'].get()
        if img is None:
            break

        inputs = torch.from_numpy(img).float()
        # first label column carries the class id
        targets = torch.from_numpy(label[:, 0]).long()
        if usecuda:
            inputs, targets = inputs.cuda(), targets.cuda()

        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)

        outputs = model(inputs)
        loss = criterion(outputs, targets)
        lossd = loss.data[0]
        loss.backward()
        optimizer.step()

        running_loss += loss.data[0]
        cos_theta = outputs[0]  # 0=cos_theta 1=phi_theta
        _, predicted = torch.max(cos_theta.data, 1)
        n_seen += targets.size(0)
        n_correct += predicted.eq(targets.data).cpu().sum()

        if step % 500 == 0:
            logging.info(' epoch=%d Loss=%.4f | AccT=%.4f%% (%d/%d) %.4f %.2f %d'
                % (epoch, running_loss / (step + 1), 100.0 * n_correct / n_seen, n_correct, n_seen,
                lossd, criterion.lamb, criterion.it))
        step += 1


