# coding=utf8
from __future__ import division
import torch
import os,time,datetime
from torch.autograd import Variable
import logging
import numpy as np
from math import ceil
from torch.nn import functional as F


def dt():
    """Current local time as a ``YYYY-MM-DD HH:MM:SS`` string (for log lines)."""
    return '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())


def trainlog(logfilepath, head='%(message)s'):
    """Route root-logger records to *logfilepath* and to the console.

    After this call, plain ``logging.info(...)`` (as used by ``train``)
    is written both to the log file and to stderr, formatted by *head*.

    Args:
        logfilepath: path of the log file handed to ``basicConfig``.
        head: logging format string applied to both destinations.

    NOTE: ``logging.basicConfig`` is a no-op if the root logger already
    has handlers, and the console handler is added unconditionally, so
    call this once, early in the process, to avoid duplicate output.
    """
    # (the original fetched logging.getLogger('mylogger') here but never
    # used it — dropped as dead code)
    logging.basicConfig(filename=logfilepath, level=logging.INFO, format=head)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(head))
    logging.getLogger('').addHandler(console)


def _encode_digit_labels(labels):
    """Split each ground-truth number into 3 per-position class ids.

    Each label (int or digit string, normally 1-3 characters) is
    left-padded with a blank marker to width 3; the blank maps to
    class 10, digits map to 0-9. Only the first 3 positions are kept
    (mirrors the original per-position ``if i == 0/1/2`` filter).

    Returns:
        (labels_0, labels_1, labels_2): lists of ints, one entry per
        sample, for the leftmost / middle / rightmost digit position.
    """
    labels_0, labels_1, labels_2 = [], [], []
    per_position = (labels_0, labels_1, labels_2)
    for one_label in labels:
        padded = str(one_label).rjust(3, '-')
        for pos in range(3):
            ch = padded[pos]
            per_position[pos].append(10 if ch == '-' else int(ch))
    return labels_0, labels_1, labels_2


def _decode_predictions(preds0, preds1, preds2):
    """Merge per-position argmax class ids back into integer predictions.

    Blank positions (class 10) are dropped before joining the digits.
    An all-blank prediction decodes to the sentinel 111 so that ``int``
    never sees an empty string (the original validation path crashed
    here). NOTE(review): the sentinel can collide with a genuine label
    of 111 — kept for backward compatibility with the training branch.

    Args:
        preds0, preds1, preds2: 1-D integer arrays of class ids
            (0-9 = digit, 10 = blank), one entry per sample.

    Returns:
        list of int predictions, one per sample.
    """
    decoded = []
    for d0, d1, d2 in zip(preds0, preds1, preds2):
        digits = ''.join(str(int(d)) for d in (d0, d1, d2) if int(d) != 10)
        decoded.append(int(digits) if digits else 111)
    return decoded


def train(model,
          epoch_num,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          data_set,
          data_loader,
          save_dir,
          print_inter=200,
          val_inter=3500
          ):
    """Train a three-head digit classifier (one head per digit position).

    Each ground-truth number is encoded as three class ids (blank = 10)
    and the loss is the sum of one cross-entropy term per head. Every
    ``print_inter`` steps the batch accuracy is logged; every
    ``val_inter`` steps a full validation pass runs and a checkpoint is
    saved when ``val_acc > 0.9 and epoch > 5``.

    Args:
        model: network returning three logits tensors (one per digit).
        epoch_num: exclusive upper bound of the epoch range.
        start_epoch: first epoch index.
        optimizer / criterion / exp_lr_scheduler: usual torch objects.
        data_set: dict with a ``'val'`` dataset (used for sizing only).
        data_loader: dict with ``'train'`` and ``'val'`` DataLoaders.
        save_dir: directory for checkpoint files.
        print_inter: steps between train-accuracy log lines.
        val_inter: steps between validation passes.

    Returns:
        Path of the last checkpoint file name built during validation
        (``None`` if no validation pass ever ran).

    NOTE(review): requires CUDA — inputs and labels are moved with
    ``.cuda()`` unconditionally, as in the original.
    """
    step = -1
    save_path = None  # fix: was unbound until the first validation pass
    for epoch in range(start_epoch, epoch_num):
        for batch_cnt, data in enumerate(data_loader['train']):
            step += 1
            model.train(True)  # the val pass below flips to eval mode; restore every step

            inputs, labels = data
            inputs = Variable(inputs.cuda())

            optimizer.zero_grad()
            out_x_0, out_x_1, out_x_2 = model(inputs)

            labels_0, labels_1, labels_2 = _encode_digit_labels(labels)
            labels_0 = Variable(torch.from_numpy(np.array(labels_0)).long().cuda())
            labels_1 = Variable(torch.from_numpy(np.array(labels_1)).long().cuda())
            labels_2 = Variable(torch.from_numpy(np.array(labels_2)).long().cuda())

            # one cross-entropy term per digit position
            loss = (criterion(out_x_0, labels_0)
                    + criterion(out_x_1, labels_1)
                    + criterion(out_x_2, labels_2))
            loss.backward()
            optimizer.step()

            # periodic train-batch accuracy (predictions only computed when needed;
            # the original also computed an unused set of argmaxes every step)
            if step % print_inter == 0:
                _, preds0 = torch.max(out_x_0, 1)
                _, preds1 = torch.max(out_x_1, 1)
                _, preds2 = torch.max(out_x_2, 1)
                preds_list = _decode_predictions(preds0.cpu().numpy(),
                                                 preds1.cpu().numpy(),
                                                 preds2.cpu().numpy())
                preds_t = torch.from_numpy(np.asarray(preds_list))
                gt_t = torch.from_numpy(np.asarray(labels))
                batch_corrects = torch.sum(preds_t == gt_t).item()
                batch_acc = batch_corrects / gt_t.size(0)

                logging.info('%s [%d-%d] | batch-loss: %.3f | batch_acc@1: %.3f'
                             % (dt(), epoch, batch_cnt, loss.item(), batch_acc))

            # periodic validation pass
            if step % val_inter == 0:
                model.train(False)  # evaluate mode

                val_loss = 0.0
                val_corrects = 0
                val_size = ceil(len(data_set['val']) / data_loader['val'].batch_size)

                t0 = time.time()
                for batch_cnt_val, data_val in enumerate(data_loader['val']):
                    inputs, val_labels = data_val
                    inputs = Variable(inputs.cuda())

                    out_x_0, out_x_1, out_x_2 = model(inputs)

                    labels_0, labels_1, labels_2 = _encode_digit_labels(val_labels)
                    labels_0 = Variable(torch.from_numpy(np.array(labels_0)).long().cuda())
                    labels_1 = Variable(torch.from_numpy(np.array(labels_1)).long().cuda())
                    labels_2 = Variable(torch.from_numpy(np.array(labels_2)).long().cuda())

                    # fix: the original *overwrote* val_loss each batch but still
                    # divided by val_size afterwards — accumulate instead
                    val_loss += (criterion(out_x_0, labels_0)
                                 + criterion(out_x_1, labels_1)
                                 + criterion(out_x_2, labels_2)).item()

                    _, preds0 = torch.max(out_x_0, 1)
                    _, preds1 = torch.max(out_x_1, 1)
                    _, preds2 = torch.max(out_x_2, 1)
                    # _decode_predictions also fixes the original crash on an
                    # all-blank prediction (int('') in the val branch)
                    preds_list = _decode_predictions(preds0.cpu().numpy(),
                                                     preds1.cpu().numpy(),
                                                     preds2.cpu().numpy())
                    preds_t = torch.from_numpy(np.asarray(preds_list))
                    gt_t = torch.from_numpy(np.asarray(val_labels))
                    val_corrects += torch.sum(preds_t == gt_t).item()

                val_loss = val_loss / val_size
                val_acc = 1.0 * val_corrects / len(data_set['val'])
                since = time.time() - t0

                logging.info('--' * 30)
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                logging.info('%s epoch[%d]-val-loss: %.4f ||val-acc@1: %.4f ||time: %d'
                             % (dt(), epoch, val_loss, val_acc, since))

                # checkpoint path is always rebuilt; the file is only written
                # past the accuracy/epoch gate below
                save_path = os.path.join(
                    save_dir,
                    'weights-{}-{}-[{}]-{}.pth'.format(epoch, batch_cnt, val_acc, str(time.time())))
                if val_acc > 0.9 and epoch > 5:
                    torch.save(model.state_dict(), save_path)
                    logging.info('%s saved model to %s' % (epoch, save_path))
                logging.info('--' * 30)
        exp_lr_scheduler.step(epoch)
    return save_path

