import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import logging
import os

from DataSet.Dataset_TSNscore_Pair import Dataset_TSNscore_Pair
from Model.origin_tcn import TCN_Pair2
from Tools.utils import AverageMeter

from Tools.opts_train import pair2_parser
args = pair2_parser.parse_args()

##########################################
# set parameters (from CLI args, with fallbacks)

SAVE_DIR        = args.savedir
EPOCHS          = args.epochs or 50
TRAIN_ITER_NUM  = 1000      # training batches per epoch
VAL_ITER_NUM    = 200       # validation batches per eval pass
LR              = args.lr or 0.1
LR_DELAY_RATE   = args.lr_delay_rate or 0.1   # multiplicative LR decay factor
MOMENTUM        = args.momentum or 0.9
LR_STEPS        = args.lr_steps               # epochs at which LR is decayed
PRINT_FREQ      = args.print_freq
EVAL_FREQ       = args.eval_freq

###########################################
# set log
# makedirs(..., exist_ok=True) is atomic w.r.t. the existence check and
# also handles nested save paths (os.mkdir would raise on either).
os.makedirs(SAVE_DIR, exist_ok=True)

logger = logging.getLogger('train_pair2')
logger.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(SAVE_DIR, 'run.log'))
formatter = logging.Formatter('%(asctime)s:%(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)


##########################################
# prepare model

# BUGFIX: use the configured file logger -- logging.info() goes to the
# root logger, which has no handler here (basicConfig is never called),
# so these messages were silently dropped from run.log.
logger.info('ARG SETTING: {}'.format(args))

logger.info('PREPARE MODEL...')

train_dataset = Dataset_TSNscore_Pair('training', 'train')
val_dataset   = Dataset_TSNscore_Pair('validation', 'train')
# val_dataset_fortest = Dataset_TSNscore('validation','test')

model = TCN_Pair2().cuda()
model = nn.DataParallel(model)
crossloss = nn.CrossEntropyLoss()

# BUGFIX: honor the parsed MOMENTUM setting (was hard-coded to 0.9,
# silently ignoring the --momentum command-line option).
optim = torch.optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM)

##########################################

def addjust_optime_lr(model, epochid):
    """Decay the learning rate at scheduled epochs.

    If ``epochid`` is listed in LR_STEPS, multiply the global LR by
    LR_DELAY_RATE and return a fresh SGD optimizer built with the new
    rate; otherwise return None so the caller keeps its current optimizer.
    """
    global LR, LR_DELAY_RATE, LR_STEPS
    if epochid in LR_STEPS:
        LR = LR * LR_DELAY_RATE
        # BUGFIX: log via the configured file logger; logging.info() hits
        # the unconfigured root logger and the message is lost.
        logger.info('Change Learning Rate to: {}'.format(LR))
        # BUGFIX: honor the configured MOMENTUM instead of hard-coded 0.9.
        return torch.optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM)
    return None

def save_model(model, epochid):
    """Write a checkpoint of ``model``'s weights for epoch ``epochid`` into SAVE_DIR."""
    ckpt_name = 'pair2_model_{}.ckpt'.format(epochid)
    ckpt_path = os.path.join(SAVE_DIR, ckpt_name)
    torch.save(model.state_dict(), ckpt_path)

##########################################
# train

# BUGFIX: use the configured file logger (root logger has no handler).
logger.info('START TRAINING...')

for ep in range(EPOCHS):

    train_loss_meta = AverageMeter()
    val_loss_meta = AverageMeter()

    logger.info('EPOCH: [{}/{}]'.format(ep, EPOCHS))

    # ---- training phase ----
    model.train()
    for idx in range(TRAIN_ITER_NUM):

        # model.zero_grad() clears grads of all model params (equivalent to
        # optim.zero_grad() here since the optimizer holds the same params).
        model.zero_grad()
        inputx, labels = train_dataset.nextbatch()
        # Split dim 2 into the two pair branches
        # (assumes 24 channels: first 12 -> x1, last 12 -> x2 -- TODO confirm)
        x1 = Variable(torch.from_numpy(inputx[:, :, :12, :])).cuda().float()
        x2 = Variable(torch.from_numpy(inputx[:, :, 12:, :])).cuda().float()
        labels = Variable(torch.from_numpy(labels)).cuda().long()
        predict = model(x1, x2)
        loss = crossloss(predict, labels)
        loss.backward()
        train_loss_meta.update(loss.data[0])  # legacy (pre-0.4) scalar access
        optim.step()

        if idx % PRINT_FREQ == 0:
            logger.info('{}/{} current_loss: {} loss_avg: {avg_train_loss.avg}'.format(idx, TRAIN_ITER_NUM, loss.data[0], avg_train_loss=train_loss_meta))

    logger.info('Epoch {} Train avg loss: {}'.format(ep, train_loss_meta.avg))

    # Step the LR schedule; swap in the fresh optimizer if one was built.
    noptim = addjust_optime_lr(model, ep)
    if noptim is not None:
        optim = noptim

    # Checkpoint + validate every EVAL_FREQ epochs (skipping epoch 0).
    # BUGFIX: original condition `ep%EVAL_FREQ and ep != 0` fired on every
    # epoch that was NOT a multiple of EVAL_FREQ -- the opposite of intent.
    if ep % EVAL_FREQ == 0 and ep != 0:
        # save model
        save_model(model, ep)
        # eval (model.train() at the top of the next epoch restores training mode)
        model.eval()
        for idx in range(VAL_ITER_NUM):

            inputx, labels = val_dataset.nextbatch()

            # volatile=True disables autograd history for inference (pre-0.4 API)
            x1 = Variable(torch.from_numpy(inputx[:, :, :12, :]), volatile=True).cuda().float()
            x2 = Variable(torch.from_numpy(inputx[:, :, 12:, :]), volatile=True).cuda().float()
            labels = Variable(torch.from_numpy(labels), volatile=True).cuda().long()

            predict = model(x1, x2)
            loss = crossloss(predict, labels)
            val_loss_meta.update(loss.data[0])

            if idx % PRINT_FREQ == 0:
                logger.info('{}/{} current_loss: {} loss_avg: {avg_val_loss.avg}'.format(idx, VAL_ITER_NUM, loss.data[0], avg_val_loss=val_loss_meta))

        logger.info('Epoch {} Val avg loss: {}'.format(ep, val_loss_meta.avg))