import os
import numpy as np
import logging

import torch
import torch.nn as nn
from torch.autograd import Variable

from Tools.opts_train import pair2_parser
from Tools.TFLogger import TFLogger
from Tools.utils import AverageMeter

from Config.Config import TFLOG_ROOT_PATH

from FeatureExtruct.DPN107_RGB.RGB_SinglePicture_Dataset import RGB_Single_Frame_Dataset

# Parse command-line options declared in Tools.opts_train.pair2_parser.
args = pair2_parser.parse_args()

##########################################
# set parameters (module-level constants used by the training loop below)

SAVE_DIR        = args.savedir        # directory for checkpoints and the run log
EX_NAME         = args.exname         # experiment name; names the TF log subdirectory
# NOTE(review): `x or default` treats 0 / 0.0 as "unset" and silently falls
# back to the default — fine if 0 is never a legal value; verify the parser.
EPOCHS          = args.epochs or 80
TRAIN_ITER_NUM  = 1000                # training iterations per epoch
VAL_ITER_NUM    = 3                   # validation iterations per eval pass
LR              = args.lr or 0.1      # initial learning rate (mutated on decay)
LR_DELAY_RATE   = args.lr_delay_rate or 0.1   # multiplicative LR decay factor
MOMENTUM        = args.momentum or 0.9        # SGD momentum
LR_STEPS        = args.lr_steps       # epoch ids at which LR is decayed
PRINT_FREQ      = args.print_freq     # log every N iterations
EVAL_FREQ       = 20                  # run validation every N training iterations
RESUME_CKPT     = args.resume_ckpt    # optional state-dict checkpoint to resume from
BASE_STEP       = args.basestep       # starting global step for TF logging

###########################################
# set log

# BUG FIX: the exists()+mkdir() pair was racy and mkdir() fails when the
# parent directory is missing; makedirs(exist_ok=True) handles both.
os.makedirs(SAVE_DIR, exist_ok=True)

# File logger: INFO and above goes to run_rank.log inside the save directory.
logger = logging.getLogger('dpn107_rank_2_fineturn_Test1')
logger.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(SAVE_DIR,'run_rank.log'))
formatter = logging.Formatter('%(asctime)s:%(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)

# TensorBoard-style scalar logger for this experiment.
tflog = TFLogger(os.path.join(TFLOG_ROOT_PATH,EX_NAME))

##########################################
# prepare model

# BUG FIX: these used the bare `logging.info`, which goes to the unconfigured
# root logger (WARNING level) and was silently dropped; route them through
# the file-handler `logger` like the rest of the script.
logger.info('ARG SETTING: {}'.format(args))
logger.info('PREPARE MODEL...')

# load dataset (project-local dataset class; args are split name + mode)
train_dataset = RGB_Single_Frame_Dataset('training','train')
val_dataset   = RGB_Single_Frame_Dataset('validation','test')

# load model dpn107
# NOTE(review): loads a fully pickled model from a hard-coded absolute path —
# consider moving this path into the argument parser.
model = torch.load('/mnt/md1/Experiments/DPN_Extruct_200_Test1/raw_dpn107_rank_model.pkl')
model = nn.DataParallel(model).cuda()
crossloss = nn.CrossEntropyLoss().cuda()

# BUG FIX: momentum was hard-coded to 0.9, ignoring the parsed MOMENTUM value.
optim = torch.optim.SGD(model.parameters(),lr=LR,momentum=MOMENTUM)

##########################################

def addjust_optime_lr(model,epochid):
    """Decay the global learning rate when `epochid` is one of LR_STEPS.

    Returns a fresh SGD optimizer built over `model.parameters()` with the
    decayed LR, or None when this epoch is not a decay step (the caller
    then keeps its current optimizer).
    """
    global LR,LR_DELAY_RATE,LR_STEPS

    if epochid in LR_STEPS:
        LR = LR*LR_DELAY_RATE
        # BUG FIX: was `logging.info`, which hits the unconfigured root
        # logger (WARNING level) and never reached the run log file.
        logger.info('Change Learing Rate to: {}'.format(LR))
        # BUG FIX: momentum was hard-coded to 0.9, ignoring MOMENTUM.
        optim = torch.optim.SGD(model.parameters(),lr=LR,momentum=MOMENTUM)
        return optim
    return None

def save_model(model,epochid):
    """Write the model's state dict to SAVE_DIR, named by epoch id."""
    ckpt_path = os.path.join(
        SAVE_DIR,
        'dpn107_Rank_fineturn_2_model_{:03d}.ckpt'.format(epochid),
    )
    torch.save(model.state_dict(), ckpt_path)

##########################################
# train
if RESUME_CKPT is not None:
    # Resume from a state dict previously written by save_model().
    model.load_state_dict(torch.load(RESUME_CKPT))

logger.info('START TRAINING...')
train_step = BASE_STEP
val_step   = BASE_STEP

for ep in range(EPOCHS):

    train_loss_meta = AverageMeter()
    val_loss_meta = AverageMeter()

    logger.info('EPOCH: [{}/{}]'.format(ep,EPOCHS))

    model.train()
    for idx in range(TRAIN_ITER_NUM):

        model.zero_grad()
        inputx, labels = train_dataset.nextbatch_flow()
        # Collapse all positive classes to 1: this is trained as a binary task.
        labels[labels>0] = 1
        x = Variable(inputx).cuda()
        labels = Variable(torch.from_numpy(labels)).cuda().long()

        predict = model(x)

        loss = crossloss(predict,labels)
        loss.backward()
        train_loss_meta.update(loss.data[0])
        optim.step()

        train_step += 1
        tflog.scalar_summary('train_loss',loss.data[0],train_step)

        if idx%PRINT_FREQ == 0:
            logger.info('Epoch {} {}/{} current_train_loss: {} loss_avg: {avg_train_loss.avg}'.format(ep,idx, TRAIN_ITER_NUM, loss.data[0], avg_train_loss=train_loss_meta))


        if idx%EVAL_FREQ == 0 :
            # eval
            model.eval()
            # BUG FIX: the inner loop reused `idx`, shadowing the outer
            # training index; renamed to `vidx`.
            for vidx in range(VAL_ITER_NUM):

                inputx, labels = val_dataset.nextbatch_flow()
                labels[labels>0] = 1
                # volatile=True: inference-only graph (pre-0.4 PyTorch idiom).
                x = Variable(inputx,volatile=True).cuda()
                labels = Variable(torch.from_numpy(labels),volatile=True).cuda().long()

                predict = model(x)
                loss = crossloss(predict,labels)
                val_loss_meta.update(loss.data[0])

                val_step += 1
                tflog.scalar_summary('val_loss',loss.data[0],val_step)

                if vidx%PRINT_FREQ == 0:
                    logger.info('Epoch {} {}/{} current_val_loss: {} loss_avg: {avg_val_loss.avg}'.format(ep, vidx, VAL_ITER_NUM, loss.data[0], avg_val_loss=val_loss_meta))

            # BUG FIX: the model was left in eval mode after the first eval
            # pass (idx == 0), so dropout/batch-norm ran in inference mode
            # for almost all of training; restore training mode here.
            model.train()


    logger.info('Epoch {} Train avg loss: {}'.format(ep, train_loss_meta.avg))
    # Swap in a new optimizer only on LR-decay epochs.
    noptim = addjust_optime_lr(model,ep)
    if noptim is not None: optim = noptim
    logger.info('Epoch {} Val avg loss: {}'.format(ep, val_loss_meta.avg))
    # save model
    save_model(model,ep)
