import json
import time
from datetime import datetime
import warnings
import os
warnings.filterwarnings("ignore")

import torchvision
import torchvision.transforms as transforms
import torch
import torch.nn as nn
import torch.optim as optim
import random
from logger import SummaryLogger
import utils
import utils
# from Models import *
import logging
import argparse
import model.data_loader as data_loader
import model.net_10_para as net10
import model.net_4_para as net4
from model.paraphraser import Paraphraser
from torch.optim.lr_scheduler import StepLR

# Command-line options for paraphraser training.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--teacher_net',
    default='net_4',
    help="input Teacher's name {net_4, net_6, net_8, net_10}",
)
parser.add_argument(
    '--model_dir',
    default='experiments/paraphrasing/4net2net/',
    help="Directory containing params.json",
)
# k: fraction of the teacher's feature channels kept by the paraphraser.
parser.add_argument('--rate', default=0.5, type=float, help='The paraphrase rate k')
parser.add_argument('--exp_name', default='cifar10/net4_Paraphraser', type=str)



def train(model, module, params, trainloader, optimizer):
    """Train the paraphraser `module` to reconstruct the teacher's features.

    The teacher `model` is frozen (eval mode); only `module` is optimized,
    minimizing the L1 distance between the paraphraser's output and the
    teacher's penultimate feature map (`outputs[-2]`, detached so no
    gradient flows into the teacher).

    Args:
        model: frozen teacher network; `model(x)` returns a sequence whose
            second-to-last element is the feature map to paraphrase.
        module: paraphraser to train; called as `module(features, 0)`.
        params: config object with `num_epochs` and (optionally) `cuda`.
        trainloader: iterable of `(inputs, targets)` batches.
        optimizer: optimizer over `module`'s parameters.

    Returns:
        The epoch index at which the average L1 loss first dropped below
        0.15 (early stop), otherwise `params.num_epochs`.
    """
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[75, 150, 225], gamma=0.5)
    # Hoisted out of the batch loop: no need to rebuild the loss module per batch.
    criterion = nn.L1Loss()
    # Respect the cuda flag set in __main__ instead of crashing on CPU-only hosts.
    use_cuda = getattr(params, 'cuda', False)

    for epoch in range(params.num_epochs):
        epoch_start_time = time.time()
        print('\n EPOCH: %d' % epoch)
        print('LEARNING RATE = {}'.format(optimizer.state_dict()['param_groups'][0]['lr']))
        model.eval()
        module.train()

        train_loss = 0
        b_idx = -1  # stays -1 if the loader is empty (was a NameError before)

        for batch_idx, (inputs, targets) in enumerate(trainloader):
            if use_cuda:
                # `async=True` is a SyntaxError on Python 3.7+; the modern
                # spelling is `non_blocking=True`.
                inputs = inputs.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)
            optimizer.zero_grad()

            outputs = model(inputs)
            output_p = module(outputs[-2], 0)
            # Detach the teacher target so gradients only update the paraphraser.
            loss = criterion(output_p, outputs[-2].detach())

            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            b_idx = batch_idx

        # Step the LR schedule AFTER the epoch's optimizer steps, per
        # PyTorch >= 1.1 semantics (stepping first shifted milestones by one).
        scheduler.step()

        print('Train s1 \t Time Taken: %.2f sec' % (time.time() - epoch_start_time))
        avgloss = train_loss / (b_idx + 1) if b_idx >= 0 else float('inf')
        print('Loss: %.3f | ' % avgloss)
        # Early stop once the paraphraser reconstructs well enough.
        if avgloss < 0.15:
            return epoch

    # Was a hard-coded 300; returning the configured epoch count generalizes.
    return params.num_epochs




if __name__ == '__main__':
    # Load the training hyperparameters from the experiment's json file.
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'train_paraphraser_params.json')
    assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # use GPU if available
    params.cuda = torch.cuda.is_available()
    print('the gpu is {}'.format(params.cuda))
    # NOTE(review): torch.cuda.current_device() raises on CPU-only machines
    # even though params.cuda was just checked — confirm intended.
    print('the current gpu is {}'.format(torch.cuda.current_device()))

    # Set the logger
    utils.set_logger(os.path.join(args.model_dir, 'Paraphraser_train.log'))

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # fetch dataloaders, considering full-set vs. sub-set scenarios
    if params.subset_percent < 1.0:
        train_dl = data_loader.fetch_subset_dataloader('train', params)
    else:
        train_dl = data_loader.fetch_dataloader('train', params)

    # NOTE(review): dev_dl is built but never used below — dead work unless a
    # later evaluation step was removed; verify.
    dev_dl = data_loader.fetch_dataloader('dev', params)

    logging.info("- done.")

    # Frozen teacher: a pretrained 4-layer CNN restored from its best checkpoint.
    model = net4.Net(params)
    model = model.cuda() if params.cuda else model
    teacher_checkpoint = 'experiments/base_cnn_4layer/best.pth.tar'
    utils.load_checkpoint(teacher_checkpoint, model)

    # k: fraction of the teacher's 64 feature channels the paraphraser keeps.
    RATE = args.rate

    # Paraphraser over a 32-channel input, compressing to round(64 * k) channels.
    # (Presumably matches the teacher's penultimate feature map — confirm
    # against net_4_para's architecture.)
    Paraphraser_t = Paraphraser(32, int(round(64*RATE)))
    Paraphraser_t = Paraphraser_t.cuda() if params.cuda else Paraphraser_t

    # Per-run output directories, timestamped to avoid collisions:
    # ckpt/<exp_name>/paraphraser_<MM_DD_HHMM>/ and logs/... .
    EXPERIMENT_NAME = args.exp_name
    time_log = datetime.now().strftime('%m_%d_%H%M')
    folder_name = 'paraphraser_{}'.format(time_log)
    path = os.path.join(EXPERIMENT_NAME, folder_name)
    if not os.path.exists('ckpt/' + path):
        os.makedirs('ckpt/' + path)
    if not os.path.exists('logs/' + path):
        os.makedirs('logs/' + path)

    # Save argparse arguments as logging
    with open('logs/{}/commandline_args.txt'.format(path), 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    # Instantiate logger
    logger = SummaryLogger(path)

    # Only the paraphraser's parameters are optimized; the teacher stays frozen.
    optimizer = optim.SGD(Paraphraser_t.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

    # train() returns the epoch at which the loss threshold was reached
    # (or the full epoch count); recorded in the checkpoint below.
    epoch = train(model, Paraphraser_t, params, train_dl, optimizer)

    # NOTE(review): `checkpoint=` receives a file-like path ('.../Module_N.pth');
    # utils.save_checkpoint implementations often expect a directory here —
    # verify against utils.
    utils.save_checkpoint({'epoch': epoch,
                           'state_dict': Paraphraser_t.state_dict(),
                           'optim_dict': optimizer.state_dict()},
                          is_best=True,
                          checkpoint='ckpt/' + path + '/Module_{}.pth'.format(epoch))