import json
import time
from datetime import datetime
import warnings
import os
warnings.filterwarnings("ignore")

import torchvision
import torchvision.transforms as transforms
import torch
import torch.nn as nn
import torch.optim as optim
import random
from logger import SummaryLogger
import utils
import utils
# from Models import *
import logging
import argparse
import model.data_loader as data_loader
import model.net_10_para as net10
import model.net_2_para as net2
import model.net_4_para as net4
from model.paraphraser import Paraphraser
from model.paraphraser import Translator
from model.paraphraser import Loss_student
from torch.optim.lr_scheduler import StepLR

# Command-line interface for the student/translator distillation run.
parser = argparse.ArgumentParser()
parser.add_argument('--teacher_net', default='net_4',
                    help="input Teacher's name {net_4, net_6, net_8, net_10, resnet50}")
parser.add_argument('--student_net', default='net_2',
                    # Fixed copy-paste bug: this option selects the *student*, not the teacher.
                    help="input Student's name {net_2, LeNet, resnet18}")
parser.add_argument('--model_dir', default='experiments/paraphrasing/4net2net/',
                    help="Directory containing params.json")
parser.add_argument('--rate', type=float, default=0.5, help='The paraphrase rate k')
parser.add_argument('--exp_name', default='cifar10/net4net2_Translator', type=str)



def eval(net, testloader):
    """Run one evaluation pass over `testloader`; print and return metrics.

    Args:
        net: model whose forward returns a sequence of outputs; the last
            element (outputs[-1]) is treated as the classification logits.
        testloader: iterable yielding (inputs, targets) batches.

    Returns:
        (avg_val_loss, accuracy) with accuracy in [0, 1].

    NOTE(review): the name shadows the builtin `eval`; kept for the existing
    caller in `train`.
    """
    flag = 'Test'

    epoch_start_time = time.time()
    net.eval()
    val_loss = 0

    correct = 0

    total = 0
    criterion_CE = nn.CrossEntropyLoss()

    # Evaluation needs no gradients; without no_grad() every batch builds an
    # autograd graph and memory grows for nothing.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            # Bug fix: `.cuda(async=True)` is a SyntaxError on Python >= 3.7
            # (`async` is a keyword); `non_blocking` is the supported kwarg.
            # Guarded so CPU-only machines can run evaluation too.
            if torch.cuda.is_available():
                inputs = inputs.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)
            outputs = net(inputs)

            loss = criterion_CE(outputs[-1], targets)
            val_loss += loss.item()

            # Consistency fix: use outputs[-1] for accuracy as well — the
            # original mixed outputs[2] (accuracy) with outputs[-1] (loss).
            _, predicted = torch.max(outputs[-1].data, 1)
            total += targets.size(0)

            correct += predicted.eq(targets.data).cpu().sum().float().item()
            b_idx = batch_idx

    print('%s \t Time Taken: %.2f sec' % (flag, time.time() - epoch_start_time))
    print('Val Loss: %.3f | Val Acc net: %.3f%%' % (val_loss / (b_idx + 1), 100. * correct / total))
    return val_loss / (b_idx + 1), correct / total


def train(teacher, paraphraser_t, student, translator_s, params, trainloader, testloader, optimizer_s, optimizer_module):
    """Train `student` (+ its factor `translator_s`) by factor transfer from
    a frozen `teacher` (+ its pretrained `paraphraser_t`).

    Args:
        teacher: frozen teacher network; forward returns a sequence where
            index 1 is the intermediate feature map fed to the paraphraser.
        paraphraser_t: pretrained teacher-side factor extractor (frozen).
        student: student network being trained.
        translator_s: student-side factor module, trained jointly.
        params: config object; only `num_epochs` is read here.
        trainloader / testloader: (inputs, targets) batch iterables.
        optimizer_s / optimizer_module: optimizers for student / translator.

    Returns:
        300 — a fixed tag the caller embeds in checkpoint filenames
        (kept as-is; changing it would rename the saved files).

    NOTE(review): writes per-epoch accuracy to logs/<path>/log.txt where
    `path` is a module-level global set in __main__ — confirm before reuse.
    """
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer_s, milestones=[25, 50, 75, 100, 150, 225], gamma=0.5)
    scheduler_module = optim.lr_scheduler.MultiStepLR(optimizer_module, milestones=[25, 50, 75, 100, 150, 225], gamma=0.5)
    for epoch in range(params.num_epochs):
        epoch_start_time = time.time()
        print('\n EPOCH: %d' % epoch)
        print('STUDENT LEARNING RATE = {}'.format(optimizer_s.state_dict()['param_groups'][0]['lr']))
        print('STUDENT TRANS LEARNING RATE = {}'.format(optimizer_module.state_dict()['param_groups'][0]['lr']))

        # Teacher side stays frozen; only student + translator learn.
        teacher.eval()
        paraphraser_t.eval()
        student.train()
        translator_s.train()

        train_loss = 0
        correct = 0
        total = 0

        for batch_idx, (inputs, targets) in enumerate(trainloader):
            # Bug fix: `.cuda(async=True)` is a SyntaxError on Python >= 3.7;
            # `non_blocking` is the supported kwarg. Guarded for CPU-only runs.
            if torch.cuda.is_available():
                inputs = inputs.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)
            optimizer_s.zero_grad()
            optimizer_module.zero_grad()

            ############## Intermediate-layer KD per the paper #################

            # Teacher-side forwards need no gradients: their parameters are
            # never stepped, so disabling autograd here saves memory without
            # changing the student/translator updates.
            with torch.no_grad():
                teacher_outputs = teacher(inputs)
                factor_t = paraphraser_t(teacher_outputs[1], 1)

            student_outputs = student(inputs)
            factor_s = translator_s(student_outputs[1])

            # Lstudent = Lcls + beta * LFT, with beta = 500.
            loss = Loss_student(500, factor_s, factor_t, student_outputs[-1], targets)

            ##########################################

            loss.backward()
            optimizer_s.step()
            optimizer_module.step()

            train_loss += loss.item()

            _, predicted = torch.max(student_outputs[-1].data, 1)
            total += targets.size(0)

            correct += predicted.eq(targets.data).cpu().sum().float().item()

            b_idx = batch_idx

        # Fix: step the LR schedulers *after* the optimizer steps of the epoch
        # (PyTorch >= 1.1 order; the old start-of-epoch call is deprecated and
        # silently skipped the first scheduled decay).
        scheduler.step()
        scheduler_module.step()

        print('Train s1 \t Time Taken: %.2f sec' % (time.time() - epoch_start_time))
        print('Train Loss: %.3f | Train Acc net: %.3f%%|' % (train_loss / (b_idx + 1), 100. * correct / total))

        ############### Evaluation ########################################

        val_loss, test_acc = eval(student, testloader)

        ###################################################################

        # `with` guarantees the log file is closed even if eval/write raises
        # (the original held it open for the whole epoch).
        with open(os.path.join("logs/" + path, 'log.txt'), "a") as f:
            f.write('EPOCH {epoch} \t'
                    'ACC_net : {acc_net:.4f} \t  \n'.format(epoch=epoch, acc_net=test_acc)
                    )

    return 300




if __name__ == '__main__':
    # Load the parameters from json file
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'train_paraphraser_params.json')
    assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # use GPU if available
    params.cuda = torch.cuda.is_available()
    print('the gpu is {}'.format(params.cuda))
    # NOTE(review): current_device() raises on CPU-only machines even though
    # params.cuda is checked elsewhere — confirm a GPU is always present.
    print('the current gpu is {}'.format(torch.cuda.current_device()))

    # Set the logger
    utils.set_logger(os.path.join(args.model_dir, 'Student_Transformer_train.log'))

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # fetch dataloaders, considering full-set vs. sub-set scenarios
    if params.subset_percent < 1.0:
        train_dl = data_loader.fetch_subset_dataloader('train', params)
    else:
        train_dl = data_loader.fetch_dataloader('train', params)

    dev_dl = data_loader.fetch_dataloader('dev', params)

    logging.info("- done.")

###################### Network configuration ##############################

    # Frozen teacher, restored from its pretrained checkpoint.
    Teacher = net4.Net(params)
    Teacher = Teacher.cuda() if params.cuda else Teacher
    teacher_checkpoint = 'experiments/base_cnn_4layer/best.pth.tar'
    utils.load_checkpoint(teacher_checkpoint, Teacher)

    # Teacher-side paraphraser compresses 64 channels by rate k.
    # NOTE(review): here the checkpoint is loaded *before* .cuda(), while the
    # teacher is loaded *after* — verify utils.load_checkpoint handles both.
    RATE = args.rate
    Paraphraser_t = Paraphraser(32, int(round(64*RATE)))
    Paraphraser_t_checkpoint = 'experiments/base_cnn_2layer/paraphraser/4net_Module/best.pth.tar'
    utils.load_checkpoint(Paraphraser_t_checkpoint, Paraphraser_t)
    Paraphraser_t = Paraphraser_t.cuda() if params.cuda else Paraphraser_t

    # Student and its translator are trained from scratch.
    Student = net2.Net(params)
    Translator_s = Translator(32, int(round(64*RATE)))
    Student = Student.cuda() if params.cuda else Student
    Translator_s = Translator_s.cuda() if params.cuda else Translator_s

############################################################################################################

    # Per-run output directories, timestamped to avoid collisions.
    EXPERIMENT_NAME = args.exp_name
    time_log = datetime.now().strftime('%m_%d_%H%M')
    folder_name = 'paraphraser_{}'.format(time_log)
    path = os.path.join(EXPERIMENT_NAME, folder_name)
    # exist_ok=True replaces the racy exists()+makedirs() pair.
    os.makedirs('ckpt/' + path, exist_ok=True)
    os.makedirs('logs/' + path, exist_ok=True)

    # Save argparse arguments as logging
    with open('logs/{}/commandline_args.txt'.format(path), 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    # Instantiate logger
    logger = SummaryLogger(path)

    ############ Optimizer setup ##########################################

    # SGD with momentum for both the student and its translator; the LR
    # schedule itself is built inside train().
    optimizer_s = optim.SGD(Student.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    optimizer_module = optim.SGD(Translator_s.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

    ##########################################################################################################

    epoch = train(Teacher, Paraphraser_t, Student, Translator_s, params, train_dl, dev_dl, optimizer_s, optimizer_module)

    # Persist final student and translator weights (+ optimizer state).
    utils.save_checkpoint({'epoch': epoch,
                           'state_dict': Student.state_dict(),
                           'optim_dict': optimizer_s.state_dict()},
                          is_best=True,
                          checkpoint='ckpt/' + path + '/Module_{}.pth'.format(epoch))

    utils.save_checkpoint({'epoch': epoch,
                           'state_dict': Translator_s.state_dict(),
                           'optim_dict': optimizer_module.state_dict()},
                          is_best=True,
                          checkpoint='ckpt/' + path + '/Translator_{}.pth'.format(epoch))