"""Main entrance for PPGN on tensorflow flowers"""

# import tool as tl
import argparse
import logging
import os
import time
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.autograd import Variable
from tqdm import tqdm
import random
import create_gauss_hot as gh
import utils
import model.PPGNets as ppgn
import model.net_10 as net10
import model.net as net
import model.net_2 as net2
import model.net_4 as net4
import model.net_6 as net6
import model.net_8 as net8
import model.tf_flower_dataloader as tfl_data_loader
from evaluate import evaluate, evaluate_kd
import csv
import ls_loss

# ---- command-line interface -------------------------------------------
parser = argparse.ArgumentParser()

# Experiment directory; must contain a params.json file.
parser.add_argument('--model_dir', default='experiments/base_model',
                    help="Directory containing params.json")

# Checkpoint name to restore before training, e.g. 'best' or 'train'.
parser.add_argument('--restore_file', default=None,
                    help="Optional, name of the file in --model_dir \
                    containing weights to reload before training")

# Enable label-smoothing regularisation.
parser.add_argument('--label_smoothing', default=False, action='store_true',
                    help='flag for lsr')

# Fake noise teacher (used in the knowledge-distillation experiments).
parser.add_argument('--fake_noise_teacher', default=False, action='store_true',
                    help='flag for Fake Noise Teacher')

# One-hot vs. Gaussian-hot label experiment (used in the baseline).
parser.add_argument('--gause_hot_exp', default=False, action='store_true',
                    help='flag for Fake Gause Hot')


def to_variable(x):
    """Move a tensor to the GPU when available and wrap it in a Variable.

    Fix: the original signature took a spurious ``self`` parameter even
    though this is a module-level function that every caller (see
    ``train``) invokes with a single argument, so each call raised
    TypeError.

    Args:
        x: a torch tensor.

    Returns:
        A ``torch.autograd.Variable`` wrapping ``x`` (on CUDA when a GPU
        is available).
    """
    # NOTE(review): Variable is a no-op since PyTorch 0.4; kept for
    # consistency with the rest of this codebase.
    if torch.cuda.is_available():
        x = x.cuda()
    return Variable(x)

def train(G, E, C, G_optimizer, E_optimizer, C_loss_fn, train_dl, dev_dl, params, ppgn_loss_img, ppgn_loss):
    """Train the PPGN networks: generator G, encoder E and classifier C.

    Args:
        G: generator network mapping codes ``h`` to images.
        E: encoder network mapping images to a pair of codes ``(h, h1)``.
        C: classifier network (put in train mode; not used in the loop yet).
        G_optimizer: optimizer for G's parameters (not stepped yet).
        E_optimizer: optimizer for E's parameters (not stepped yet).
        C_loss_fn: classifier loss function (not used in the loop yet).
        train_dl: dataloader yielding batches of real training images.
        dev_dl: validation dataloader (not used in the loop yet).
        params: hyper-parameter container providing ``batch_size``,
            ``z_dim``, ``num_epochs`` and ``cuda``.
        ppgn_loss_img: reconstruction loss applied to images and features.
        ppgn_loss: combined PPGN loss (not used in the loop yet).

    Returns:
        None

    NOTE(review): the loop computes losses but never calls backward() or
    optimizer.step(), so no weights are updated — presumably still work
    in progress; confirm before relying on this function.
    """
    start = time.time()

    E.train()
    G.train()
    C.train()

    # Fixed noise batch, useful for producing comparable samples across
    # epochs (not consumed anywhere yet).
    fixed_noise = to_variable(torch.randn(params.batch_size, params.z_dim))

    for epoch in range(params.num_epochs):
        for i, img_reals in enumerate(train_dl):
            img_reals = to_variable(img_reals)

            # Encode the real images to obtain the real codes h / h1.
            # Fix: the original fed an all-zeros placeholder into E —
            # built with an invalid torch.zeros call (a tuple passed as
            # a size argument plus a string dtype, both of which raise)
            # — contradicting its own comment that the *images* should
            # be encoded here.
            h_real, h1_real = E(img_reals)

            # GAN-style reconstruction: regenerate an image from the
            # real code and penalise the pixel-space difference.
            img_fake = G(h_real)
            loss_img = ppgn_loss_img(img_fake, img_reals)
            if params.cuda:
                # Fix: tensor.cuda() is not in-place; keep the result.
                loss_img = loss_img.cuda()

            # Re-encode the generated image and match its h1 features
            # against those of the real image.
            h_fake, h1_fake = E(img_fake)
            loss_h1 = ppgn_loss_img(h1_fake, h1_real)
            if params.cuda:
                loss_h1 = loss_h1.cuda()


if __name__ == '__main__':
    # Prepare the on-disk train/dev split of the flower photos.
    dataset_path = './flower_photos/'
    tfl_data_loader.split_data(dataset_path)

    # Load hyper-parameters from <model_dir>/params.json.
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # Use GPU if available.
    params.cuda = torch.cuda.is_available()
    print('the gpu is {}'.format(params.cuda))
    if params.cuda:
        print('the current gpu is {}'.format(torch.cuda.current_device()))

    # Fix: the original subset_percent conditional had two identical
    # branches (dead code). Sub-sampling, if wanted, belongs in the
    # dataloader itself.
    train_dl = tfl_data_loader.tf_flower_dl('train', params)
    dev_dl = tfl_data_loader.tf_flower_dl('dev', params)

    # Fix: G and E were bound to the *classes*, not instances, so the
    # later .cuda() and .parameters() calls would fail.
    # NOTE(review): assumes GenPPGN/EncPPGN take no constructor
    # arguments — confirm against model/PPGNets.py.
    G = ppgn.GenPPGN()
    E = ppgn.EncPPGN()
    C = net2.Net(params)

    # TODO(wangyao): move these into params or configs later.
    # NOTE(review): lr=1 is extremely high for Adam (default 1e-3) —
    # verify this is intentional before training.
    lr = 1
    beta1 = 0.9
    beta2 = 0.999
    params.z_dim = 4096

    if params.cuda:
        G = G.cuda()
        E = E.cuda()
        C = C.cuda()

    G_optimizer = optim.Adam(G.parameters(), lr, [beta1, beta2])
    E_optimizer = optim.Adam(E.parameters(), lr, [beta1, beta2])

    C_loss_fn = net2.loss_fn
    ppgn_loss_img = ppgn.ppgn_loss_img
    ppgn_loss = ppgn.ppgn_loss_fn

    train(G, E, C, G_optimizer, E_optimizer, C_loss_fn, train_dl, dev_dl, params, ppgn_loss_img, ppgn_loss)