import os
from argparse import ArgumentParser

import torch
import torch.nn as nn
from torch import optim
import numpy

from Models_GAN import Generator, Generator_DC, Discriminator
import Dataloader_Uv

DATA_DIR = 'data'    # directory holding the training data
CKPT_DIR = 'models'  # directory where model checkpoints are saved/loaded

G_FN = 'gan_g.pth'   # generator checkpoint filename (swapped in __main__ for the DC variant)
D_FN = 'gan_d.pth'   # discriminator checkpoint filename

G_LRN_RATE = 0.001   # default generator learning rate
D_LRN_RATE = 0.001   # default discriminator learning rate
MAX_GRAD_NORM = 5.0  # gradient-clipping threshold for both networks
# following values are modified at runtime (overwritten from CLI args in __main__)
MAX_SEQ_LEN = 100    # frames per sequence
BATCH_SIZE = 1       # sequences per gradient update
BRIGHT_LOSS = False  # enable the custom BrightLoss term on the generator
GENERATOR_DC = False # use Generator_DC instead of Generator

EPSILON = 1e-40 # value to use to approximate zero (to prevent undefined results)

class BrightLoss(nn.Module):
    ''' Custom "bright-spot area change" loss.

    Per frame, total brightness is the sum over the feature dimension;
    the loss is the MSE between the first-order temporal differences of
    the two brightness curves (generated vs. real).
    '''
    def __init__(self):
        super(BrightLoss, self).__init__()
        self.mseloss = torch.nn.MSELoss()

    def forward(self, x, y):
        ''' x, y: tensors of shape (batch, seq_len, num_feats); returns a scalar.
        '''
        # per-frame total brightness: (batch, seq_len)
        x = torch.sum(x, dim=2)
        y = torch.sum(y, dim=2)
        # BUGFIX/generalization: the original sliced [:, 1:100] - [:, 0:99],
        # which only works when seq_len is exactly 100 (shape mismatch
        # otherwise); generic first-order difference works for any seq_len >= 2
        x = x[:, 1:] - x[:, :-1]
        y = y[:, 1:] - y[:, :-1]
        return self.mseloss(x, y)

class GLoss(nn.Module):
    ''' C-RNN-GAN generator loss.

    Mean negative log of the discriminator's (clamped) probability that
    generated samples are real.
    '''
    def __init__(self):
        super(GLoss, self).__init__()

    def forward(self, logits_gen):
        # clamp away from zero so log() stays defined
        clamped = torch.clamp(logits_gen, EPSILON, 1.0)
        return torch.mean(-torch.log(clamped))


class DLoss(nn.Module):
    ''' C-RNN-GAN discriminator loss.

    Binary cross-entropy over real and generated batches:
    loss = -(y*log(p) + (1-y)*log(1-p)), with optional one-sided
    label smoothing on the real term.
    '''
    def __init__(self, label_smoothing=False):
        super(DLoss, self).__init__()
        self.label_smoothing = label_smoothing

    def forward(self, logits_real, logits_gen):
        ''' Discriminator loss

        logits_real: logits from D, when input is real
        logits_gen: logits from D, when input is from Generator

        loss = -(ylog(p) + (1-y)log(1-p))

        '''
        # logits are D's probability in [0, 1] that the input is real
        p_real = torch.clamp(logits_real, EPSILON, 1.0)
        d_loss_real = -torch.log(p_real)

        if self.label_smoothing:
            # mix in a fraction of the "fake" target for real samples
            p_real_as_fake = torch.clamp(1 - p_real, EPSILON, 1.0)
            d_loss_real = 0.9 * d_loss_real + 0.1 * (-torch.log(p_real_as_fake))

        # generated samples should be classified as fake: target is 1 - p
        p_gen_as_fake = torch.clamp(1 - logits_gen, EPSILON, 1.0)
        d_loss_gen = -torch.log(p_gen_as_fake)

        return torch.mean(d_loss_real + d_loss_gen)

def run_training(model, optimizer, criterion, dataloader, freeze_g=False, freeze_d=False):
    ''' Run a single training epoch over the 'train' split.

    model:     dict with 'g' (generator) and 'd' (discriminator) modules
    optimizer: dict with 'g' and 'd' optimizers
    criterion: dict with 'g', 'd' and optionally 'bright' loss modules
    freeze_g / freeze_d: skip the gradient update for that network

    Returns (model, avg_g_loss, avg_d_loss, d_accuracy_percent).
    '''

    num_feats = dataloader.num_feats
    dataloader.rewind(datasetclass = 'train')
    # real samples from the training split
    batch_pic_list = dataloader.get_batch(BATCH_SIZE, MAX_SEQ_LEN, datasetclass = 'train')

    model['g'].train()
    model['d'].train()

    loss = {}
    g_loss_total = 0.0
    d_loss_total = 0.0
    num_corrects = 0
    num_sample = 0

    # BUGFIX: `is not 0` tests object identity, not value (a SyntaxWarning in
    # modern Python and implementation-dependent); compare with != instead
    while batch_pic_list.size(0) != 0:

        real_batch_sz = batch_pic_list.size(0)

        g_states = model['g'].init_hidden(real_batch_sz)
        d_state = model['d'].init_hidden(real_batch_sz)

        #### GENERATOR ####
        # clear G's stale gradients only when G is being trained;
        # if G is frozen its gradients are left untouched
        if not freeze_g:
            optimizer['g'].zero_grad()

        # prepare noise input for the generator
        if GENERATOR_DC:
            z = torch.empty([real_batch_sz, MAX_SEQ_LEN, 100]).uniform_()  # DCGAN variant uses 100 input features
        else:
            z = torch.empty([real_batch_sz, MAX_SEQ_LEN, num_feats]).uniform_()  # random noise vector

        # feed inputs to generator
        g_feats, _ = model['g'](z, g_states)

        # calculate loss, backprop, and update weights of G
        if isinstance(criterion['g'], GLoss):
            d_logits_gen, _, _ = model['d'](g_feats, d_state)
            loss['g'] = criterion['g'](d_logits_gen)
        else: # feature matching
            # feed real and generated input to discriminator
            _, d_feats_real, _ = model['d'](batch_pic_list, d_state)
            _, d_feats_gen, _ = model['d'](g_feats, d_state)
            loss['g'] = criterion['g'](d_feats_real, d_feats_gen)

        # optional custom bright-spot-area-change loss term
        if BRIGHT_LOSS:
            loss['g'] = loss['g'] + criterion['bright'](g_feats, batch_pic_list)

        if not freeze_g:
            loss['g'].backward()
            nn.utils.clip_grad_norm_(model['g'].parameters(), max_norm=MAX_GRAD_NORM)
            optimizer['g'].step()

        #### DISCRIMINATOR ####
        if not freeze_d:
            optimizer['d'].zero_grad()
        # feed real and generated input to discriminator
        d_logits_real, _, _ = model['d'](batch_pic_list, d_state)
        # detach from operation history to prevent backpropagating to generator
        d_logits_gen, _, _ = model['d'](g_feats.detach(), d_state)
        # calculate loss, backprop, and update weights of D
        loss['d'] = criterion['d'](d_logits_real, d_logits_gen)
        if not freeze_d:
            loss['d'].backward()
            nn.utils.clip_grad_norm_(model['d'].parameters(), max_norm=MAX_GRAD_NORM)
            optimizer['d'].step()

        g_loss_total += loss['g'].item()
        d_loss_total += loss['d'].item()
        num_corrects += (d_logits_real > 0.5).sum().item() + (d_logits_gen < 0.5).sum().item()
        num_sample += real_batch_sz

        # fetch next batch
        batch_pic_list = dataloader.get_batch(BATCH_SIZE, MAX_SEQ_LEN, datasetclass = 'train')

    g_loss_avg, d_loss_avg = 0.0, 0.0
    d_acc = 0.0
    if num_sample > 0:
        g_loss_avg = g_loss_total / num_sample
        d_loss_avg = d_loss_total / num_sample
        d_acc = 100 * num_corrects / (2 * num_sample) # 2 because (real + generated)

    return model, g_loss_avg, d_loss_avg, d_acc


def run_validation(model, criterion, dataloader):
    ''' Run a single validation epoch over the 'valid' split (no updates).

    Returns (avg_g_loss, avg_d_loss, d_accuracy_percent).
    '''
    num_feats = dataloader.num_feats
    dataloader.rewind(datasetclass = 'valid')
    batch_pic_list = dataloader.get_batch(BATCH_SIZE, MAX_SEQ_LEN, datasetclass = 'valid')

    model['g'].eval()
    model['d'].eval()

    g_loss_total = 0.0
    d_loss_total = 0.0
    num_corrects = 0
    num_sample = 0

    # validation never backpropagates, so skip building the autograd graph
    with torch.no_grad():
        # BUGFIX: `is not 0` tests identity, not value; use != instead
        while batch_pic_list.size(0) != 0:

            real_batch_sz = batch_pic_list.size(0)

            # initial states
            g_states = model['g'].init_hidden(real_batch_sz)
            d_state = model['d'].init_hidden(real_batch_sz)

            #### GENERATOR ####
            # prepare noise input for the generator
            if GENERATOR_DC:
                z = torch.empty([real_batch_sz, MAX_SEQ_LEN, 100]).uniform_()  # DCGAN variant uses 100 input features
            else:
                z = torch.empty([real_batch_sz, MAX_SEQ_LEN, num_feats]).uniform_()  # random noise vector

            # feed inputs to generator
            g_feats, _ = model['g'](z, g_states)
            # feed real and generated input to discriminator
            d_logits_real, d_feats_real, _ = model['d'](batch_pic_list, d_state)
            d_logits_gen, d_feats_gen, _ = model['d'](g_feats, d_state)
            # calculate loss
            if isinstance(criterion['g'], GLoss):
                g_loss = criterion['g'](d_logits_gen)
            else: # feature matching
                g_loss = criterion['g'](d_feats_real, d_feats_gen)

            # optional custom bright-spot-area-change loss term
            # BUGFIX: original assigned to the undefined `loss['g']`
            # (NameError whenever BRIGHT_LOSS was enabled); accumulate into
            # g_loss, mirroring run_training
            if BRIGHT_LOSS:
                g_loss = g_loss + criterion['bright'](g_feats, batch_pic_list)

            d_loss = criterion['d'](d_logits_real, d_logits_gen)

            g_loss_total += g_loss.item()
            d_loss_total += d_loss.item()
            num_corrects += (d_logits_real > 0.5).sum().item() + (d_logits_gen < 0.5).sum().item()
            num_sample += real_batch_sz

            # fetch next batch
            batch_pic_list = dataloader.get_batch(BATCH_SIZE, MAX_SEQ_LEN, datasetclass = 'valid')

    g_loss_avg, d_loss_avg = 0.0, 0.0
    d_acc = 0.0
    if num_sample > 0:
        g_loss_avg = g_loss_total / num_sample
        d_loss_avg = d_loss_total / num_sample
        d_acc = 100 * num_corrects / (2 * num_sample) # 2 because (real + generated)

    return g_loss_avg, d_loss_avg, d_acc


def run_epoch(model, optimizer, criterion, dataloader, ep, num_ep,
              freeze_g=False, freeze_d=False, pretraining=False):
    ''' Run a single epoch: one training pass, one validation pass, then
    sample one generated sequence from G and save it for inspection.

    Returns (model, training_accuracy_of_D).
    '''
    model, trn_g_loss, trn_d_loss, trn_acc = \
        run_training(model, optimizer, criterion, dataloader, freeze_g=freeze_g, freeze_d=freeze_d)

    val_g_loss, val_d_loss, val_acc = run_validation(model, criterion, dataloader)

    if pretraining:
        print("Pretraining Epoch %d/%d " % (ep+1, num_ep), "[Freeze G: ", freeze_g, ", Freeze D: ", freeze_d, "]")
    else:
        print("Epoch %d/%d " % (ep+1, num_ep), "[Freeze G: ", freeze_g, ", Freeze D: ", freeze_d, "]")

    print("\t[Training] G_loss: %0.8f, D_loss: %0.8f, D_acc: %0.2f\n"
          "\t[Validation] G_loss: %0.8f, D_loss: %0.8f, D_acc: %0.2f" %
          (trn_g_loss, trn_d_loss, trn_acc,
           val_g_loss, val_d_loss, val_acc))

    # After each epoch, feed G one batch of noise to generate an image
    # sequence and save it.
    g_states = model['g'].init_hidden(1)
    num_feats = dataloader.num_feats

    if GENERATOR_DC:
        z = torch.empty([1, MAX_SEQ_LEN, 100]).uniform_()  # DCGAN variant uses 100 input features
    else:
        z = torch.empty([1, MAX_SEQ_LEN, num_feats]).uniform_()  # random noise vector

    if torch.cuda.is_available():
        z = z.cuda()
        model['g'].cuda()
        # NOTE(review): g_states was created before the .cuda() move; this
        # assumes init_hidden already places states on the right device — confirm

    model['g'].eval()
    with torch.no_grad():  # sampling only; no gradients needed
        g_feats, _ = model['g'](z, g_states)
    pic_data = g_feats.squeeze().cpu()
    pic_data = pic_data.detach().numpy()

    # output filenames kept byte-identical (including the historical
    # "trainning" spelling) so downstream tooling keeps working
    if pretraining:
        if freeze_g:
            dataloader.save_data("pretraining_d_"+str(ep), pic_data)
        else:
            dataloader.save_data("pretraining_g_"+str(ep), pic_data)
    else:
        dataloader.save_data("trainning_"+str(ep), pic_data)

    return model, trn_acc


def main(args):
    ''' Full training sequence: build the models, optimizers and losses,
    optionally restore checkpoints, pretrain, train, and save weights.
    '''
    dataloader = Dataloader_Uv.DataLoader()
    num_feats = dataloader.num_feats

    # check once whether a GPU is available
    train_on_gpu = torch.cuda.is_available()
    print('Training on GPU.' if train_on_gpu else 'No GPU available, training on CPU.')

    # generator variant is selected by the CLI flag; the DC generator
    # always takes a 100-feature noise vector
    if args.generator_DC:
        gen = Generator_DC(num_feats=100, use_cuda=train_on_gpu)
    else:
        gen = Generator(num_feats, use_cuda=train_on_gpu)
    model = {
        'g': gen,
        'd': Discriminator(num_feats, use_cuda=train_on_gpu)
    }

    # optimizers: SGD with momentum 0.9, or Adam (default)
    if args.use_sgd:
        make_opt = lambda params, lr: optim.SGD(params, lr=lr, momentum=0.9)
    else:
        make_opt = lambda params, lr: optim.Adam(params, lr)
    optimizer = {
        'g': make_opt(model['g'].parameters(), args.g_lrn_rate),
        'd': make_opt(model['d'].parameters(), args.d_lrn_rate),
    }

    # G loss: feature matching (summed MSE) or -log(p); D loss: BCE,
    # optionally label-smoothed; plus the custom brightness loss
    g_criterion = nn.MSELoss(reduction='sum') if args.feature_matching else GLoss()
    criterion = {
        'g': g_criterion,
        'd': DLoss(args.label_smoothing),
        'bright': BrightLoss()
    }

    # optionally resume from saved checkpoints (fresh training by default)
    if args.load_g:
        g_path = os.path.join(CKPT_DIR, G_FN)
        model['g'].load_state_dict(torch.load(g_path))
        print("Continue training of %s" % g_path)

    if args.load_d:
        d_path = os.path.join(CKPT_DIR, D_FN)
        model['d'].load_state_dict(torch.load(d_path))
        print("Continue training of %s" % d_path)

    if train_on_gpu:
        model['g'].cuda()
        model['d'].cuda()

    # pretraining (enabled by default): train D alone, then G alone,
    # each for its own number of epochs
    if not args.no_pretraining:
        for ep in range(args.d_pretraining_epochs):
            model, _ = run_epoch(model, optimizer, criterion, dataloader,
                              ep, args.d_pretraining_epochs, freeze_g=True, pretraining=True)

        for ep in range(args.g_pretraining_epochs):
            model, _ = run_epoch(model, optimizer, criterion, dataloader,
                              ep, args.g_pretraining_epochs, freeze_d=True, pretraining=True)

    # main adversarial training: run_epoch performs one training pass
    # and one validation pass per epoch
    freeze_d = False
    for ep in range(args.num_epochs):
        model, trn_acc = run_epoch(model, optimizer, criterion, dataloader, ep, args.num_epochs, freeze_d=freeze_d)
        if args.conditional_freezing:
            # freeze D for the next epoch when it is already too accurate
            freeze_d = trn_acc >= 95.0

    # save trained weights (enabled by default)
    if not args.no_save_g:
        torch.save(model['g'].state_dict(), os.path.join(CKPT_DIR, G_FN))
        print("Saved generator: %s" % os.path.join(CKPT_DIR, G_FN))

    if not args.no_save_d:
        torch.save(model['d'].state_dict(), os.path.join(CKPT_DIR, D_FN))
        print("Saved discriminator: %s" % os.path.join(CKPT_DIR, D_FN))


if __name__ == "__main__":

    # Command-line interface; parsed values override the module-level
    # runtime constants below before main() is invoked.
    ARG_PARSER = ArgumentParser()
    ARG_PARSER.add_argument('--load_g', action='store_true')                                # load saved G checkpoint (default: off)
    ARG_PARSER.add_argument('--load_d', action='store_true')                                # load saved D checkpoint (default: off)
    ARG_PARSER.add_argument('--no_save_g', action='store_true')                             # skip saving G at the end (default: save)
    ARG_PARSER.add_argument('--no_save_d', action='store_true')                             # skip saving D at the end (default: save)

    ARG_PARSER.add_argument('--num_epochs', default=300, type=int)                          # number of training epochs (full dataset pass each)
    ARG_PARSER.add_argument('--seq_len', default=100, type=int)                             # sequence length (default: 100 frames per sequence)
    ARG_PARSER.add_argument('--batch_size', default=1, type=int)                            # sequences per gradient update (default: 1)
    ARG_PARSER.add_argument('--g_lrn_rate', default=0.001, type=float)                      # G learning rate (default: 0.001)
    ARG_PARSER.add_argument('--d_lrn_rate', default=0.001, type=float)                      # D learning rate (default: 0.001)

    ARG_PARSER.add_argument('--no_pretraining', action='store_true')                        # skip pretraining (default: pretrain)
    ARG_PARSER.add_argument('--g_pretraining_epochs', default=5, type=int)                  # G pretraining epochs (default: 5)
    ARG_PARSER.add_argument('--d_pretraining_epochs', default=5, type=int)                  # D pretraining epochs (default: 5)
    # ARG_PARSER.add_argument('--freeze_d_every', default=5, type=int)                      # freeze D every n epochs (disabled)
    ARG_PARSER.add_argument('--use_sgd', action='store_true')                               # use SGD instead of Adam (default: Adam)
    ARG_PARSER.add_argument('--conditional_freezing', action='store_true')                 # freeze D conditionally on its accuracy
    ARG_PARSER.add_argument('--label_smoothing', action='store_true')                       # label smoothing in D loss (default: off)
    ARG_PARSER.add_argument('--feature_matching', action='store_true')                      # feature-matching G loss (default: off)
    ARG_PARSER.add_argument('--bright_loss', action='store_true')                           # custom bright-spot-area loss (default: off)
    ARG_PARSER.add_argument('--generator_DC', action='store_true')                          # use the alternative (DC) generator (default: off)
    ARGS = ARG_PARSER.parse_args()
    # the DC generator saves to its own checkpoint filename
    if ARGS.generator_DC:
        G_FN = 'gan_DC_g.pth'
    # propagate CLI values to the module-level runtime globals
    MAX_SEQ_LEN = ARGS.seq_len
    BATCH_SIZE = ARGS.batch_size
    BRIGHT_LOSS = ARGS.bright_loss
    GENERATOR_DC = ARGS.generator_DC
    main(ARGS)
