from __future__ import print_function
# from six.moves import range
from PIL import Image
import pickle
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn

import torch.optim as optim
import os
import time

import numpy as np
import torchfile
from model import *
from miscc.pt_config import cfg
from miscc.utils import mkdir_p
from miscc.utils import weights_init
from miscc.utils import save_img_results, save_model, save_img_predicts
from miscc.utils import KL_loss
from miscc.utils import compute_discriminator_loss, compute_generator_loss
from sentence_transformers import SentenceTransformer
from tensorboardX import summary
from tensorboardX import FileWriter

class GANTrainer(object):
    """Driver for the two-stage text-to-image GAN (StackGAN-style).

    Stage-I maps a sentence embedding plus a noise vector to a
    low-resolution image; Stage-II wraps a (frozen) Stage-I generator and
    refines its output into a high-resolution image.  The class supports
    training (``train``), batch sampling from pre-computed embeddings
    (``sample``) and single-call inference from raw sentences (``predict``).
    """

    def __init__(self, output_dir):
        """Prepare output directories, logging, and GPU configuration.

        Args:
            output_dir: root directory under which ``Model``, ``Image`` and
                ``Log`` sub-directories are created (training mode only).
        """
        if cfg.TRAIN.FLAG:
            # Folders for checkpoints, image snapshots and tensorboard logs.
            self.model_dir = os.path.join(output_dir, 'Model')
            self.image_dir = os.path.join(output_dir, 'Image')
            self.log_dir = os.path.join(output_dir, 'Log')
            mkdir_p(self.model_dir)
            mkdir_p(self.image_dir)
            mkdir_p(self.log_dir)
            # Event writer used to emit scalar summaries during training.
            self.summary_writer = FileWriter(self.log_dir)

        self.max_epoch = cfg.TRAIN.MAX_EPOCH
        self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL

        # GPU configuration: cfg.GPU_ID is a comma-separated id list ("0,1").
        s_gpus = cfg.GPU_ID.split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)
        # One cfg batch per GPU; data_parallel splits the batch across GPUs.
        self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
        # Default device is the first listed GPU.
        torch.cuda.set_device(self.gpus[0])
        # Let cudnn benchmark and pick the fastest convolution algorithms.
        cudnn.benchmark = True

    # ############# For training stageI GAN #############
    def load_network_stageI(self):
        """Build the Stage-I generator/discriminator pair.

        Returns:
            (netG, netD): freshly initialised networks, optionally restored
            from cfg.NET_G / cfg.NET_D and moved to GPU when cfg.CUDA is set.
        """
        from model import Stage1_G, Stage1_D
        netG = Stage1_G()
        netG.apply(weights_init)
        print(netG)
        netD = Stage1_D()
        netD.apply(weights_init)
        print(netD)

        if cfg.NET_G != '':
            # map_location keeps the load CPU-compatible regardless of
            # which device the checkpoint was saved from.
            state_dict = \
                torch.load(cfg.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_G)
        if cfg.NET_D != '':
            state_dict = \
                torch.load(cfg.NET_D,
                           map_location=lambda storage, loc: storage)
            netD.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_D)
        if cfg.CUDA:
            # Move all parameters to the GPU.
            netG.cuda()
            netD.cuda()
        return netG, netD

    # ############# For training stageII GAN  #############
    def load_network_stageII(self):
        """Build the Stage-II generator/discriminator pair.

        The Stage-II generator embeds a Stage-I generator; either a full
        Stage-II checkpoint (cfg.NET_G) or at least a Stage-I checkpoint
        (cfg.STAGE1_G) must be available.

        Returns:
            (netG, netD) on success; ``None`` (with a message) when no
            Stage-I checkpoint path was provided.
        """
        from model import Stage1_G, Stage2_G, Stage2_D

        stage1_G = Stage1_G()
        netG = Stage2_G(stage1_G)
        netG.apply(weights_init)
        print(netG)
        if cfg.NET_G != '':
            state_dict = \
                torch.load(cfg.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_G)
        elif cfg.STAGE1_G != '':
            # Only the embedded Stage-I sub-network is restored.
            state_dict = \
                torch.load(cfg.STAGE1_G,
                           map_location=lambda storage, loc: storage)
            netG.Stage1_G.load_state_dict(state_dict)
            print('Load from: ', cfg.STAGE1_G)
        else:
            # NOTE(review): returning None makes the caller's tuple-unpack
            # fail with a TypeError; kept for compatibility.
            print("Please give the Stage1_G path")
            return

        netD = Stage2_D()
        netD.apply(weights_init)
        if cfg.NET_D != '':
            state_dict = \
                torch.load(cfg.NET_D,
                           map_location=lambda storage, loc: storage)
            netD.load_state_dict(state_dict)
            print('Load from: ', cfg.NET_D)
        print(netD)

        if cfg.CUDA:
            netG.cuda()
            netD.cuda()
        return netG, netD

    def train(self, data_loader, stage=1):
        """Run the adversarial training loop.

        Args:
            data_loader: yields ``(image_batch, text_embedding_batch)``.
            stage: 1 to train Stage-I, anything else for Stage-II.

        Side effects: writes tensorboard scalars, periodic image snapshots
        and model checkpoints under the directories set up in __init__.
        """
        # Pick the networks for the requested stage.
        if stage == 1:
            netG, netD = self.load_network_stageI()
        else:
            netG, netD = self.load_network_stageII()

        nz = cfg.Z_DIM  # noise dimensionality (e.g. 100)
        batch_size = self.batch_size
        noise = torch.FloatTensor(batch_size, nz).requires_grad_(False)
        # fixed_noise is reused for every snapshot so generator progress is
        # comparable across epochs.
        fixed_noise = torch.FloatTensor(batch_size, nz).normal_(0, 1).requires_grad_(False)
        real_labels = torch.FloatTensor(batch_size).fill_(1).requires_grad_(False)
        fake_labels = torch.FloatTensor(batch_size).fill_(0).requires_grad_(False)
        # Move everything to the GPU when requested.
        if cfg.CUDA:
            noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
            real_labels, fake_labels = real_labels.cuda(), fake_labels.cuda()

        # Learning rates and their decay schedule.
        generator_lr = cfg.TRAIN.GENERATOR_LR
        discriminator_lr = cfg.TRAIN.DISCRIMINATOR_LR
        lr_decay_step = cfg.TRAIN.LR_DECAY_EPOCH

        # Discriminator optimiser.
        optimizerD = optim.Adam(netD.parameters(),
                                lr=cfg.TRAIN.DISCRIMINATOR_LR,
                                betas=(0.5, 0.999))
        # Generator optimiser.  Only parameters that require gradients are
        # optimised: in Stage-II the embedded Stage-I generator is frozen
        # (its outputs feed Stage-II), so its weights are skipped here.
        netG_para = [p for p in netG.parameters() if p.requires_grad]
        optimizerG = optim.Adam(netG_para,
                                lr=cfg.TRAIN.GENERATOR_LR,
                                betas=(0.5, 0.999))
        count = 0
        for epoch in range(self.max_epoch):
            start_t = time.time()
            # Halve both learning rates every lr_decay_step epochs.
            if epoch % lr_decay_step == 0 and epoch > 0:
                generator_lr *= 0.5
                for param_group in optimizerG.param_groups:
                    param_group['lr'] = generator_lr
                discriminator_lr *= 0.5
                for param_group in optimizerD.param_groups:
                    param_group['lr'] = discriminator_lr

            # Fixed: the old message hard-coded "epoch 1" for every epoch.
            print('epoch %d processing.' % epoch)
            for i, data in enumerate(data_loader, 0):
                ######################################################
                # (1) Prepare training data
                ######################################################
                real_img_cpu, txt_embedding = data
                real_imgs = real_img_cpu.requires_grad_(False)
                txt_embedding = txt_embedding.requires_grad_(False)
                if cfg.CUDA:
                    real_imgs = real_imgs.cuda()
                    txt_embedding = txt_embedding.cuda()

                #######################################################
                # (2) Generate fake images
                ######################################################
                noise.data.normal_(0, 1)
                inputs = (txt_embedding, noise)
                _, fake_imgs, mu, logvar = \
                    nn.parallel.data_parallel(netG, inputs, self.gpus)

                ############################
                # (3) Update D network
                ###########################
                netD.zero_grad()
                errD, errD_real, errD_wrong, errD_fake = \
                    compute_discriminator_loss(netD, real_imgs, fake_imgs,
                                               real_labels, fake_labels,
                                               mu, self.gpus)
                errD.backward()
                optimizerD.step()

                ############################
                # (4) Update G network
                ###########################
                netG.zero_grad()
                errG = compute_generator_loss(netD, fake_imgs,
                                              real_labels, mu, self.gpus)
                # KL term regularises the conditioning-augmentation gaussian.
                kl_loss = KL_loss(mu, logvar)
                errG_total = errG + kl_loss * cfg.TRAIN.COEFF.KL
                errG_total.backward()
                optimizerG.step()

                count = count + 1
                if i % 100 == 0:
                    summary_D = summary.scalar('D_loss', errD)
                    summary_D_r = summary.scalar('D_loss_real', errD_real)
                    summary_D_w = summary.scalar('D_loss_wrong', errD_wrong)
                    summary_D_f = summary.scalar('D_loss_fake', errD_fake)
                    summary_G = summary.scalar('G_loss', errG)
                    summary_KL = summary.scalar('KL_loss', kl_loss)

                    self.summary_writer.add_summary(summary_D, count)
                    self.summary_writer.add_summary(summary_D_r, count)
                    self.summary_writer.add_summary(summary_D_w, count)
                    self.summary_writer.add_summary(summary_D_f, count)
                    self.summary_writer.add_summary(summary_G, count)
                    self.summary_writer.add_summary(summary_KL, count)

                    # Save an image snapshot generated from the fixed noise.
                    # Stage-I: the 64x64 fakes are saved next to the real
                    # images; Stage-II: the low-resolution intermediate
                    # (lr_fake) is saved as well.
                    inputs = (txt_embedding, fixed_noise)
                    lr_fake, fake, _, _ = \
                        nn.parallel.data_parallel(netG, inputs, self.gpus)
                    save_img_results(real_img_cpu, fake, epoch, self.image_dir)
                    if lr_fake is not None:
                        save_img_results(None, lr_fake, epoch, self.image_dir)
            end_t = time.time()
            print('''[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f Loss_KL: %.4f
                     Loss_real: %.4f Loss_wrong:%.4f Loss_fake %.4f
                     Total Time: %.2fsec
                  '''
                  % (epoch, self.max_epoch, i, len(data_loader),
                     errD, errG, kl_loss,
                     errD_real, errD_wrong, errD_fake, (end_t - start_t)))
            # Save a model snapshot every snapshot_interval epochs, under a
            # stage-specific sub-directory of self.model_dir.
            if epoch % self.snapshot_interval == 0:
                if stage == 1:
                    s1_model_dir = os.path.join(self.model_dir, "netG1")
                    save_model(netG, netD, epoch, s1_model_dir)
                else:
                    s2_model_dir = os.path.join(self.model_dir, "netG2")
                    save_model(netG, netD, epoch, s2_model_dir)
        # Final checkpoint after the last epoch.
        save_model(netG, netD, self.max_epoch, self.model_dir)
        self.summary_writer.close()

    def sample(self, datapath, stage=1):
        """Generate one image per pre-computed sentence embedding.

        Loads ``embeddings.pkl`` from *datapath*, runs each embedding with
        fresh noise through the generator and writes one PNG per embedding
        into a directory derived from the cfg.NET_G checkpoint path.
        """
        if stage == 1:
            netG, _ = self.load_network_stageI()
        else:
            netG, _ = self.load_network_stageII()
        netG.eval()

        # Load the pre-computed text embeddings produced by the encoder.
        embedding_path = os.path.join(datapath, 'embeddings.pkl')
        with open(embedding_path, 'rb') as f:
            embeddings = pickle.load(f)
            embeddings = np.array(embeddings)
            print('embeddings: ', embeddings.shape)
        num_embeddings = len(embeddings)
        print('Successfully load sentences from: ', datapath)
        print('Total number of sentences:', num_embeddings)
        print('num_embeddings:', num_embeddings, embeddings.shape)

        # Output directory: the checkpoint path minus its ".pth" suffix.
        save_dir = cfg.NET_G[:cfg.NET_G.find('.pth')]
        mkdir_p(save_dir)

        batch_size = np.minimum(num_embeddings, self.batch_size)
        nz = cfg.Z_DIM
        noise = torch.FloatTensor(batch_size, nz).requires_grad_(False)
        if cfg.CUDA:
            noise = noise.cuda()
        count = 0
        while count < num_embeddings:
            # Hard cap on the number of generated samples.
            if count > 3000:
                break
            iend = count + batch_size
            if iend > num_embeddings:
                # Last (partial) batch: back up so a full batch still fits.
                iend = num_embeddings
                count = num_embeddings - batch_size
            embeddings_batch = embeddings[count:iend]
            txt_embedding = torch.FloatTensor(embeddings_batch).requires_grad_(False)
            if cfg.CUDA:
                txt_embedding = txt_embedding.cuda()

            #######################################################
            # (2) Generate fake images
            ######################################################
            noise.data.normal_(0, 1)
            inputs = (txt_embedding, noise)
            _, fake_imgs, mu, logvar = \
                nn.parallel.data_parallel(netG, inputs, self.gpus)
            for i in range(batch_size):
                save_name = '%s/%d.png' % (save_dir, count + i)
                im = fake_imgs[i].data.cpu().numpy()
                # Map generator output from [-1, 1] back to [0, 255].
                im = (im + 1.0) * 127.5
                im = im.astype(np.uint8)
                # CHW -> HWC, the layout PIL expects.
                im = np.transpose(im, (1, 2, 0))
                im = Image.fromarray(im)
                im.save(save_name)
            count += batch_size

    def predict(self, sentences, model_path="output/Model/netG1/netG_epoch_180.pth"):
        """Generate images from raw sentences with the Stage-I generator.

        Args:
            sentences: sentence (or list of sentences) to condition on.
            model_path: Stage-I generator checkpoint to load; defaults to
                the previously hard-coded path for backward compatibility.
        """
        model = SentenceTransformer('sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2')
        embedding = model.encode(sentences)
        embedding = torch.cuda.FloatTensor(embedding)
        netG = Stage1_G()
        netG.apply(weights_init)
        state_dict = torch.load(model_path)
        netG.load_state_dict(state_dict)
        netG.cuda()
        # Inference mode, consistent with sample().
        netG.eval()
        # NOTE(review): the noise batch size is hard-coded to 2 — it has to
        # match the number of encoded sentences; confirm with callers.
        fixed_noise = torch.cuda.FloatTensor(2, cfg.Z_DIM).normal_(0, 1).requires_grad_(False)
        lr_fake, fake, _, _ = netG(embedding, fixed_noise)

        # `is not None` instead of `!= None`: comparing a tensor with `!=`
        # is an element-wise operation, not an identity test.
        if fake is not None:
            save_img_predicts(fake, 1, "static")
        if lr_fake is not None:
            save_img_predicts(lr_fake, 2, "try")


