import h5py
from torch.autograd import Variable

from loadh5 import load_hdf5
from Net1 import Generator,Spatial_Discriminator,Temporal_Discriminator,weights_init,NowcastingSampler
from utils import *
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from Block_Stack import *
import opt
import itertools
from torch.utils.data import DataLoader
import torch
import matplotlib.pyplot as plt

RELU = nn.ReLU()
cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if (torch.cuda.is_available() and opt.ngpu > 0) else 'cpu')
Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
torch.manual_seed(0)  # fixed seed for reproducibility
mm = MinMaxScaler()
ma = MaxAbsScaler()


FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

# Load the radar-frame training data and scale each frame into [0, 1].
dataset = load_hdf5('Data/train.h5', keys=['data'])
Dataset = np.array(dataset['data'], dtype='float32')
for i in range(Dataset.shape[0]):
    # NOTE(review): fit_transform is re-fit on every frame, so `mm` ends up
    # holding only the LAST frame's min/max — any later inverse_transform
    # applies those parameters to every frame. Confirm this is intended.
    Dataset[i] = mm.fit_transform(Dataset[i])


# (T, H, W) -> (T, 1, H, W): add a channel dimension, then move it after time.
# (Removed the original no-op `Dataset.unsqueeze(0)` whose result was
# discarded; torch.unsqueeze below already does the real work.)
Dataset = torch.from_numpy(Dataset).float()
Dataset = torch.unsqueeze(Dataset, 0)
Dataset = Dataset.permute(1, 0, 2, 3)

# Sequential (unshuffled) loader over the frame sequence.
Data = DataLoader(Dataset, batch_size=opt.batch_size, shuffle=False, drop_last=False, num_workers=0)


# Initialize the networks.
# Generator side: conditioning stack -> latent stack -> sampler (G).
conditioning_stack = Context_Condition_Stack(1,768,4).to(device)# produces the conditioning tensors
laten_stack = Latent_Condition_Stack((8,8,8),768,use_attention=True).to(device)  # produces the latent tensor

G = NowcastingSampler(18,768,768,1).to(device)# generator: 18 output frames
#G.apply(weights_init)
# The two discriminators:
# spatial discriminator (judges individual frames)
s_D = Spatial_Discriminator(1,8,4).to(device)
#s_D.apply(weights_init)
# temporal discriminator (judges the whole sequence)
t_D = Temporal_Discriminator(1,128,3).to(device)
#t_D.apply(weights_init)

# Optimizers: one Adam per network, GAN-style betas (0.5, 0.999).
optimizerD_s = torch.optim.Adam(s_D.parameters(), lr=opt.lr, betas=(0.5, 0.999))
optimizerD_t = torch.optim.Adam(t_D.parameters(), lr=opt.lr, betas=(0.5, 0.999))
optimizerG = torch.optim.Adam(G.parameters(), lr=opt.lr_G, betas=(0.5, 0.999))


print("Starting Training Loop...")
for epoch in range(opt.num_epoch):
    for i, data in enumerate(Data):

        data = data.to(device)
        # Split the 24-frame batch along the time axis into 4 conditioning
        # frames, 18 target frames, and 2 held-out test frames.
        X_real_first_half, X_real_second_half, test = data.split([4, 18, 2], 0)
        real_second_5 = torch.unsqueeze(X_real_second_half, 0)

        # Constant targets for the criteria. BUG FIX: the original created
        # these with requires_grad=True — loss targets must not require grad.
        real_label = torch.ones(1, device=device)
        fake_label = torch.zeros(1, device=device)

        # ---------------- Generator step ----------------
        optimizerG.zero_grad()
        # Latent tensor z ~ N(0, 1), shape (1, 768, 8, 8).
        z = Tensor(np.random.normal(0, 1, (1, 768, 8, 8)))

        # Conditioning stack turns the 4 context frames into a tuple of
        # conditioning states for the sampler.
        context = torch.unsqueeze(X_real_first_half, 0)
        condition_state = conditioning_stack(context)
        forcast = G(condition_state, z)  # (1, 18, 1, 256, 256)

        fake = torch.squeeze(forcast, 0)
        # The temporal discriminator needs the full time series: context + forecast.
        fake_t = torch.cat((X_real_first_half, fake), dim=0)

        # BUG FIX: the original called .detach() on both discriminator
        # outputs here, cutting the graph so GG_loss.backward() produced
        # no gradients for G and the generator never trained.
        fake_out_s = s_D(forcast)
        fake_out_t = t_D(fake_t)
        s_g_loss = opt.criterion(fake_out_s, real_label)
        t_g_loss = opt.criterion(fake_out_t, real_label)

        GG_loss = s_g_loss + t_g_loss
        GG_loss.backward()
        optimizerG.step()

        # ---------------- Discriminator step ----------------
        # BUG FIX: the original detached the discriminator OUTPUTS, so
        # D_loss.backward() also produced no gradients. The standard recipe
        # detaches the GENERATOR output (no need to backprop through G here)
        # and keeps the discriminator's part of the graph intact.
        fake_detached = forcast.detach()
        fake_t_detached = fake_t.detach()

        # Spatial discriminator.
        optimizerD_s.zero_grad()
        s_out_real = s_D(real_second_5)
        s_out_fake = s_D(fake_detached)
        s_real_loss = opt.criterionG(s_out_real, real_label)
        s_fake_loss = opt.criterionG(s_out_fake, fake_label)
        s_D_loss = (s_real_loss + s_fake_loss) / 2

        # Temporal discriminator.
        optimizerD_t.zero_grad()
        # CONSISTENCY FIX: the fake sequence has 4 + 18 = 22 frames, so the
        # real sequence must be the matching 22 frames (context + target),
        # not the full 24-frame batch that still carries the 2 test frames.
        real_t = torch.cat((X_real_first_half, X_real_second_half), dim=0)
        t_out_real = t_D(real_t)
        t_out_fake = t_D(fake_t_detached)
        t_real_loss = opt.criterion(t_out_real, real_label)
        t_fake_loss = opt.criterion(t_out_fake, fake_label)
        t_D_loss = (t_real_loss + t_fake_loss) / 2

        D_loss = s_D_loss + t_D_loss
        D_loss.backward()
        optimizerD_t.step()
        optimizerD_s.step()

        # Re-sample with the freshly updated generator and accumulate the
        # forecasts for logging / end-of-training export.
        last_fake = G(condition_state, z).detach().cpu()
        last_fake = torch.squeeze(last_fake).numpy()
        d = last_fake.shape
        if i == 0:
            Dataset22 = last_fake
        else:
            dim = Dataset22.shape
            Dataset22 = np.append(Dataset22, last_fake)
            Dataset22 = Dataset22.reshape(dim[0] + d[0], dim[1], dim[2])

    # BUG FIX: the original printed D_loss.item() in BOTH the spatial and
    # temporal slots; report each discriminator's own loss.
    print('Epoch [{}/{}], g_loss: {:.4f}, s_d_loss: {:.4f}, t_d_loss: {:.4f}'
          .format(epoch, opt.num_epoch, GG_loss.item(), s_D_loss.item(), t_D_loss.item()))

    if epoch < 40:
        # NOTE(review): the original computed mm.inverse_transform(...) and
        # then immediately overwrote it — the dead assignment was removed.
        DE = Dataset22[20]
        print(DE)
    # lr_scheduler_G.step()
    # lr_scheduler_D_A.step()
    # lr_scheduler_D_B.step()
    if epoch % 50 == 0:
        DE = Dataset22[20]
        print(DE)
        plt.imshow(DE)
        plt.axis('off')
        plt.savefig('images/fake_{}'.format(epoch))
    if epoch == (opt.num_epoch - 1):
        G.eval()
        torch.save(G, 'Model/Train.pth')
        # BUG FIX: the original reused `i` here, shadowing the batch index.
        for j in range(len(Dataset22)):
            # NOTE(review): inverse_transform applies the LAST-fitted frame's
            # min/max to every frame — confirm this approximation is intended.
            Dataset22[j] = mm.inverse_transform(Dataset22[j])
        # RESOURCE FIX: use a context manager so the HDF5 file is closed
        # (the original leaked the open handle).
        with h5py.File('shengcheng.h5', 'w') as f2:
            f2['data'] = Dataset22
        print(Dataset22[50])



