#-*- coding:utf-8 -*-
# author:贤宁
# datetime:2021/11/9 15:43
# software: PyCharm
import torch
from torch import nn
import torch.nn.functional as F
from Block_Stack import *
from ConvGRU import ConvGRU
from torchvision.transforms import RandomCrop
from GRU_real import ConvGRUCell


"""Generator: split into the conditioning stack and the sampler."""
"""
The generator implemented here produces the prediction directly: the
conditioning stack and the sampler are both implemented elsewhere, so we
only need to call them and combine their outputs.
"""

# Weight initialisation
def weights_init(m):
    """DCGAN-style weight init, intended for use via ``net.apply(weights_init)``.

    Conv* layers get weights ~ N(0, 0.02); BatchNorm* layers get weights
    ~ N(1, 0.02) and zero bias. All other modules are left untouched.

    :param m: an ``nn.Module`` visited by ``Module.apply``
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # Name-based matching also hits modules such as ConvGRUCell (or
        # container modules) that expose no .weight tensor — skip those
        # instead of raising AttributeError.
        if getattr(m, 'weight', None) is not None:
            nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        # BatchNorm created with affine=False has weight/bias set to None.
        if m.weight is not None:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
        if m.bias is not None:
            nn.init.constant_(m.bias.data, 0)



# Chain all of the ConvGRUs together here
class NowcastingSampler(nn.Module):
    """Sampler of the nowcasting generator.

    For every forecast step it runs the latent input through four
    ConvGRU + G_Block levels (coarse to fine, channels halving at each
    level), then BatchNorm -> LeakyReLU -> 1x1 conv -> depth-to-space to
    produce one predicted frame. All forecast steps share the same module
    instances (the ModuleDict entries all reference the same layers), so
    the weights are shared across lead times.
    """

    def __init__(self,
        forecast_steps,
        latent_channels,
        context_channels,
        output_channels):
        """
        :param forecast_steps: number of lead times (e.g. 18)
        :param latent_channels: e.g. 768
        :param context_channels: e.g. 384
        :param output_channels: e.g. 1
        """
        super(NowcastingSampler, self).__init__()
        self.forecast_steps = forecast_steps
        # Level 1 (coarsest) ... level 4 (finest); channel counts halve.
        self.convGRU1 = ConvGRUCell(latent_channels, context_channels, 3)
        self.g1 = G_Block(latent_channels, latent_channels // 2)
        self.convGRU2 = ConvGRUCell(
            input_size=latent_channels // 2,
            hidden_size=context_channels // 2,
            kernel_size=3,
        )
        self.g2 = G_Block(latent_channels // 2, latent_channels // 4)
        self.convGRU3 = ConvGRUCell(
            input_size=latent_channels // 4,
            hidden_size=context_channels // 4,
            kernel_size=3,
        )
        self.g3 = G_Block(latent_channels // 4, latent_channels // 8)
        self.convGRU4 = ConvGRUCell(
            input_size=latent_channels // 8,
            hidden_size=context_channels // 8,
            kernel_size=3,
        )
        self.g4 = G_Block(latent_channels // 8, latent_channels // 16)
        # Output head.
        self.bn = torch.nn.BatchNorm2d(latent_channels // 16)
        self.relu = torch.nn.LeakyReLU(0.2, inplace=True)
        self.conv_1x1 = spectral_norm(
            torch.nn.Conv2d(
                in_channels=latent_channels // 16, out_channels=4 * output_channels, kernel_size=1
            )
        )
        self.D2S = nn.PixelShuffle(upscale_factor=2)
        # ModuleDict gives name-based lookup, ModuleList an iterable stack;
        # every forecast step maps to the SAME shared layer instances.
        shared_layers = [
            self.convGRU1, self.g1,
            self.convGRU2, self.g2,
            self.convGRU3, self.g3,
            self.convGRU4, self.g4,
            self.bn, self.relu, self.conv_1x1, self.D2S,
        ]
        self.stacks = nn.ModuleDict(
            {f"forecast_{step}": nn.ModuleList(shared_layers) for step in range(forecast_steps)}
        )

    def forward(self, condition_state, latent_dim):
        """
        :param condition_state: outputs of the context conditioning stack
                                (finest to coarsest resolution)
        :param latent_dim: output of the latent conditioning stack, used as
                           the ConvGRU input
        :return: predicted frames stacked along dim 1
        """
        predictions = []
        hidden = list(condition_state)
        # Tile the latent over the batch, then add a leading length-1 dim.
        batch_size = hidden[0].size(0)
        latent_dim = torch.cat(batch_size * [latent_dim])
        latent_dim = torch.unsqueeze(latent_dim, dim=1)
        for step in range(self.forecast_steps):
            layers = self.stacks[f"forecast_{step}"]
            x = latent_dim
            # The four GRU/G_Block levels run coarse -> fine while the hidden
            # states are stored fine -> coarse, hence the reversed index.
            # Each level: ConvGRU (updating its hidden state), squeeze the
            # leading dim, G_Block, and (except after g4) re-add the dim so
            # the next ConvGRU sees the expected rank.
            for level in range(4):
                state_idx = 3 - level
                x = layers[2 * level](x, prev_state=hidden[state_idx])
                x = torch.squeeze(x, dim=0)
                hidden[state_idx] = x
                x = layers[2 * level + 1](x)
                if level < 3:
                    x = torch.unsqueeze(x, dim=1)
            x = layers[8](x)   # BatchNorm
            x = layers[9](x)   # LeakyReLU
            x = layers[10](x)  # spectrally-normalised 1x1 conv
            x = layers[11](x)  # depth-to-space (PixelShuffle)
            predictions.append(x)
        # Stack the per-step frames into a single tensor.
        return torch.stack(predictions, dim=1)
# Generator
class Generator(nn.Module):
    """Full generator: conditioning stack + latent stack feeding the sampler."""

    def __init__(self,
        conditioning_stack,
        latent_stack,
        sampler):
        """
        :param conditioning_stack: torch.nn.Module producing the context states
        :param latent_stack: torch.nn.Module producing the latent input
        :param sampler: module combining both into the forecast
        """
        super().__init__()
        self.conditioning_stack = conditioning_stack
        self.latent_stack = latent_stack
        self.sampler = sampler

    def forward(self, x):
        """Run both stacks on the input, then sample the prediction."""
        context = self.conditioning_stack(x)
        latent = self.latent_stack(x)
        return self.sampler(context, latent)
"""Generator ---- end"""

"""Discriminators"""
# Spatial discriminator
class Spatial_Discriminator(nn.Module):
    """Spatial discriminator: scores individual frames.

    Randomly samples ``num_timesteps`` frames from the input sequence,
    downsamples each, runs it through a stack of D_Blocks, sum-pools the
    per-frame features, and maps the pooled representation to one score.
    """
    def __init__(self,input_channels,num_timesteps,num_layers,conv_type = "standard"):
        """
        :param input_channels: 1
        :param num_timesteps: per the paper, 8 of the 18 lead times are
                              sampled at random (8/18)
        :param num_layers: number of intermediate D_Blocks
        :param conv_type: convolution variant forwarded to D_Block
        """
        super(Spatial_Discriminator,self).__init__()
        self.num_timesteps = num_timesteps
        # 2x2 average pooling
        self .mean_pool = nn.AvgPool2d(2)
        self.conv_type = conv_type
        # Expands the input to 4 channels before the first D_Block.
        self.Add_C= nn.Conv2d(input_channels,4,3)
        internal_chn = 24
        self.d1 = D_Block(4*input_channels,2 * internal_chn * input_channels,first_relu=False,conv_type=conv_type,
        )
        self.intermediate_dblocks = torch.nn.ModuleList()

        # Each intermediate D_Block doubles the channel count.
        for _ in range(num_layers):
            internal_chn *= 2
            self.intermediate_dblocks.append(
                D_Block(internal_chn * input_channels, 2 * internal_chn * input_channels,conv_type=conv_type,)
            )
        self.d6 = D_Block(
            2 * internal_chn * input_channels,2 * internal_chn * input_channels,keep_same_output=True,conv_type=conv_type,
        )

        # Spectrally normalized linear layer for binary classification
        self.fc = spectral_norm(torch.nn.Linear(2 * internal_chn * input_channels, 1))
        self.relu = nn.LeakyReLU(0.2,inplace=True)

    def forward(self,x):
        """Score a frame sequence.

        :param x: sequence tensor; dim 1 is indexed per-timestep, so it is
                  presumably (batch, time, channels, H, W) — TODO confirm
        :return: discriminator score
        """
        # Randomly choose num_timesteps frame indices.
        # NOTE(review): torch.randint samples WITH replacement, so the same
        # timestep can be picked more than once — the paper describes 8
        # distinct frames; confirm intent.
        idxs = torch.randint(low=0, high=x.size()[1], size=(self.num_timesteps,))
        representations = []
        for idx in idxs:
            #print(x[:, idx, :, :, :].size())
            rep = self.mean_pool(x[:, idx, :, :, :])
            rep = F.interpolate(
                rep, mode="trilinear" if self.conv_type == "3d" else "bilinear", scale_factor=0.5
            )   # further 2x downsampling
            rep = self.Add_C(rep)
            # rep = self.S2D(rep)
            rep = self.d1(rep)
            for d in self.intermediate_dblocks:
                rep = d(rep)
            rep = self.d6(rep)
            #print(rep.size())
            # Global sum-pool over the spatial positions.
            rep = torch.sum(rep.view(rep.size(0), rep.size(1), -1), dim=2)
            representations.append(rep)
        # Stack the sampled frames' features and sum them.
        x = torch.stack(representations, dim=0).sum(dim=0)
        x = self.fc(x)
        x = self.relu(x)
        x = torch.squeeze(x,0)
        return x

# Temporal discriminator
class Temporal_Discriminator(nn.Module):
    """Temporal discriminator: scores the realism of a whole frame sequence.

    A random spatial crop of the sequence is resized, passed through two 3D
    D_Blocks (mixing time into the features), then per-frame 2D D_Blocks;
    the per-frame features are sum-pooled and mapped to a single score.
    """
    def __init__(self,input_channels,crop_size,num_layers,conv_type="standard"):
        """
        :param input_channels: channels per frame (1, or 12? — see original note)
        :param crop_size: random crop size applied to the sequence; the
                          paper uses 128x128
        :param num_layers: number of intermediate 2D D_Blocks (e.g. 3)
        :param conv_type: convolution variant forwarded to D_Block
        """
        super(Temporal_Discriminator, self).__init__()
        self.transform = RandomCrop(crop_size)
        # Space-to-depth. Fix: the original used nn.PixelShuffle(upscale_factor=0.5),
        # which is invalid (the factor must be a positive integer);
        # PixelUnshuffle(2) is the intended inverse operation. Currently
        # unused in forward() (the call below is commented out).
        self.S2D = nn.PixelUnshuffle(downscale_factor=2)
        internal_chn = 48
        # Two 3D D_Blocks operate on (B, C, T, H, W).
        self.d1 = D_Block(4 * input_channels,internal_chn * input_channels,conv_type="3d",first_relu=False)
        self.d2 = D_Block(internal_chn * input_channels,2 * internal_chn * input_channels,conv_type="3d")
        self.intermediate_dblocks = torch.nn.ModuleList()
        # Each intermediate D_Block doubles the channel count.
        for _ in range(num_layers):
            internal_chn *= 2
            self.intermediate_dblocks.append(
                D_Block(internal_chn * input_channels,2 * internal_chn * input_channels,conv_type=conv_type)
            )

        self.d_last = D_Block(2 * internal_chn * input_channels,2 * internal_chn * input_channels,keep_same_output=True,conv_type=conv_type)
        # Spectrally normalized linear head producing one score.
        self.fc = spectral_norm(torch.nn.Linear(2 * internal_chn * input_channels, 1))
        self.relu = nn.LeakyReLU(0.2,inplace=True)
        # NOTE(review): defined but never used in forward().
        self.sigmoid= nn.Sigmoid()

    def forward(self,x):
        """Score a frame sequence.

        :param x: frame sequence; a batch dim is added below, so the input
                  is presumably unbatched (time, channels, H, W) — TODO confirm
        :return: discriminator score
        """
        x = self.transform(x)  # random spatial crop
        x = torch.unsqueeze(x,0)  # add leading batch dimension
        x = F.interpolate(
            x, size=[4, 64, 64]
        )
        #x = self.S2D(x)
        # Swap dims 1 and 2 so the 3D D_Blocks see (B, C, T, H, W).
        x = x.permute(0, 2, 1, 3, 4)
        x = self.d1(x)
        x = self.d2(x)
        # Swap back to time-major layout for per-frame 2D processing.
        x = x.permute(0, 2, 1, 3, 4)
        representations = []
        for idx in range(x.size(1)):
            rep = x[:, idx, :, :, :]
            for d in self.intermediate_dblocks:
                rep = d(rep)
            rep = self.d_last(rep)
            # Global sum-pool over the spatial positions.
            rep = torch.sum(rep.view(rep.size(0), rep.size(1), -1), dim=2)
            representations.append(rep)
        # Sum the per-frame features, then map to one score.
        x = torch.stack(representations, dim=0).sum(dim=0)
        x = self.fc(x)
        x = self.relu(x)
        x = torch.squeeze(x,0)
        return x
"""Discriminators --- end"""