"""
这是条件GAN（CGAN）网络的实现。发电机

预测和成像（在这种情况下，x和

y）。这是通过组合参数项和随机噪声以及

使用高斯模糊来平滑轮廓。

鉴别器接收图像和参数向量。这些是

结合并最终测试有效性。

损失仅基于二元交叉熵，即结果的有效性。

使用批处理规范化时遇到问题。目前，它被忽略了。

观察：

-在Conv2D层生成之前，将目标参数与图像串联

不稳定溶液。参数依赖关系似乎存在，但没有获得干净的解决方案。

-Conv2D后，将目标参数与处理后的图像数据连接起来

形状清晰，但未获得参数依赖性（测试1000多个epoch）

-后期串联后的LeakyReLU会产生奇怪的结果

-所有Conv2D层的BN都会产生奇怪的结果。

-D上的BN只产生奇怪的结果

-所有Conv2D层上的SN似乎工作正常，与以前一样没有参数依赖性

-学习率下降似乎略有稳定，但问题仍然无法避免

-将所有参数缩放到正态分布有很大帮助！

-以CD为参数，解决方案仍有噪音。（可能把事情搞砸了……重新运行）

-模式崩溃…添加编码器（看起来还好吗？）

-AE损失下降很快，因此AE学习率下降

指导方针：

-平整工程

-关闭工作，但并非真正需要。。。

-D或G或E上无BN

-所有Conv2D层上的序号

-所有Conv2D和Dense层上的LeakyReLU

机翼数据：

-使用半余弦间距以获得更好的LE分辨率（TODO）

-将SG滤波器作为Lambda层应用于生成器，而不是高斯模糊？
"""

################################################################################
# %% Imports
################################################################################

import numpy as np

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, concatenate, Activation, Conv2DTranspose
from tensorflow.keras.layers import Reshape, Dense, BatchNormalization, Conv2D
from tensorflow.keras.layers import GaussianNoise, Dropout, LeakyReLU, Flatten, ReLU
from tensorflow.keras.layers import Lambda, ELU
from SNConv2D import SpectralNormalization
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.losses import BinaryCrossentropy
import tensorflow.keras.backend as K

################################################################################
# %% Class definition
################################################################################

class CGAN():

    """
    CGAN network with curve smoothing implemented in the generator.

    The generator maps (parameter vector, latent noise) -> image; the
    discriminator scores (image, parameter vector) pairs for validity.
    An optional encoder maps an image back to (parameters, noise), so a
    generator+encoder autoencoder can be trained against mode collapse.
    """

    ##### Class initialization
    def __init__(self, DAT_SHP=(64, 2, 1), PAR_DIM=2, LAT_DIM=100, DEPTH=32, LEARN_RATE=0.0002):

        """
        Store the basic settings.

        DAT_SHP    -- shape of the image data, e.g. (points, 2, 1) x/y pairs
        PAR_DIM    -- length of the conditioning parameter vector
        LAT_DIM    -- length of the latent noise vector
        DEPTH      -- base channel count of the convolutional stacks
        LEARN_RATE -- Adam learning rate (beta_1 = 0.5, DCGAN-style)
        """

        self.DAT_SHP = DAT_SHP
        self.PAR_DIM = PAR_DIM
        self.LAT_DIM = LAT_DIM
        self.DEPTH = DEPTH
        # Small-stddev normal initializer, as commonly used for DCGANs.
        self.init = RandomNormal(mean=0.0, stddev=0.02)
        self.LEARN_RATE = LEARN_RATE
        # NOTE(review): `lr` is the legacy Keras argument name; kept for the
        # TF version this file targets (newer TF spells it `learning_rate`).
        self.optimizer = Adam(lr=self.LEARN_RATE, beta_1=0.5)
        # Output smoothing mode: False, "Gaussian" or "SG" (Savitzky-Golay).
        self.BLUR = False
        # Whether to snap the two curve endpoints onto their common mean,
        # closing the generated contour.
        self.CLOSE = True

    def kernel_init(self, shape, dtype=float, partition_info=None):

        """
        Fixed-kernel initializer for the non-trainable smoothing Conv2D.

        Depending on self.BLUR, fills a kernel of the given shape with a
        7-tap Gaussian blur or a 7-point Savitzky-Golay filter (written
        into column [:, :, 0, 0]; all other entries stay zero).

        Raises ValueError when no blur mode is active, instead of silently
        handing Keras an undefined (None) kernel.
        """

        kernel = np.zeros(shape=shape)
        if self.BLUR == "Gaussian":
            # 7-tap Gaussian blur weights (sum to ~1.001).
            kernel[:,:,0,0] = np.array([[0.006],[0.061],[0.242],[0.383],[0.242],[0.061],[0.006]])
        elif self.BLUR == "SG":
            # 7-point Savitzky-Golay smoothing coefficients.
            #kernel[:,:,0,0] = np.array([[-3],[12],[17],[12],[-3]])/35 # WINDOW 5
            kernel[:,:,0,0] = np.array([[-2],[3],[6],[7],[6],[3],[-2]])/21 # WINDOW 7
        else:
            raise ValueError("kernel_init requires self.BLUR to be 'Gaussian' or 'SG', got %r" % (self.BLUR,))
        return kernel

    def edge_padding(self, X):

        """
        Custom padding layer to be called via Lambda. Adding each endpoint
        three times per side yields cleaner edge conditions for the
        subsequent 'valid' 7-tap smoothing convolution.
        """

        if self.BLUR == 'SG':
            # SG filter padding: replicate the endpoints.
            Xlow = X[:, 0, :, :][:, np.newaxis, :, :]
            Xhigh = X[:, -1, :, :][:, np.newaxis, :, :]
            #X = K.concatenate((Xlow, Xlow, X, Xhigh, Xhigh), axis=1)
            X = K.concatenate((Xlow, Xlow, Xlow, X, Xhigh, Xhigh, Xhigh), axis=1)
            return X

        elif self.BLUR == 'Gaussian':
            # Gaussian blur padding with mirror conditions (point reflection
            # about the endpoint) so the endpoints themselves are preserved.
            Xlow0 = X[:, 0, :, :]
            Xlow1 = (2.0*Xlow0 - X[:, 1, :, :])[:, np.newaxis, :, :]
            Xlow2 = (2.0*Xlow0 - X[:, 2, :, :])[:, np.newaxis, :, :]
            Xlow3 = (2.0*Xlow0 - X[:, 3, :, :])[:, np.newaxis, :, :]

            Xhigh0 = X[:, -1, :, :]
            Xhigh1 = (2.0*Xhigh0 - X[:, -2, :, :])[:, np.newaxis, :, :]
            Xhigh2 = (2.0*Xhigh0 - X[:, -3, :, :])[:, np.newaxis, :, :]
            Xhigh3 = (2.0*Xhigh0 - X[:, -4, :, :])[:, np.newaxis, :, :]

            X = K.concatenate((Xlow3,Xlow2,Xlow1,X,Xhigh1,Xhigh2,Xhigh3), axis=1)
            return X

    def closing(self, X):

        """
        Close the generated curve by replacing BOTH endpoints with their
        common mean, so the first and last point coincide.
        """

        Xlow = X[:, 0, :, :][:, np.newaxis, :, :]
        Xhigh = X[:, -1, :, :][:, np.newaxis, :, :]
        Xmean = (Xlow+Xhigh)*0.5
        return K.concatenate((Xmean, X[:, 1:-1, :, :], Xmean), axis=1)

    def build_generator(self):

        """
        Generator network:
        - input : (PAR_DIM) + (LAT_DIM)
        - output: (DAT_SHP)
        """

        y_in = Input(shape=self.PAR_DIM)
        z_in = Input(shape=self.LAT_DIM)

        # Combine condition parameters and latent noise, then project to a
        # low-resolution feature map that is upsampled by Conv2DTranspose.
        net = concatenate([y_in, z_in], axis=-1)
        # NOTE: integer division -- Dense units must be an int (was `/8`,
        # which produced a float, inconsistent with the Reshape below).
        net = Dense(self.DAT_SHP[0]//8*2*self.DEPTH*4)(net)
        net = LeakyReLU(alpha=0.2)(net)
        net = Reshape((self.DAT_SHP[0]//8, 2, self.DEPTH*4))(net)

        # Three upsampling stages (x2 along the point axis each), with
        # spectral normalization on all but the output layer.
        net = SpectralNormalization(Conv2DTranspose(self.DEPTH*2, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        net = SpectralNormalization(Conv2DTranspose(self.DEPTH, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        net = Conv2DTranspose(1, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init, activation='tanh')(net)

        if self.CLOSE:
            # Force a closed contour.
            net = Lambda(self.closing)(net)

        if self.BLUR in ("Gaussian", "SG"):
            # Pad 3 rows at each end, then smooth with the fixed 7-tap kernel;
            # the 'valid' convolution removes the padding again.
            net = Lambda(self.edge_padding)(net)
            net = Conv2D(1, (7,1), strides=(1,1), padding='valid', kernel_initializer=self.kernel_init, trainable=False, use_bias=False)(net)

        X_out = net

        model = Model(inputs=[y_in, z_in], outputs=X_out)

        return model

    def build_discriminator(self):

        """
        Discriminator network:
        - input : (DAT_SHP) + (PAR_DIM)
        - output: (1) validity score
        """

        X_in = Input(self.DAT_SHP)
        y_in = Input(self.PAR_DIM)

        # Instance noise is currently disabled (stddev 0.0).
        Xnet = GaussianNoise(0.00)(X_in)

        # Embed the parameter vector as an extra image channel and
        # concatenate it with the image before the conv stack.
        ynet = Dense(np.prod(self.DAT_SHP))(y_in)
        ynet = Reshape(self.DAT_SHP)(ynet)
        net = concatenate([Xnet, ynet], axis=-1)

        net = SpectralNormalization(Conv2D(self.DEPTH, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        net = SpectralNormalization(Conv2D(self.DEPTH*2, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        net = SpectralNormalization(Conv2D(self.DEPTH*4, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        net = Flatten()(net)

        net = Dense(64)(net)
        net = LeakyReLU(alpha=0.2)(net)

        w_out = Dense(1, activation='sigmoid')(net)

        model = Model(inputs=[X_in, y_in], outputs=w_out)
        # One-sided label smoothing (0.3) to soften the discriminator.
        model.compile(loss=BinaryCrossentropy(label_smoothing=0.3), metrics=['accuracy'], optimizer=Adam(lr=self.LEARN_RATE, beta_1=0.5))

        return model

    def build_encoder(self):

        """
        Encoder network (inverse of the generator):
        - input : (DAT_SHP)
        - output: (PAR_DIM) + (LAT_DIM)
        """

        X_in = Input(self.DAT_SHP)

        # Instance noise is currently disabled (stddev 0.0).
        net = GaussianNoise(0.00)(X_in)

        net = SpectralNormalization(Conv2D(self.DEPTH, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        net = SpectralNormalization(Conv2D(self.DEPTH*2, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        net = SpectralNormalization(Conv2D(self.DEPTH*4, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)

        net = Flatten()(net)

        net = Dense(64)(net)
        net = LeakyReLU(alpha=0.2)(net)

        # Two linear heads: recovered parameters and recovered noise.
        y_out = Dense(self.PAR_DIM, activation='linear')(net)

        z_out = Dense(self.LAT_DIM, activation='linear')(net)

        model = Model(inputs=[X_in], outputs=[y_out, z_out])

        return model

    def build_gan(self, g_model, d_model):

        """
        Combined GAN network stacking generator and discriminator for
        generator training. The discriminator is NOT trained in this model
        (its weights are frozen here).
        """

        d_model.trainable = False

        y_in = Input(shape=self.PAR_DIM)
        z_in = Input(shape=self.LAT_DIM)

        X = g_model([y_in, z_in])

        w_out = d_model([X, y_in])

        gan_model = Model(inputs = [y_in, z_in], outputs = [w_out])
        gan_model.compile(loss=['binary_crossentropy'], metrics=['accuracy'], optimizer=Adam(lr=self.LEARN_RATE, beta_1=0.5))

        return gan_model

    def build_autoencoder(self, e_model, g_model):
        """
        Autoencoder combining encoder and generator:
        - input : image of DAT_SHP
        - output: image of DAT_SHP

        Uses a reduced learning rate (LEARN_RATE/4) since the AE loss was
        observed to drop much faster than the GAN losses.
        """

        X_in = Input(self.DAT_SHP)

        y, z = e_model(X_in)

        X_out = g_model([y, z])

        model = Model(inputs = X_in, outputs = X_out)
        model.compile(loss='mean_absolute_error', optimizer=Adam(lr=self.LEARN_RATE/4, beta_1=0.5))
        return model