import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import  tensorflow as tf
from    tensorflow import keras
from    tensorflow.keras import layers


# Conv2DTranspose output-shape calculation:
# padding == 'same':  H = H1 * stride
# padding == 'valid': H = (H1 - 1) * stride + HF
# where H = output size, H1 = input size, HF = filter height
class Generator(keras.Model):
    """DCGAN-style generator.

    Maps a latent vector z of shape [b, 100] to a 64x64 RGB image with
    values in [-1, 1] (tanh output): [b, 100] -> [b, 3, 3, 512] ->
    [b, 9, 9, 256] -> [b, 21, 21, 128] -> [b, 64, 64, 3].

    With padding='valid', Conv2DTranspose output size is
    H_out = (H_in - 1) * stride + kernel_size.
    """

    def __init__(self):
        super().__init__()
        # Project the latent code up to a 3x3x512 feature volume.
        self.fc = layers.Dense(3 * 3 * 512)

        # Three transposed convolutions upsample 3 -> 9 -> 21 -> 64.
        self.conv1 = layers.Conv2DTranspose(256, 3, 3, 'valid')
        self.bn1 = layers.BatchNormalization()

        self.conv2 = layers.Conv2DTranspose(128, 5, 2, 'valid')
        self.bn2 = layers.BatchNormalization()

        self.conv3 = layers.Conv2DTranspose(3, 4, 3, 'valid')

    def call(self, inputs, training=None):
        # [b, 100] -> [b, 3*3*512] -> [b, 3, 3, 512]
        h = tf.reshape(self.fc(inputs), [-1, 3, 3, 512])
        h = tf.nn.leaky_relu(h)

        # [b, 9, 9, 256]
        h = tf.nn.leaky_relu(self.bn1(self.conv1(h), training=training))
        # [b, 21, 21, 128]
        h = tf.nn.leaky_relu(self.bn2(self.conv2(h), training=training))
        # [b, 64, 64, 3], squashed into [-1, 1] for image output
        return tf.tanh(self.conv3(h))

# Conv2D output-shape formula: output = (input + 2*padding - kernel_size) / stride + 1
# With padding='valid', padding counts as 0.
# With padding='same', no calculation needed: output_size == input_size.
class Discriminator(keras.Model):
    """DCGAN-style discriminator.

    Maps a [b, 64, 64, 3] image batch to a real/fake logit of shape
    [b, 1] (raw logits, no sigmoid): 64 -> 20 -> 6 -> 1 spatially.

    With padding='valid', Conv2D output size is
    H_out = (H_in - kernel_size) // stride + 1.
    """

    def __init__(self):
        super().__init__()
        # Three strided convolutions downsample 64 -> 20 -> 6 -> 1.
        # Note: the first conv layer intentionally has no batch norm.
        self.conv1 = layers.Conv2D(64, 5, 3, 'valid')

        self.conv2 = layers.Conv2D(128, 5, 3, 'valid')
        self.bn2 = layers.BatchNormalization()

        self.conv3 = layers.Conv2D(256, 5, 3, 'valid')
        self.bn3 = layers.BatchNormalization()

        # Collapse [b, 1, 1, 256] -> [b, 256] before the final logit.
        self.flatten = layers.Flatten()
        self.fc = layers.Dense(1)

    def call(self, inputs, training=None):
        # [b, 20, 20, 64]
        h = tf.nn.leaky_relu(self.conv1(inputs))
        # [b, 6, 6, 128]
        h = tf.nn.leaky_relu(self.bn2(self.conv2(h), training=training))
        # [b, 1, 1, 256]
        h = tf.nn.leaky_relu(self.bn3(self.conv3(h), training=training))

        # [b, 256] -> [b, 1] raw logits
        return self.fc(self.flatten(h))

def main():
    """Smoke-test both networks on random batches and print the results."""
    d = Discriminator()
    g = Generator()

    # A fake image batch for the discriminator and a latent batch for
    # the generator.
    x = tf.random.normal([2, 64, 64, 3])
    z = tf.random.normal([2, 100])

    prob = d(x)
    print(prob)
    x_hat = g(z)
    print(x_hat.shape)


if __name__ == '__main__':
    main()