# -*- encoding: utf-8 -*-
'''
@File    :   encoders.py
@Time    :   2021/11/22 8:58
@Author  :   ZhangChaoYang
@Desc    :   编码器
'''

import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense, BatchNormalization, Flatten, LeakyReLU, Conv2D, Dropout, LayerNormalization


class FCEncoder(Model):
    """Encoder built from a stack of fully-connected layers.

    The input is flattened, then passed through ``len(hidden_dims)`` Dense
    layers. Every layer except the last applies ``activation``; the final
    layer is linear so the encoding's output range is unconstrained.
    Optionally a BatchNormalization layer precedes each Dense layer.
    """

    def __init__(self, hidden_dims, activation=tf.nn.relu, batch_norm=True):
        """
        Args:
            hidden_dims: sizes of the Dense layers, in order; the last
                entry is the dimension of the produced encoding.
            activation: activation used by all but the final Dense layer.
            batch_norm: if True, insert a BatchNormalization layer before
                each Dense layer.
        """
        super(FCEncoder, self).__init__()
        layers = [Flatten()]
        for hidden_dim in hidden_dims[:-1]:
            if batch_norm:
                layers.append(BatchNormalization())
            layers.append(Dense(units=hidden_dim, activation=activation))
        if batch_norm:
            layers.append(BatchNormalization())
        # No activation on the final encoder layer: leave the output
        # range unconstrained.
        layers.append(Dense(units=hidden_dims[-1]))
        self.main = Sequential(layers)

    def call(self, inputs, training=None, mask=None):
        # Forward `training` so BatchNormalization uses batch statistics
        # while training and moving averages at inference. The original
        # dropped the flag, which silently froze the BN layers in
        # inference mode whenever batch_norm=True.
        return self.main(inputs, training=training)


class ConvEncoder(Model):
    """Encoder built from a stack of strided Conv2D blocks.

    Each block is Conv2D (no bias, 'same' padding) -> BatchNormalization
    -> activation layer; the final feature map is flattened to a vector.
    """

    def __init__(self, kernel_size, strides, filters, active_layer=LeakyReLU):
        """
        Args:
            kernel_size: kernel size passed to every Conv2D layer.
            strides: (stride_h, stride_w) pair used by every Conv2D layer.
            filters: iterable of output-channel counts, one per conv block.
            active_layer: activation-layer class instantiated after each
                BatchNormalization (default: LeakyReLU).
        """
        super(ConvEncoder, self).__init__()
        self.strides = strides
        layers = []
        # `num_filters` instead of `filter` to avoid shadowing the builtin.
        for num_filters in filters:
            layers.append(
                Conv2D(filters=num_filters, kernel_size=kernel_size,
                       strides=strides, padding='same', use_bias=False))
            layers.append(BatchNormalization(momentum=0.9, epsilon=1e-5))
            layers.append(active_layer())
        layers.append(Flatten())
        self.main = Sequential(layers)

    def call(self, inputs, training=None, mask=None):
        # The spatial dimensions must be divisible by the strides so a
        # matching transposed-convolution decoder can restore the
        # original size.
        assert inputs.shape[1] % self.strides[0] == 0
        assert inputs.shape[2] % self.strides[1] == 0

        # Forward `training` so the BatchNormalization layers switch
        # between batch statistics (training) and moving averages
        # (inference); the original never passed the flag through.
        return self.main(inputs, training=training)
