import math


import tensorflow as tf
from tensorflow import keras

from ZhengqiLoader import ZhengqiLoader

class Generate(keras.Model):
    """CBAM attention (channel + spatial) followed by self-attention and an MLP head.

    Expects 4-D input of shape (batch, h, w, c). The CBAM stage refines the
    features and adds a residual; the self-attention stage mixes the h*w
    spatial positions; a Conv + two Dense layers produce a 6-unit output.
    """

    def __init__(self, reduction_ratio, input_shape):
        """
        Args:
            reduction_ratio: Fraction by which the channel-attention MLP
                bottleneck shrinks the channel count (e.g. 0.5).
            input_shape: (batch, height, width, channels); batch may be None.
        """
        super().__init__()
        self.reduction_ratio = reduction_ratio
        self.n, self.h, self.w, self.c = input_shape
        # Fixed typo (was `bath_size`); hidden_num is the channel count used
        # by the channel-attention MLPs.
        self.batch_size, self.hidden_num = self.n, self.c
        # h and w are static, so the number of spatial positions can be
        # computed once here instead of on every call().
        self.n_feats = self.h * self.w
        # --- CBAM channel attention ---
        self.CBAM_flatten_maxpool = tf.keras.layers.Flatten()
        self.CBAM_flatten_avgpool = tf.keras.layers.Flatten()
        self.mlp_1_max = tf.keras.layers.Dense(
            units=int(self.hidden_num * self.reduction_ratio), activation=tf.nn.relu)
        self.mlp_2_max = tf.keras.layers.Dense(units=self.hidden_num)
        self.mlp_1_avg = tf.keras.layers.Dense(
            units=int(self.hidden_num * self.reduction_ratio), activation=tf.nn.relu)
        # NOTE(review): this second Dense has a relu while the max branch's
        # (mlp_2_max) does not; standard CBAM applies no activation before the
        # shared sigmoid — confirm the asymmetry is intentional.
        self.mlp_2_avg = tf.keras.layers.Dense(
            units=self.hidden_num, activation=tf.nn.relu)
        # --- CBAM spatial attention: 3x3 conv over the [max;avg] pooled maps ---
        self.conv_layer = tf.keras.layers.Conv2D(
            self.c, kernel_size=(3, 3), padding="same", activation=None)
        # --- Self-attention Q/K/V projections ---
        self.conv_Q = tf.keras.layers.Conv2D(self.c, (3, 3), padding='same',
                                    kernel_initializer=keras.initializers.glorot_uniform(seed=None), name='Conv_Q')
        self.conv_K = tf.keras.layers.Conv2D(self.c, (3, 3), padding='same',
                                    kernel_initializer=keras.initializers.glorot_uniform(seed=None), name='Conv_K')
        self.conv_V = tf.keras.layers.Conv2D(self.c, (3, 3), padding='same',
                                    kernel_initializer=keras.initializers.glorot_uniform(seed=None), name='Conv_V')
        self.conv_attn_g = tf.keras.layers.Conv2D(self.c, (3, 3), padding='same',
                                         kernel_initializer=keras.initializers.glorot_uniform(seed=None),
                                         name='Conv_AttnG')
        # Learnable gate on the attention residual; initialized to zero so the
        # model starts as identity w.r.t. the attention branch.
        self.sigma = self.add_weight(shape=[1], initializer='zeros', trainable=True, name='sigma')
        # --- Output head ---
        # Fixed: this layer was named 'Conv_V', colliding with self.conv_V's
        # layer name above; duplicate names break name-based layer lookup.
        self.conv_1 = tf.keras.layers.Conv2D(10, (3, 3), padding='same',
                                             kernel_initializer=keras.initializers.glorot_uniform(seed=None),
                                             name='Conv_1')
        self.flatten = tf.keras.layers.Flatten()
        self.dense_1 = tf.keras.layers.Dense(10)
        self.dense_out = tf.keras.layers.Dense(6)

    def call(self, x, training=None, mask=None):
        """Run CBAM + self-attention on `x` and return a (batch, 6) tensor.

        Args:
            x: 4-D tensor of shape (batch, h, w, c) matching `input_shape`.
            training: Unused; kept for the keras.Model.call signature.
            mask: Unused; kept for the keras.Model.call signature.
        """
        # --- Channel attention ---
        # Global max/avg pool over the spatial axes -> (batch, 1, 1, c).
        maxpool_channel = tf.reduce_max(tf.reduce_max(
            x, axis=1, keepdims=True), axis=2, keepdims=True)
        avgpool_channel = tf.reduce_mean(tf.reduce_mean(
            x, axis=1, keepdims=True), axis=2, keepdims=True)
        maxpool_channel = self.CBAM_flatten_maxpool(maxpool_channel)
        avgpool_channel = self.CBAM_flatten_avgpool(avgpool_channel)
        # Bottleneck MLP on each pooled descriptor, reshaped back for broadcast.
        mlp_max_out = self.mlp_1_max(maxpool_channel)
        mlp_max_dense = tf.reshape(self.mlp_2_max(
            mlp_max_out), [-1, 1, 1, self.hidden_num])
        mlp_avg_out = self.mlp_1_avg(avgpool_channel)
        mlp_avg_dense = tf.reshape(self.mlp_2_avg(
            mlp_avg_out), [-1, 1, 1, self.hidden_num])
        channel_attention = tf.nn.sigmoid(mlp_max_dense + mlp_avg_dense)
        channel_refined_feature = x * channel_attention
        # --- Spatial attention ---
        # Max/avg pool over the channel axis -> two (batch, h, w, 1) maps.
        maxpool_spatial = tf.reduce_max(
            channel_refined_feature, axis=3, keepdims=True)
        avgpool_spatial = tf.reduce_mean(
            channel_refined_feature, axis=3, keepdims=True)
        max_avg_pool_spatial = tf.concat(
            [maxpool_spatial, avgpool_spatial], axis=3)
        conv_layer = self.conv_layer(max_avg_pool_spatial)
        spatial_attention = tf.nn.sigmoid(conv_layer)
        refined_feature = channel_refined_feature * spatial_attention
        # Residual: CBAM output has the same shape as its input, so the
        # self-attention stage below can consume it directly.
        output_layer = refined_feature + x
        # --- Self-attention over the h*w spatial positions ---
        Q = self.conv_Q(output_layer)
        Q = tf.reshape(Q, (-1, self.n_feats, Q.shape[-1]))  # (batch, h*w, c)
        K = self.conv_K(output_layer)
        K = tf.reshape(K, (-1, self.n_feats, K.shape[-1]))  # (batch, h*w, c)
        V = self.conv_V(output_layer)
        V = tf.reshape(V, (-1, self.n_feats, V.shape[-1]))
        K_T = tf.transpose(K, perm=[0, 2, 1])
        attn = tf.matmul(Q, K_T)
        attn = tf.nn.softmax(attn, axis=-1)
        attn_g = tf.matmul(attn, V)
        attn_g = tf.reshape(attn_g, (-1, self.h, self.w, attn_g.shape[-1]))
        attn_g = self.conv_attn_g(attn_g)
        # Gated residual: sigma starts at 0, so attention is phased in.
        SA_output = output_layer + self.sigma * attn_g
        # --- Output head ---
        # NOTE(review): the final relu clamps outputs to >= 0 — confirm the
        # 6 targets are non-negative.
        sa_output = tf.nn.relu(self.conv_1(SA_output))
        sa_output = self.flatten(sa_output)
        sa_output = tf.nn.relu(self.dense_1(sa_output))
        sa_output = tf.nn.relu(self.dense_out(sa_output))
        return sa_output


if __name__ == '__main__':
    # Smoke test: load the Zhengqi dataset, then build and summarize the model.
    data_path = './zhengqi_train.txt'
    batch_size = 10
    loader = ZhengqiLoader(data_path)
    x_train, train_dataset, y_train, x_test, y_test = loader.preprocess(batch_size)
    print(x_train.shape)
    model_shape = (None, 5, 5, 30)
    model = Generate(reduction_ratio=0.5, input_shape=model_shape)
    model.build(input_shape=model_shape)
    model.summary()
