from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.python.keras.layers import Layer
import math
import  tensorflow as tf



class SelfAttention(Layer):
    """Single-head self-attention over the spatial positions of a 4-D
    feature map of shape (N, H, W, C), in the style of SAGAN.

    Q, K and V are produced by 1x1 convolutions (channel count preserved),
    attention is computed across the H*W spatial positions, and the result
    is blended back into the input through a learned scalar ``sigma`` that
    is zero-initialized — so the layer starts out as an identity mapping.
    """

    def __init__(self):
        super(SelfAttention, self).__init__()

    def build(self, input_shape):
        # Keras passes a TensorShape here; unpack it directly.
        # BUG FIX: the original read `input_shape.shape`, but a TensorShape
        # has no `.shape` attribute, so build() raised AttributeError.
        n, h, w, c = input_shape
        # Number of spatial positions each query attends over (H * W).
        self.n_feats = h * w
        # 1x1 convolutions project the input into Q, K, V (channels kept at c).
        self.conv_Q = layers.Conv2D(c, 1, padding='same',
                                    kernel_initializer=keras.initializers.glorot_uniform(seed=None), name='Conv_Q')
        self.conv_K = layers.Conv2D(c, 1, padding='same',
                                    kernel_initializer=keras.initializers.glorot_uniform(seed=None), name='Conv_K')
        self.conv_V = layers.Conv2D(c, 1, padding='same',
                                    kernel_initializer=keras.initializers.glorot_uniform(seed=None), name='Conv_V')
        # Final 1x1 projection applied to the attention output.
        self.conv_attn_g = layers.Conv2D(c, 1, padding='same',
                                         kernel_initializer=keras.initializers.glorot_uniform(seed=None),
                                         name='Conv_AttnG')
        # Learned blend weight; zero init keeps output == input at the start
        # of training, which stabilizes optimization.
        self.sigma = self.add_weight(shape=[1], initializer='zeros', trainable=True, name='sigma')
        super(SelfAttention, self).build(input_shape)

    def call(self, x):
        # N may be None in graph mode; only h, w, c are used below.
        n, h, w, c = x.shape
        Q = self.conv_Q(x)                                    # (N, H, W, C)
        Q = tf.reshape(Q, (-1, self.n_feats, Q.shape[-1]))    # (N, H*W, C)
        K = self.conv_K(x)
        K = tf.reshape(K, (-1, self.n_feats, K.shape[-1]))    # (N, H*W, C)
        V = self.conv_V(x)
        V = tf.reshape(V, (-1, self.n_feats, V.shape[-1]))    # (N, H*W, C)
        # K^T for the dot product: (N, H*W, C) -> (N, C, H*W)
        K_T = tf.transpose(K, perm=[0, 2, 1])
        attn = tf.matmul(Q, K_T)                              # (N, H*W, H*W)
        # Scaled dot-product attention: divide logits by sqrt(d_k).
        # BUG FIX: the original used `//` (floor division), which truncates
        # the logits to whole numbers and has a zero gradient, silently
        # breaking both the scaling and backprop through the scores.
        attn = tf.nn.softmax(attn / math.sqrt(c), axis=-1)
        attn_g = tf.matmul(attn, V)                           # (N, H*W, C)
        attn_g = tf.reshape(attn_g, (-1, h, w, attn_g.shape[-1]))
        attn_g = self.conv_attn_g(attn_g)
        # Residual blend: identity at sigma == 0, attention mixed in as it grows.
        output = x + self.sigma * attn_g
        return output