import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense, Dropout

class AttentionLayer(Layer):
    """Additive (Bahdanau-style) attention pooling layer.

    Projects each timestep of a (batch, time, features) sequence through a
    tanh-activated dense transform, scores it against a learned context
    vector, and returns the softmax-weighted sum over the time axis together
    with the attention weights themselves.

    Args:
        attention_dim: Width of the intermediate attention projection.
        dropout_rate: Dropout applied to the pooled output (training only).
    """

    def __init__(self, attention_dim=64, dropout_rate=0.2, **kwargs):
        super().__init__(**kwargs)
        self.attention_dim = attention_dim
        self.dropout_rate = dropout_rate

    def build(self, input_shape):
        # Projection matrix, bias, and context vector for additive scoring.
        feature_dim = input_shape[-1]
        self.W = self.add_weight(
            name='attention_weight',
            shape=(feature_dim, self.attention_dim),
            initializer='glorot_uniform',
            trainable=True,
        )
        self.b = self.add_weight(
            name='attention_bias',
            shape=(self.attention_dim,),
            initializer='zeros',
            trainable=True,
        )
        self.u = self.add_weight(
            name='attention_context',
            shape=(self.attention_dim, 1),
            initializer='glorot_uniform',
            trainable=True,
        )
        self.dropout = Dropout(self.dropout_rate)
        super().build(input_shape)

    def call(self, inputs, training=None):
        """Return (pooled_output, attention_weights).

        pooled_output: (batch, features) — attention-weighted sum over time.
        attention_weights: (batch, time, 1) — softmax weights per timestep.
        """
        # Score each timestep: u^T tanh(W x + b).
        projected = tf.nn.tanh(tf.matmul(inputs, self.W) + self.b)
        scores = tf.squeeze(tf.matmul(projected, self.u), axis=-1)

        # Normalize scores over the time axis, then restore a trailing
        # singleton dim so the weights broadcast against the inputs.
        weights = tf.expand_dims(tf.nn.softmax(scores, axis=-1), axis=-1)

        # Weighted sum over timesteps, with dropout on the pooled vector.
        pooled = tf.reduce_sum(inputs * weights, axis=1)
        pooled = self.dropout(pooled, training=training)

        return pooled, weights

    def get_config(self):
        # Include constructor args so the layer round-trips through
        # model serialization.
        base = super().get_config()
        base.update(
            attention_dim=self.attention_dim,
            dropout_rate=self.dropout_rate,
        )
        return base

class MultiHeadAttention(Layer):
    """Multi-head scaled dot-product self-attention.

    Linearly projects the input into per-head queries, keys, and values,
    computes scaled dot-product attention independently per head, then
    concatenates the heads and projects back to the input feature size.

    NOTE(review): this shadows tf.keras.layers.MultiHeadAttention by name;
    kept as-is because callers reference this class.

    Args:
        num_heads: Number of parallel attention heads.
        head_dim: Per-head query/key/value dimension.
        dropout_rate: Dropout applied to the attention weights (training only).
    """

    def __init__(self, num_heads=4, head_dim=32, dropout_rate=0.1, **kwargs):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.dropout_rate = dropout_rate

    def build(self, input_shape):
        # Shared Q/K/V projections (all heads fused into one Dense each),
        # plus an output projection back to the input's feature size.
        self.dense_q = Dense(self.num_heads * self.head_dim, use_bias=False)
        self.dense_k = Dense(self.num_heads * self.head_dim, use_bias=False)
        self.dense_v = Dense(self.num_heads * self.head_dim, use_bias=False)
        self.dense_output = Dense(input_shape[-1])
        self.dropout = Dropout(self.dropout_rate)
        super(MultiHeadAttention, self).build(input_shape)

    def call(self, inputs, training=None):
        """Return (output, attention_weights).

        output: (batch, time, features) — same feature size as the input.
        attention_weights: (batch, num_heads, time, time) — post-dropout
        softmax attention per head.
        """
        batch_size = tf.shape(inputs)[0]

        # Fused linear projections for all heads.
        q = self.dense_q(inputs)
        k = self.dense_k(inputs)
        v = self.dense_v(inputs)

        # Split the fused projection into heads:
        # (batch, time, heads*dim) -> (batch, time, heads, dim).
        q = tf.reshape(q, [batch_size, -1, self.num_heads, self.head_dim])
        k = tf.reshape(k, [batch_size, -1, self.num_heads, self.head_dim])
        v = tf.reshape(v, [batch_size, -1, self.num_heads, self.head_dim])

        # Move the head axis forward so matmul batches over heads:
        # (batch, heads, time, dim).
        q = tf.transpose(q, [0, 2, 1, 3])
        k = tf.transpose(k, [0, 2, 1, 3])
        v = tf.transpose(v, [0, 2, 1, 3])

        # Scaled dot-product scores: QK^T / sqrt(head_dim).
        scores = tf.matmul(q, k, transpose_b=True)
        scores = scores / tf.math.sqrt(tf.cast(self.head_dim, tf.float32))

        # Normalize over the key axis; dropout regularizes the weights.
        attention_weights = tf.nn.softmax(scores, axis=-1)
        attention_weights = self.dropout(attention_weights, training=training)

        # Weighted sum of values, then merge heads back:
        # (batch, heads, time, dim) -> (batch, time, heads*dim).
        output = tf.matmul(attention_weights, v)
        output = tf.transpose(output, [0, 2, 1, 3])
        output = tf.reshape(output, [batch_size, -1, self.num_heads * self.head_dim])

        # Final projection back to the input feature size.
        output = self.dense_output(output)
        return output, attention_weights

    def get_config(self):
        # FIX: was missing — without it, model save/load reconstructed this
        # layer with default hyperparameters. Mirrors AttentionLayer's config.
        config = super(MultiHeadAttention, self).get_config()
        config.update({
            'num_heads': self.num_heads,
            'head_dim': self.head_dim,
            'dropout_rate': self.dropout_rate
        })
        return config