"""Multi-head attention mechanism for Tacotron2 Chinese speech synthesis"""

import tensorflow as tf
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import BahdanauAttention
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops, math_ops, nn_ops, variable_scope
import numpy as np


def scaled_dot_product_attention(Q, K, V, mask=None):
    """
    Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V.

    Args:
        Q: Query tensor of shape [batch_size, num_heads, seq_len_q, d_k]
        K: Key tensor of shape [batch_size, num_heads, seq_len, d_k]
        V: Value tensor of shape [batch_size, num_heads, seq_len, d_v]
        mask: Optional boolean tensor of shape [batch_size, seq_len];
            positions where the mask is False are excluded from attention.

    Returns:
        A (context, weights) pair: the attended values and the attention
        distribution over key positions.
    """
    depth = tf.cast(tf.shape(K)[-1], tf.float32)

    # Similarity logits, scaled by sqrt(d_k) to keep softmax gradients stable.
    logits = tf.matmul(Q, K, transpose_b=True) / tf.sqrt(depth)

    if mask is not None:
        # Broadcast [B, T] -> [B, 1, 1, T] so one mask covers every head
        # and every query position.
        broadcast_mask = mask[:, tf.newaxis, tf.newaxis, :]
        neg_inf = tf.fill(tf.shape(logits), -1e9)
        logits = tf.where(broadcast_mask, logits, neg_inf)

    # Normalize over the key axis, then blend the values.
    weights = tf.nn.softmax(logits, axis=-1)
    context = tf.matmul(weights, V)

    return context, weights


class MultiHeadAttention(tf.layers.Layer):
    """
    Multi-head attention layer.

    Projects queries, keys and values into `num_heads` subspaces of size
    d_model // num_heads, runs scaled dot-product attention in each head,
    then concatenates the heads and applies a final linear projection.
    """

    def __init__(self, d_model, num_heads, dropout_rate=0.1, name="multihead_attention"):
        """
        Args:
            d_model: Total model dimension; must be divisible by num_heads.
            num_heads: Number of parallel attention heads.
            dropout_rate: Dropout probability applied to the output projection.
            name: Layer name scope.
        """
        super(MultiHeadAttention, self).__init__(name=name)
        self.d_model = d_model
        self.num_heads = num_heads
        self.dropout_rate = dropout_rate

        assert d_model % num_heads == 0
        self.d_k = d_model // num_heads  # per-head query/key depth
        self.d_v = d_model // num_heads  # per-head value depth

        # Linear transformations for Q, K, V
        self.W_q = tf.layers.Dense(d_model, name="W_q")
        self.W_k = tf.layers.Dense(d_model, name="W_k")
        self.W_v = tf.layers.Dense(d_model, name="W_v")

        # Output projection
        self.W_o = tf.layers.Dense(d_model, name="W_o")

        # Dropout layer
        self.dropout = tf.layers.Dropout(dropout_rate)

    def split_heads(self, x, batch_size):
        """Reshape [B, T, d_model] -> [B, num_heads, T, d_k]."""
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.d_k))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def combine_heads(self, x, batch_size):
        """Reshape [B, num_heads, T, d_k] -> [B, T, d_model]."""
        x = tf.transpose(x, perm=[0, 2, 1, 3])
        return tf.reshape(x, (batch_size, -1, self.d_model))

    def call(self, query, key, value, mask=None, training=None):
        """
        Run multi-head attention.

        Args:
            query: [batch_size, seq_len_q, d_model]
            key: [batch_size, seq_len_k, d_model]
            value: [batch_size, seq_len_k, d_model]
            mask: optional boolean mask [batch_size, seq_len_k]; False marks
                positions to exclude (forwarded to scaled_dot_product_attention)
            training: bool or graph tensor selecting train/inference behavior

        Returns:
            (output, attention_weights) where output is
            [batch_size, seq_len_q, d_model].
        """
        batch_size = tf.shape(query)[0]

        # Linear transformations and split into multiple heads
        Q = self.split_heads(self.W_q(query), batch_size)
        K = self.split_heads(self.W_k(key), batch_size)
        V = self.split_heads(self.W_v(value), batch_size)

        # Apply scaled dot-product attention
        attention_output, attention_weights = scaled_dot_product_attention(Q, K, V, mask)

        # Combine heads
        attention_output = self.combine_heads(attention_output, batch_size)

        # Final linear transformation
        output = self.W_o(attention_output)

        # BUG FIX: dropout was previously gated behind a Python-level
        # `if training:` check, which is invalid when `training` is a
        # graph-mode tensor/placeholder (tensor truthiness). The Dropout
        # layer already resolves the train/inference switch from its
        # `training` argument, so call it unconditionally.
        output = self.dropout(output, training=training)

        return output, attention_weights


class LocationSensitiveMultiHeadAttention(BahdanauAttention):
    """
    Location-sensitive multi-head attention mechanism for Tacotron2.
    Combines the benefits of location-sensitive attention with multi-head attention.

    Location features are produced by convolving the previous alignments and
    are added to the keys before multi-head scoring; the per-head attention
    weights are averaged back into a single [batch_size, max_time] alignment
    so the result keeps the shape the base mechanism expects.

    NOTE(review): `__call__` takes an extra `prev_max_attentions` argument and
    returns a 3-tuple, which differs from the base BahdanauAttention call
    signature — presumably a custom attention wrapper consumes this; verify
    against the caller.
    """

    def __init__(self,
                 num_units,
                 memory,
                 hparams,
                 is_training,
                 num_heads=8,
                 mask_encoder=True,
                 memory_sequence_length=None,
                 smoothing=False,
                 cumulate_weights=True,
                 name='LocationSensitiveMultiHeadAttention'):
        """
        Initialize the multi-head location-sensitive attention mechanism.

        Args:
            num_units: The depth of the query mechanism
            memory: The memory to query; usually the output of an RNN encoder
            hparams: Hyperparameters (reads attention_filters, attention_kernel,
                synthesis_constraint, attention_win_size,
                synthesis_constraint_type)
            is_training: Whether in training mode
            num_heads: Number of attention heads
            mask_encoder: Boolean, whether to mask encoder paddings
            memory_sequence_length: Sequence lengths for the batch entries in memory
            smoothing: Boolean, whether to use smoothing normalization
            cumulate_weights: Boolean, whether to cumulate attention weights
            name: Name for the attention mechanism

        Raises:
            ValueError: if num_units is not divisible by num_heads.
        """
        self.num_heads = num_heads
        self.d_model = num_units
        # Per-head depths (floor division; divisibility is validated below).
        self.d_k = num_units // num_heads
        self.d_v = num_units // num_heads

        # Ensure d_model is divisible by num_heads
        if num_units % num_heads != 0:
            raise ValueError(f"num_units ({num_units}) must be divisible by num_heads ({num_heads})")

        # probability_fn=None makes BahdanauAttention fall back to softmax.
        normalization_function = _smoothing_normalization if smoothing else None
        memory_length = memory_sequence_length if mask_encoder else None

        super(LocationSensitiveMultiHeadAttention, self).__init__(
            num_units=num_units,
            memory=memory,
            memory_sequence_length=memory_length,
            probability_fn=normalization_function,
            name=name)

        # Location-sensitive components: convolve previous alignments into
        # location features, then project them to the attention depth.
        self.location_convolution = tf.layers.Conv1D(
            filters=hparams.attention_filters,
            kernel_size=hparams.attention_kernel,
            padding='same',
            use_bias=True,
            bias_initializer=tf.zeros_initializer(),
            name='location_features_convolution')

        self.location_layer = tf.layers.Dense(
            units=num_units,
            use_bias=False,
            dtype=tf.float32,
            name='location_features_layer')

        # Multi-head attention components
        self.W_q = tf.layers.Dense(num_units, name="W_q")
        self.W_k = tf.layers.Dense(num_units, name="W_k")
        self.W_v = tf.layers.Dense(num_units, name="W_v")
        self.W_o = tf.layers.Dense(num_units, name="W_o")

        # Attention parameters
        self._cumulate = cumulate_weights
        # Synthesis-time window constraint is only active at inference.
        self.synthesis_constraint = hparams.synthesis_constraint and not is_training
        self.attention_win_size = tf.convert_to_tensor(hparams.attention_win_size, dtype=tf.int32)
        self.constraint_type = hparams.synthesis_constraint_type

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, d_k).

        [B, T, d_model] -> [B, num_heads, T, d_k].
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.d_k))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def combine_heads(self, x, batch_size):
        """Combine the heads back into a single dimension.

        [B, num_heads, T, d_k] -> [B, T, d_model].
        """
        x = tf.transpose(x, perm=[0, 2, 1, 3])
        return tf.reshape(x, (batch_size, -1, self.d_model))

    def _compute_multihead_attention(self, query, keys, values, location_features, mask=None):
        """
        Compute multi-head attention with location features.

        Args:
            query: Query tensor [batch_size, 1, d_model]
            keys: Key tensor [batch_size, max_time, d_model]
            values: Value tensor [batch_size, max_time, d_model]
            location_features: Location features [batch_size, max_time, d_model]
            mask: Optional mask tensor (boolean; False marks padding positions)

        Returns:
            Attention output [batch_size, 1, d_model] and head-averaged
            attention weights [batch_size, max_time]
        """
        batch_size = tf.shape(query)[0]
        max_time = tf.shape(keys)[1]  # NOTE(review): unused below

        # Add location features to keys
        keys_with_location = keys + location_features

        # Linear transformations
        Q = self.W_q(query)  # [batch_size, 1, d_model]
        K = self.W_k(keys_with_location)  # [batch_size, max_time, d_model]
        V = self.W_v(values)  # [batch_size, max_time, d_model]

        # Split into multiple heads
        Q = self.split_heads(Q, batch_size)  # [batch_size, num_heads, 1, d_k]
        K = self.split_heads(K, batch_size)  # [batch_size, num_heads, max_time, d_k]
        V = self.split_heads(V, batch_size)  # [batch_size, num_heads, max_time, d_v]

        # Compute attention scores
        d_k = tf.cast(self.d_k, tf.float32)
        scores = tf.matmul(Q, K, transpose_b=True) / tf.sqrt(d_k)  # [batch_size, num_heads, 1, max_time]

        # Apply mask if provided
        if mask is not None:
            mask = tf.expand_dims(mask, 1)  # [batch_size, 1, max_time]
            mask = tf.expand_dims(mask, 1)  # [batch_size, 1, 1, max_time]
            scores = tf.where(mask, scores, tf.fill(tf.shape(scores), -1e9))

        # Apply softmax to get attention weights
        attention_weights = tf.nn.softmax(scores, axis=-1)  # [batch_size, num_heads, 1, max_time]

        # Apply attention weights to values
        attention_output = tf.matmul(attention_weights, V)  # [batch_size, num_heads, 1, d_v]

        # Combine heads
        attention_output = self.combine_heads(attention_output, batch_size)  # [batch_size, 1, d_model]

        # Final linear transformation
        output = self.W_o(attention_output)  # [batch_size, 1, d_model]

        # Average attention weights across heads for compatibility
        avg_attention_weights = tf.reduce_mean(attention_weights, axis=1)  # [batch_size, 1, max_time]
        avg_attention_weights = tf.squeeze(avg_attention_weights, axis=1)  # [batch_size, max_time]

        return output, avg_attention_weights

    def __call__(self, query, state, prev_max_attentions):
        """
        Score the query based on the keys and values.

        Args:
            query: Tensor of dtype matching `self.values` and shape `[batch_size, query_depth]`
            state: Previous alignments tensor of shape `[batch_size, alignments_size]`
            prev_max_attentions: Previous maximum attention positions
                (int tensor `[batch_size]`, the argmax of the last alignments)

        Returns:
            alignments: Tensor of shape `[batch_size, alignments_size]`
            next_state: cumulated (or plain) alignments, same shape
            max_attentions: int32 argmax positions of the new alignments
        """
        previous_alignments = state

        with variable_scope.variable_scope(None, "Location_Sensitive_MultiHead_Attention", [query]):
            # Process location features: previous alignments become the single
            # input channel of the 1-D convolution ([B, T] -> [B, T, 1]).
            expanded_alignments = tf.expand_dims(previous_alignments, axis=2)
            f = self.location_convolution(expanded_alignments)
            processed_location_features = self.location_layer(f)

            # Expand query for multi-head attention
            expanded_query = tf.expand_dims(query, 1)  # [batch_size, 1, query_depth]

            # Compute multi-head attention.
            # NOTE(review): the projected context (`attention_output`) is
            # discarded here — presumably the attention wrapper recomputes the
            # context vector from the alignments; confirm against the caller.
            attention_output, attention_weights = self._compute_multihead_attention(
                expanded_query, self.keys, self.values, processed_location_features)

            # Apply synthesis constraints if enabled: restrict attention to a
            # window of positions relative to the previous argmax.
            if self.synthesis_constraint:
                Tx = tf.shape(attention_weights)[-1]
                if self.constraint_type == 'monotonic':
                    # Forbid positions before prev_max (key_masks) and more
                    # than attention_win_size steps ahead (reverse_masks).
                    key_masks = tf.sequence_mask(prev_max_attentions, Tx)
                    reverse_masks = tf.sequence_mask(Tx - self.attention_win_size - prev_max_attentions, Tx)[:, ::-1]
                else:
                    assert self.constraint_type == 'window'
                    # Symmetric window centered on prev_max (left half rounds
                    # up for odd window sizes).
                    key_masks = tf.sequence_mask(prev_max_attentions - (self.attention_win_size // 2 + (self.attention_win_size % 2 != 0)), Tx)
                    reverse_masks = tf.sequence_mask(Tx - (self.attention_win_size // 2) - prev_max_attentions, Tx)[:, ::-1]

                # True in `masks` marks a disallowed position; those entries
                # are replaced by a large negative so the subsequent
                # normalization drives them to ~0.
                masks = tf.logical_or(key_masks, reverse_masks)
                paddings = tf.ones_like(attention_weights) * (-2 ** 32 + 1)
                attention_weights = tf.where(tf.equal(masks, False), attention_weights, paddings)

            # Apply normalization function.
            # NOTE(review): `attention_weights` are already softmax-normalized
            # inside _compute_multihead_attention, so applying
            # self._probability_fn (softmax by default) normalizes a second
            # time — confirm this double normalization is intentional.
            alignments = self._probability_fn(attention_weights, previous_alignments)
            max_attentions = tf.argmax(alignments, -1, output_type=tf.int32)

            # Update state: cumulative alignments feed the next step's
            # location features when cumulate_weights is enabled.
            if self._cumulate:
                next_state = alignments + previous_alignments
            else:
                next_state = alignments

            # Return format expected by BahdanauAttention
            return alignments, next_state, max_attentions


def _smoothing_normalization(e):
    """
    Applies a smoothing normalization function instead of softmax.

    Normalizes sigmoid(e) so each row sums to 1, letting the attention spread
    probability mass over several positions instead of softmax's near-one-hot
    output.

    Args:
        e: matrix [batch_size, max_time]: expected to be energy (score) values

    Returns:
        matrix [batch_size, max_time]: [0, 1] normalized alignments
    """
    # Evaluate the sigmoid once (the original computed it twice).
    sig = tf.nn.sigmoid(e)
    return sig / tf.reduce_sum(sig, axis=-1, keepdims=True)