import math
import numpy
import logging
import oneflow as flow
import oneflow.nn as nn

logger = logging.getLogger(__name__)



class BasedAttention(nn.Module):
    """Base class providing the shared attention-context computation.

    Args:
        source_dim: feature size of the attended values (input size of the
            optional output projection).
        output_dim: feature size after the output projection.
        enable_output_proj: if True, apply Linear(source_dim, output_dim)
            to the attention context.
        dropout: dropout probability applied to the returned context.
    """

    def __init__(self, source_dim, output_dim, enable_output_proj=True, dropout=0.0):
        super(BasedAttention, self).__init__()

        self.enable_output_proj = enable_output_proj
        if enable_output_proj:
            self.output_proj = nn.Linear(source_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def compute_context(self, values, scores, mask=None):
        """Softmax the scores, weight the values, and project the result.

        Args:
            values: [b, t2, v] or [b, nh, t2, v]
            scores: [b, t1, t2] or [b, nh, t1, t2]
            mask: [b, t1, t2] or [b, 1/nh, t1, t2]; zero entries are
                excluded from the attention.

        Returns:
            Tuple of (context, weights): the (dropout-regularized,
            optionally projected) context and the softmax attention weights.
        """
        assert values.dim() == scores.dim()

        # Push masked positions to -inf so softmax assigns them zero weight.
        if mask is not None:
            scores = flow.masked_fill(scores, mask == 0, -float("inf"))

        weights = flow.softmax(scores, dim=-1)
        context = flow.matmul(weights, values)

        # Multi-head case: fold the head axis back into the feature axis.
        if context.dim() == 4:
            batch, heads, time1, feat = context.size()
            context = context.transpose(1, 2).reshape(batch, time1, heads * feat)

        if self.enable_output_proj:
            context = self.output_proj(context)

        return self.dropout(context), weights
    

class MultiHeadedAttention(nn.Module):
    """Standard multi-head scaled dot-product attention.

    :param int n_heads: number of attention heads
    :param int d_model: feature size (must be divisible by n_heads)
    :param float dropout_rate: dropout applied to the attention weights
    :param bool apply_initialization: Xavier-initialize the projections
    """

    def __init__(self, n_heads, d_model, dropout_rate=0.0, apply_initialization=False):
        super(MultiHeadedAttention, self).__init__()
        assert d_model % n_heads == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // n_heads
        self.n_heads = n_heads
        self.apply_initialization = apply_initialization

        self.linear_q = nn.Linear(d_model, d_model)
        self.linear_k = nn.Linear(d_model, d_model)
        self.linear_v = nn.Linear(d_model, d_model)
        self.linear_out = nn.Linear(d_model, d_model)

        self.attn = None
        self.dropout = nn.Dropout(p=dropout_rate)

        if apply_initialization:
            self.init_parameters()

    def forward(self, query, key, value, mask):
        """Compute 'Scaled Dot Product Attention'.

        :param flow.Tensor query: (batch, time1, d_model)
        :param flow.Tensor key: (batch, time2, d_model)
        :param flow.Tensor value: (batch, time2, d_model)
        :param flow.Tensor mask: (batch, time1, time2) or None
        :return flow.Tensor: attended and transformed `value`
            (batch, time1, d_model)
        """
        context, _ = self.compute_attn_weights_and_context(
            self.linear_q(query), self.linear_k(key), self.linear_v(value), mask)
        return context

    def init_parameters(self):
        """Initialize parameters with Xavier uniform distribution."""
        for proj in (self.linear_q, self.linear_k, self.linear_v):
            nn.init.xavier_uniform_(proj.weight, gain=1 / math.sqrt(2))
        nn.init.xavier_uniform_(self.linear_out.weight)
        logger.debug('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)

    def inference_slf(self, query, key, value, mask=None, cache=None, num_cache_steps=-1):
        """Incremental self-attention step for batch-size-1 decoding.

        query: [1, t1, d_model]; key/value: [1, t2, d_model]
        cache: optional {'k': tensor, 'v': tensor} of projections from
            previous steps, concatenated in front of the new ones.
        num_cache_steps: keep at most this many trailing time steps in the
            returned cache (<= 0 keeps everything).

        Returns (context, new_cache).
        """
        assert query.size(0) == 1

        q = self.linear_q(query)
        k = self.linear_k(key)
        v = self.linear_v(value)

        if cache is not None:
            k = flow.cat([cache['k'], k], dim=1)
            v = flow.cat([cache['v'], v], dim=1)

        if num_cache_steps > 0:
            # Truncate to the most recent steps only.
            keep = min(k.size(1), num_cache_steps)
            cache = {'k': k[:, -keep:, :], 'v': v[:, -keep:, :]}
        else:
            cache = {'k': k, 'v': v}

        context, _ = self.compute_attn_weights_and_context(q, k, v, mask)
        return context, cache

    def inference_src(self, query, key, value, mask=None, cache=None):
        """Batch-size-1 source-attention step: key/value projections are
        computed once, then reused from ``cache`` on later calls.

        Returns (context, cache) with cache = {'k': ..., 'v': ...}.
        """
        assert query.size(0) == 1

        q = self.linear_q(query)  # [1, t1, d_model]

        if cache is not None and cache['k'] is not None and cache['v'] is not None:
            k, v = cache['k'], cache['v']
        else:
            k, v = self.linear_k(key), self.linear_v(value)

        context, _ = self.compute_attn_weights_and_context(q, k, v, mask)
        return context, {'k': k, 'v': v}

    def compute_attn_weights_and_context(self, q, k, v, mask=None):
        """Split heads, apply scaled dot-product attention, merge heads.

        Returns (context, attn_weights) where context is
        (batch, time1, d_model) and attn_weights is
        (batch, head, time1, time2).
        """
        n_batch = q.size(0)

        def split_heads(t):
            # (batch, time, d_model) -> (batch, head, time, d_k)
            return t.view(n_batch, -1, self.n_heads, self.d_k).transpose(1, 2)

        q, k, v = split_heads(q), split_heads(k), split_heads(v)

        scores = flow.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)  # (batch, head, time1, time2)
        if mask is None:
            attn_weights = flow.softmax(scores, dim=-1)
        else:
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, time1, time2)
            # Most negative representable value for the score dtype.
            min_value = float(numpy.finfo(flow.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            scores = scores.masked_fill(mask, min_value)
            # Zero out fully-masked rows after the softmax.
            attn_weights = flow.softmax(scores, dim=-1).masked_fill(mask, 0.0)

        p_attn = self.dropout(attn_weights)
        x = flow.matmul(p_attn, v)  # (batch, head, time1, d_k)
        x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.n_heads * self.d_k)
        return self.linear_out(x), attn_weights


class MHAforSourceAttention(nn.Module):
    """Multi-head attention for source (encoder-decoder) attention.

    Query and memory may have different feature sizes; both are projected
    into a common attention space of size ``attn_size``.

    :param int query_size: feature size of the query
    :param int memory_size: feature size of the key/value memory
    :param int attn_size: attention space size (divisible by n_heads)
    :param int n_heads: number of attention heads
    :param float dropout_rate: dropout applied to the attention weights
    """

    def __init__(self, query_size, memory_size, attn_size, n_heads, dropout_rate=0.0):
        super(MHAforSourceAttention, self).__init__()
        assert attn_size % n_heads == 0
        # We assume d_v always equals d_k.
        self.d_k = attn_size // n_heads
        self.n_heads = n_heads

        self.query_dim = query_size
        self.memory_dim = memory_size

        self.linear_q = nn.Linear(query_size, attn_size)
        self.linear_k = nn.Linear(memory_size, attn_size)
        self.linear_v = nn.Linear(memory_size, attn_size)
        self.linear_out = nn.Linear(attn_size, attn_size)

        self.attn = None
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, query, key, value, mask):
        """Compute 'Scaled Dot Product Attention' over the projections.

        :param flow.Tensor query: (batch, time1, query_size)
        :param flow.Tensor key: (batch, time2, memory_size)
        :param flow.Tensor value: (batch, time2, memory_size)
        :param flow.Tensor mask: (batch, time1, time2) or None
        :return flow.Tensor: attended value (batch, time1, attn_size)
        """
        context, _ = self.compute_attn_weights_and_context(
            self.linear_q(query), self.linear_k(key), self.linear_v(value), mask)
        return context

    def inference(self, query, key, value, mask=None, cache=None):
        """Batch-size-1 decoding step: key/value projections are computed
        once and then reused from ``cache`` on subsequent calls.

        Returns (context, cache) with cache = {'k': ..., 'v': ...}.
        """
        assert query.size(0) == 1

        q = self.linear_q(query)  # [1, t1, attn_size]

        if cache is not None and cache['k'] is not None and cache['v'] is not None:
            k, v = cache['k'], cache['v']
        else:
            k, v = self.linear_k(key), self.linear_v(value)

        context, _ = self.compute_attn_weights_and_context(q, k, v, mask)
        return context, {'k': k, 'v': v}

    def compute_attn_weights_and_context(self, q, k, v, mask=None):
        """Split heads, attend, merge heads, apply the output projection.

        Returns (context, attn_weights).
        """
        n_batch = q.size(0)
        # (batch, time, attn_size) -> (batch, head, time, d_k)
        q = q.view(n_batch, -1, self.n_heads, self.d_k).transpose(1, 2)
        k = k.view(n_batch, -1, self.n_heads, self.d_k).transpose(1, 2)
        v = v.view(n_batch, -1, self.n_heads, self.d_k).transpose(1, 2)

        scores = flow.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)  # (batch, head, time1, time2)
        if mask is not None:
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, time1, time2)
            # Most negative representable value for the score dtype; masked
            # rows are additionally zeroed after the softmax.
            min_value = float(numpy.finfo(flow.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            scores = scores.masked_fill(mask, min_value)
            attn_weights = flow.softmax(scores, dim=-1).masked_fill(mask, 0.0)
        else:
            attn_weights = flow.softmax(scores, dim=-1)

        weighted = flow.matmul(self.dropout(attn_weights), v)  # (batch, head, time1, d_k)
        merged = weighted.transpose(1, 2).contiguous().view(n_batch, -1, self.n_heads * self.d_k)
        return self.linear_out(merged), attn_weights


class MultiHeadedSelfAttention(BasedAttention):
    """Multi-head self-attention built on :class:`BasedAttention`.

    :param int n_heads: number of attention heads
    :param int d_model: feature size (divisible by n_heads)
    :param float dropout_rate: dropout applied to the output context
    :param bool share_qvk_proj: use one shared projection for q, k and v
        instead of a fused triple projection
    """

    def __init__(self, n_heads, d_model, dropout_rate=0.0, share_qvk_proj=False):
        super(MultiHeadedSelfAttention, self).__init__(d_model, d_model, enable_output_proj=True, dropout=dropout_rate)

        self.d_model = d_model
        self.share_qvk_proj = share_qvk_proj
        self.nheads = n_heads
        self.d_k = d_model // n_heads

        # Single fused projection emitting q, k and v at once (or one
        # shared tensor when share_qvk_proj is set).
        out_dim = d_model if share_qvk_proj else d_model * 3
        self.qvk_proj = nn.Linear(d_model, out_dim)

    def forward(self, x, mask):
        """Apply self-attention.

        :param flow.Tensor x: (batch, time1, d_model)
        :param flow.Tensor mask: (batch, time1 or 1, time1)
        :return: (context, attn_weights)
        """
        projected = self.qvk_proj(x)

        if self.share_qvk_proj:
            query = key = value = projected
        else:
            query, key, value = flow.split(projected, self.d_model, dim=-1)

        # (batch, time, d_model) -> (batch, head, time, d_k)
        batch_size = projected.size(0)
        query = query.reshape(batch_size, -1, self.nheads, self.d_k).transpose(1, 2)
        key = key.reshape(batch_size, -1, self.nheads, self.d_k).transpose(1, 2)
        value = value.reshape(batch_size, -1, self.nheads, self.d_k).transpose(1, 2)

        scores = flow.matmul(query, key.transpose(2, 3)) / math.sqrt(self.d_k)
        return self.compute_context(value, scores, mask.unsqueeze(1))

    def inference(self, x, mask, cache=None):
        """Same computation as :meth:`forward`; ``cache`` is passed through
        unchanged (no incremental state is kept here)."""
        context, attn_weights = self.forward(x, mask)
        return context, attn_weights, cache


class MultiHeadedCrossAttention(BasedAttention):
    """Multi-head cross attention (queries attend over an external memory).

    :param int n_heads: number of attention heads
    :param int d_model: query/attention feature size (divisible by n_heads)
    :param int source_dim: feature size of the memory
    :param float dropout_rate: dropout applied to the output context
    :param bool share_vk_proj: use one shared projection for k and v
        instead of a fused pair projection
    """

    def __init__(self, n_heads, d_model, source_dim, dropout_rate=0.0, share_vk_proj=False):
        super(MultiHeadedCrossAttention, self).__init__(d_model, d_model, enable_output_proj=True, dropout=dropout_rate)

        self.d_model = d_model
        self.share_vk_proj = share_vk_proj
        self.nheads = n_heads
        self.d_k = d_model // n_heads
        self.source_dim = source_dim

        self.q_proj = nn.Linear(d_model, d_model)
        # Fused key/value projection of the memory.
        out_dim = d_model if share_vk_proj else d_model * 2
        self.vk_proj = nn.Linear(source_dim, out_dim)

    def forward(self, query, memory, memory_mask):
        """Attend from query positions over the memory.

        :param flow.Tensor query: (batch, time1, d_model)
        :param flow.Tensor memory: (batch, time2, source_dim)
        :param flow.Tensor memory_mask: (batch, time1 or 1, time2)
        :return: (context, attn_weights)
        """
        query = self.q_proj(query)
        memory = self.vk_proj(memory)

        if self.share_vk_proj:
            key = value = memory
        else:
            key, value = flow.split(memory, self.d_model, dim=-1)

        # (batch, time, d_model) -> (batch, head, time, d_k)
        batch_size = query.size(0)
        heads = []
        for tensor in (query, key, value):
            heads.append(tensor.reshape(batch_size, -1, self.nheads, self.d_k).transpose(1, 2))
        query, key, value = heads

        scores = flow.matmul(query, key.transpose(2, 3)) / math.sqrt(self.d_k)
        return self.compute_context(value, scores, memory_mask.unsqueeze(1))

    def inference(self, query, memory, memory_mask, cache=None):
        """Same computation as :meth:`forward`; ``cache`` is passed through
        unchanged (memory projections are not cached here)."""
        context, attn_weights = self.forward(query, memory, memory_mask)
        return context, attn_weights, cache


class MultiHeadedSelfAttentionWithRelPos(MultiHeadedSelfAttention):
    """Multi-head self-attention with Transformer-XL style relative
    positional encoding: learned global biases ``posu``/``posv`` plus a
    projected positional embedding, combined via the rel-shift trick.

    :param int n_heads: number of attention heads
    :param int d_model: feature size (divisible by n_heads)
    :param float dropout_rate: dropout applied to the output context
    :param bool share_qvk_proj: use one shared projection for q, k and v
    """

    def __init__(self, n_heads, d_model, dropout_rate=0.0, share_qvk_proj=False):
        super(MultiHeadedSelfAttentionWithRelPos, self).__init__(n_heads, d_model, dropout_rate, share_qvk_proj)

        # Projection for the relative positional embeddings (no bias, as in
        # Transformer-XL).
        self.pos_proj = nn.Linear(d_model, d_model, bias=False)
        # Global content bias (u) and position bias (v) of Transformer-XL.
        self.posu = nn.Parameter(flow.Tensor(n_heads, self.d_k))
        self.posv = nn.Parameter(flow.Tensor(n_heads, self.d_k))

        flow.nn.init.xavier_normal_(self.posu)
        flow.nn.init.xavier_normal_(self.posv)

    def _shift(self, x, zero_triu=False):
        """Compute relative positional encoding (rel-shift trick).

        Args:
            x (flow.Tensor): Input tensor (batch, nheads, time, size).
            zero_triu (bool): If true, keep only the lower triangular part
                of the matrix.
        Returns:
            flow.Tensor: Shifted tensor with the same shape as ``x``.
        """
        b, nh, t, v = x.size()
        # Pad one zero column, then reinterpret the memory so each row is
        # shifted left by its row index.
        zero_pad = flow.zeros((b, nh, t, 1), device=x.device, dtype=x.dtype)
        x_padded = flow.cat([zero_pad, x], dim=-1)  # (b, nh, t, v+1)

        x_padded = x_padded.view(b, nh, v + 1, t)  # (b, nh, v+1, t)
        x = x_padded[:, :, 1:].view_as(x)

        if zero_triu:
            # BUGFIX: build the mask on the same device/dtype as x; the old
            # code created it on the CPU with the default dtype, breaking
            # this path for tensors on an accelerator.
            ones = flow.ones((t, v), device=x.device, dtype=x.dtype)
            x = x * flow.tril(ones, v - t)[None, None, :, :]

        return x

    def _rel_scores(self, x, pos):
        """Shared score computation for forward() and inference().

        Returns (value, scores): value is (batch, nheads, time, d_k) and
        scores is (batch, nheads, time, time), already scaled by sqrt(d_k).
        """
        x = self.qvk_proj(x)

        if self.share_qvk_proj:
            query = key = value = x
        else:
            query, key, value = flow.split(x, self.d_model, dim=-1)

        batch_size = x.size(0)
        # query stays (batch, time, head, d_k) until the biases are added.
        query = query.reshape(batch_size, -1, self.nheads, self.d_k)
        key = key.reshape(batch_size, -1, self.nheads, self.d_k).transpose(1, 2)
        value = value.reshape(batch_size, -1, self.nheads, self.d_k).transpose(1, 2)

        bpos = pos.size(0)
        pos = self.pos_proj(pos).reshape(bpos, -1, self.nheads, self.d_k).transpose(1, 2)

        # Content term (A+C in the paper): (q + u) . k^T
        query_with_bias_u = (query + self.posu).transpose(1, 2)
        matrix_ac = flow.matmul(query_with_bias_u, key.transpose(-2, -1))

        # Position term (B+D in the paper): (q + v) . pos^T, rel-shifted.
        query_with_bias_v = (query + self.posv).transpose(1, 2)
        matrix_bd = self._shift(flow.matmul(query_with_bias_v, pos.transpose(-2, -1)))

        scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k)
        return value, scores

    def forward(self, x, mask, pos):
        """
        Args:
            x: [batch_size, time, size]
            mask: [batch_size, 1, time]
            pos: positional embedding [batch_size, time, size]
        Returns:
            (context, attn_weights)
        """
        value, scores = self._rel_scores(x, pos)
        context, attn_weights = self.compute_context(value, scores, mask.unsqueeze(1))
        return context, attn_weights

    def inference(self, x, mask, pos, cache):
        """Decoding-time variant: identical computation to forward();
        ``cache`` is currently passed through unchanged.

        Args:
            x: [batch_size, time, size]
            mask: [batch_size, 1, time]
            pos: positional embedding [batch_size, time, size]
            cache: opaque decoder state, returned untouched
        Returns:
            (context, attn_weights, cache)
        """
        value, scores = self._rel_scores(x, pos)
        context, attn_weights = self.compute_context(value, scores, mask.unsqueeze(1))
        return context, attn_weights, cache
