import logging
import oneflow as torch
import oneflow.nn as nn
import oneflow.nn.functional as F

logger = logging.getLogger(__name__)


class MultiLayeredConv1d(nn.Module):
    """Multi-layered conv1d for Transformer block.

    This is a module of multi-layered conv1d designed to replace the positionwise
    feed-forward network in a Transformer block, introduced in
    `FastSpeech: Fast, Robust and Controllable Text to Speech`_.

    Args:
        in_chans (int): Number of input channels.
        hidden_chans (int): Number of hidden channels.
        kernel_size (int): Kernel size of conv1d (odd sizes preserve sequence length).
        dropout_rate (float): Dropout rate.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    """

    def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate=0.0):
        super(MultiLayeredConv1d, self).__init__()
        # "same" padding for odd kernel sizes so the time dimension is preserved.
        padding = (kernel_size - 1) // 2
        self.w_1 = nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=padding)
        self.w_2 = nn.Conv1d(hidden_chans, in_chans, kernel_size, stride=1, padding=padding)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Batch of input tensors (B, T, in_chans).

        Returns:
            Tensor: Batch of output tensors (B, T, in_chans);
                ``w_2`` projects back to ``in_chans``, not ``hidden_chans``.

        """
        # nn.Conv1d expects (B, C, T), so transpose channel/time around each conv.
        x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
        return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)


class PositionwiseFeedForward(nn.Module):
    """Positionwise feed-forward network.

    :param int idim: input dimension
    :param int hidden_units: number of hidden units
    :param float dropout_rate: dropout rate
    :param str activation: activation function: 'relu', 'glu', 'gelu', or 'swish'
    :param bool apply_initialization: initialize weights with Xavier uniform at construction
    """

    def __init__(self, idim, hidden_units, dropout_rate, activation='relu', apply_initialization=False):
        super(PositionwiseFeedForward, self).__init__()
        self.activation = activation
        self.apply_initialization = apply_initialization
        # GLU halves the last dimension, so w_1 must emit 2 * hidden_units for it.
        self.w_1 = nn.Linear(idim, hidden_units * 2 if activation == 'glu' else hidden_units)
        self.w_2 = nn.Linear(hidden_units, idim)
        self.dropout = nn.Dropout(dropout_rate)

        if self.apply_initialization:
            self.init_parameters()

    def forward(self, x):
        """Apply the two-layer feed-forward transform.

        :param Tensor x: input tensor of shape (B, *, idim)
        :return: output tensor of shape (B, *, idim)
        :raises NotImplementedError: if ``activation`` is not a supported name
        """
        x = self.w_1(x)
        if self.activation == 'relu':
            x = F.relu(x)
        elif self.activation == 'glu':
            x = F.glu(x)
        elif self.activation == 'gelu':
            x = F.gelu(x)
        elif self.activation == 'swish':
            x = x * torch.sigmoid(x)
        else:
            # Name the bad value: a bare NotImplementedError is opaque to callers.
            raise NotImplementedError(
                "unsupported activation: %r (expected 'relu', 'glu', 'gelu', or 'swish')" % self.activation)
        return self.w_2(self.dropout(x))

    def init_parameters(self):
        """Initialize parameters with Xavier uniform distribution."""
        for p in self.parameters():
            if p.dim() == 1:
                nn.init.constant_(p, 0.)  # bias
            elif p.dim() in [2, 3, 4]:
                nn.init.xavier_uniform_(p)
        # Lazy %-style args: formatting is skipped unless DEBUG is enabled.
        logger.debug('===== Initialize %s with Xavier uniform distribution =====', self.__class__.__name__)

