import oneflow as torch
import oneflow.nn as nn
import oneflow.nn.functional as F
from oasr.module.pos import PositionalEncoding
from oasr.module.rnn import CommonRNNLayer
from oasr.encoder.transformer import TransformerEncoderLayer as TransformerLayer
from oasr.decoder.utils import get_transformer_decoder_mask


class StateLessTransducerDecoder(nn.Module):
    """Stateless transducer prediction network: a plain token embedding.

    Unlike a recurrent predictor this keeps no history — every label is
    mapped independently to a ``d_model``-sized vector.

    Args:
        vocab_size: number of output labels.
        d_model: embedding (and output) dimension.
        dropout: dropout probability applied on the training path only.
    """

    def __init__(self, vocab_size, d_model, dropout=0.0):
        super(StateLessTransducerDecoder, self).__init__()

        self.model_type = 'state-less'
        self.output_size = d_model
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, targets):
        """Embed ``targets`` and apply dropout (training path)."""
        embedded = self.embedding(targets)
        return self.dropout(embedded)

    def inference(self, targets):
        """Embed ``targets`` without dropout (decoding path)."""
        return self.embedding(targets)


class Conv1DStateLessTransducerDecoder(StateLessTransducerDecoder):
    """Stateless predictor with a causal depthwise 1-D convolution.

    The embedded label sequence is left-padded with ``kernel_size - 1``
    zeros before the convolution, so each output frame only sees the
    current and past labels.

    Args:
        kernel_size: temporal receptive field of the convolution.
        residual: if True, add the raw embedding to the conv output.
        vocab_size: number of output labels.
        d_model: embedding / channel / output dimension.
        dropout: dropout probability (embedding and output).
    """

    def __init__(self, kernel_size, residual, vocab_size, d_model, dropout):
        super().__init__(vocab_size, d_model, dropout=dropout)

        self.model_type = 'conv-state-less'
        self.residual = residual
        self.kernel_size = kernel_size
        # Depthwise conv: groups == channels, one filter per embedding dim.
        self.conv = nn.Conv1d(
            d_model,
            d_model,
            kernel_size=kernel_size,
            stride=1,
            padding=0,
            groups=d_model
        )

        self.output_layer = nn.Linear(d_model, d_model)
        self.out_dropout = nn.Dropout(dropout)

    def forward(self, targets):
        """Map labels [B, U] to convolved representations [B, U, d_model]."""
        emb = self.dropout(self.embedding(targets))
        # Causal left pad on the time axis (F.pad spec lists the last
        # dimension first, so (0, 0, k-1, 0) pads dim 1, not dim 2).
        causal = F.pad(emb, pad=(0, 0, self.kernel_size - 1, 0), value=0.0)
        conv_out = self.conv(causal.transpose(1, 2)).transpose(1, 2)
        mixed = conv_out + emb if self.residual else conv_out
        return self.out_dropout(self.output_layer(mixed))

    def inference(self, targets):
        # NOTE(review): this skips the convolution entirely and returns the
        # parent's embedding path (dropout is a no-op in eval mode) —
        # confirm against the decoding code that this is intentional.
        return super().forward(targets)


class TransducerTransformerDecoder(nn.Module):
    """Transformer label encoder (prediction network) for a transducer.

    Embeds the target token sequence, adds positional information, and runs
    it through a stack of causally-masked Transformer layers.

    Args:
        vocab_size: size of the output vocabulary.
        n_blocks: number of Transformer layers.
        d_model: model / embedding dimension.
        n_heads: attention heads per layer.
        d_ff: feed-forward hidden dimension.
        dropout: residual dropout inside each layer.
        normalize_before: pre-norm layers; when True a final LayerNorm is
            applied after the block stack.
        concat_after: concatenate attention input and output (passed through
            to the Transformer layer).
        activation: feed-forward activation name.
    """

    def __init__(self, vocab_size, n_blocks, d_model, n_heads, d_ff, dropout=0.1,
                 normalize_before=False, concat_after=False, activation='glu'):
        super(TransducerTransformerDecoder, self).__init__()

        self.model_type = 'transformer'

        # FIX: the constructor argument was previously ignored (hard-coded
        # to False), so pre-norm configurations silently ran without the
        # final after_norm LayerNorm.
        self.normalize_before = normalize_before
        self.vocab_size = vocab_size
        self.output_size = d_model
        # Relative positional encoding is currently disabled; the relative
        # branch in _pos_encoding is kept for future use.
        self.relative_positional = False

        self.embedding = nn.Embedding(self.vocab_size, d_model)
        self.pos_emb = PositionalEncoding(d_model, 0.0)

        self.blocks = nn.ModuleList([
            TransformerLayer(
                n_heads=n_heads,
                d_model=d_model,
                d_ff=d_ff,
                slf_attn_dropout=0.0,
                ffn_dropout=0.0,
                residual_dropout=dropout,
                normalize_before=normalize_before,
                concat_after=concat_after,
                activation=activation
            ) for _ in range(n_blocks)
        ])

        if self.normalize_before:
            self.after_norm = nn.LayerNorm(d_model)

    def _pos_encoding(self, dec_output):
        """Return ``(dec_output, pos)`` according to the positional mode."""
        if self.relative_positional:
            # Relative positions in [-(T-1), T-1], shape [1, 2T - 1].
            position = torch.arange(-(dec_output.size(1)-1), dec_output.size(1), device=dec_output.device).reshape(1, -1)
            pos = self.pos_emb._embedding_from_positions(position)
        else:
            # Absolute encoding: positions are folded into dec_output.
            dec_output, pos = self.pos_emb(dec_output)
        return dec_output, pos

    def forward(self, targets):
        """Encode ``targets`` [B, U] into hidden states [B, U, d_model]."""
        dec_mask = get_transformer_decoder_mask(targets)
        dec_output = self.embedding(targets)
        dec_output, pos = self._pos_encoding(dec_output)

        for block in self.blocks:
            dec_output, _ = block(dec_output, dec_mask, pos)

        if self.normalize_before:
            dec_output = self.after_norm(dec_output)

        return dec_output

    def inference(self, targets):
        """Inference path; the computation is identical to training.

        (The previous implementation duplicated forward line-for-line.)
        """
        return self.forward(targets)


class TransducerRecurrentDecoder(nn.Module):
    """Recurrent (RNN/LSTM/GRU-style) transducer prediction network.

    Args:
        vocab_size: number of output labels.
        embedding_dim: input embedding dimension.
        num_layers: number of stacked recurrent layers.
        hidden_size: recurrent hidden dimension per layer.
        project_size: projected output dimension of each layer.
        layer_norm: whether each layer applies layer normalization.
        dropout: dropout probability inside each layer.
        rnn_type: recurrent cell type ('lstm' uses (h, c) state tuples).
    """

    def __init__(self, vocab_size, embedding_dim, num_layers, hidden_size, project_size, layer_norm, dropout, rnn_type='lstm'):
        super(TransducerRecurrentDecoder, self).__init__()

        self.model_type = 'rnn'

        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.num_layers = num_layers
        self.rnn_type = rnn_type
        self.hidden_size = hidden_size

        # First layer consumes the embedding; deeper layers consume the
        # projected output of the layer below.
        self.blocks = nn.ModuleList([
            CommonRNNLayer(
                input_size=embedding_dim if layer == 0 else project_size,
                hidden_size=hidden_size,
                project_size=project_size,
                layer_norm=layer_norm,
                dropout=dropout,
                bidirectional=False,
                rnn_type=rnn_type
            )
            for layer in range(num_layers)
        ])

        self.output_size = project_size

    def forward(self, targets, hidden=None):
        """Run ``targets`` through the stack.

        Returns ``(output, new_hidden)`` where ``new_hidden`` is a list of
        per-layer states.
        """
        out = self.embedding(targets)
        states = []
        for idx, layer in enumerate(self.blocks):
            prev = None if hidden is None else hidden[idx]
            out, state = layer(out, hidden=prev)
            states.append(state)
        return out, states

    def inference(self, targets, hidden=None):
        """Inference is identical to the training forward pass."""
        return self.forward(targets, hidden)

    def init_hidden_states(self, batch_size, device):
        """Zero initial states, one entry per layer; (h, c) pairs for LSTM."""
        def _zeros():
            return torch.zeros([1, batch_size, self.hidden_size]).to(device)

        if self.rnn_type == 'lstm':
            return [(_zeros(), _zeros()) for _ in range(self.num_layers)]
        return [_zeros() for _ in range(self.num_layers)]
        

# Factory registry: maps a `model_type` config string to its decoder class.
# Each class exposes the same surface (forward / inference, output_size) but
# has its own constructor signature.
TransducerDecoder = {
    'transformer': TransducerTransformerDecoder,
    'rnn': TransducerRecurrentDecoder,
    'state-less': StateLessTransducerDecoder,
    'conv-state-less': Conv1DStateLessTransducerDecoder
}