import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from src.net import MaskConv, get_seq_lens


class SequenceWise(nn.Module):
    def __init__(self, module):
        """
        Apply a module that expects (batch, features) input to a (T, N, H) tensor.

        Flattens the time and batch dimensions into one, runs the wrapped
        module, then restores the (T, N, -1) layout. Lets per-timestep modules
        (e.g. BatchNorm1d, Linear) handle variable sequence lengths and
        minibatch sizes.
        :param module: Module to apply to the flattened input.
        """
        super(SequenceWise, self).__init__()
        self.module = module

    def forward(self, x):
        time_steps, batch = x.size(0), x.size(1)
        flat = x.view(time_steps * batch, -1)
        flat = self.module(flat)
        return flat.view(time_steps, batch, -1)

    def __repr__(self):
        return '{} (\n{})'.format(self.__class__.__name__, self.module.__repr__())


class Lookahead(nn.Module):
    """Lookahead convolution layer for unidirectional RNNs.

    Wang et al. 2016 - Lookahead Convolution Layer for Unidirectional
    Recurrent Neural Networks. Each feature channel is mixed with the next
    ``context - 1`` future frames via a depthwise 1-D convolution.
    Input shape: (T, N, H); output shape: same as input.
    """

    def __init__(self, n_features, context):
        super(Lookahead, self).__init__()
        assert context > 0
        self.context = context
        self.n_features = n_features
        # Right-pad the time axis so the last steps still see a full kernel.
        self.pad = (0, self.context - 1)
        # groups == channels -> one independent filter per feature (depthwise).
        self.conv = nn.Conv1d(self.n_features, self.n_features,
                              kernel_size=self.context, stride=1,
                              groups=self.n_features, padding=0, bias=False)

    def forward(self, x):
        # (T, N, H) -> (N, H, T) as expected by Conv1d.
        x = x.transpose(0, 1).transpose(1, 2)
        x = self.conv(F.pad(x, pad=self.pad, value=0))
        # Back to (T, N, H).
        return x.transpose(1, 2).transpose(0, 1).contiguous()

    def __repr__(self):
        return '{}(n_features={}, context={})'.format(
            self.__class__.__name__, self.n_features, self.context)


class BatchRNN(nn.Module):
    """A single RNN layer operating on padded (T, N, H) batches.

    Optionally applies time-distributed batch norm to the input, packs the
    sequences by their true lengths, and — for a bidirectional RNN — sums the
    two directions so the output width stays ``hidden_size``.
    """

    def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM, bidirectional=False, batch_norm=True, input_sorted=True):
        super(BatchRNN, self).__init__()
        self.input_sorted = input_sorted
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        # Batch norm over the feature dim, applied per timestep (or disabled).
        self.batch_norm = SequenceWise(nn.BatchNorm1d(input_size)) if batch_norm else None
        self.rnn = rnn_type(input_size=input_size, hidden_size=hidden_size,
                            bidirectional=bidirectional, bias=True)
        self.num_directions = 2 if bidirectional else 1

    def flatten_parameters(self):
        # Compact the RNN weights into one contiguous chunk (cuDNN requirement).
        self.rnn.flatten_parameters()

    def forward(self, x, output_lengths):
        """x: (T, N, input_size); output_lengths: valid length per sample."""
        if self.batch_norm is not None:
            x = self.batch_norm(x)
        packed = nn.utils.rnn.pack_padded_sequence(
            x, output_lengths.cpu(), enforce_sorted=self.input_sorted)
        rnn_out, _ = self.rnn(packed)
        padded, _ = nn.utils.rnn.pad_packed_sequence(rnn_out)
        if self.bidirectional:
            # (T, N, 2*H) -> (T, N, H) by summing the two directions.
            t_dim, n_dim = padded.size(0), padded.size(1)
            padded = padded.view(t_dim, n_dim, 2, -1).sum(dim=2)
        return padded


class DeepSpeech(nn.Module):
    """DeepSpeech2-style acoustic model.

    Pipeline: two masked 2-D convolutions over the (freq, time) plane ->
    stack of BatchRNN layers -> lookahead convolution (unidirectional only)
    -> time-distributed batch-norm + linear classifier.

    :param input_size: number of spectrogram frequency bins.
    :param rnn_hidden_size: hidden units per RNN layer.
    :param rnn_hidden_layers: number of stacked RNN layers.
    :param output_size: number of output classes.
    :param cnn1_ksize: (freq, time) kernel of the first convolution.
    :param cnn1_stride: (freq, time) stride of the first convolution.
    :param cnn2_ksize: (freq, time) kernel of the second convolution.
    :param cnn2_stride: (freq, time) stride of the second convolution.
    :param bidirectional: use bidirectional RNNs (disables the lookahead layer).
    :param input_sorted: whether batches arrive sorted by decreasing length
        (passed to pack_padded_sequence as enforce_sorted).
    :param lookahead_context: context width of the lookahead convolution.
    """

    def __init__(self, input_size, rnn_hidden_size, rnn_hidden_layers, output_size,
                 cnn1_ksize=(41, 11), cnn1_stride=(2, 1), cnn2_ksize=(21, 11), cnn2_stride=(2, 1),
                 bidirectional=False, input_sorted=False, lookahead_context=3):
        super().__init__()

        self.hidden_size = rnn_hidden_size
        self.rnn_hidden_layers = rnn_hidden_layers
        self.rnn_type = nn.LSTM

        self.lookahead_context = lookahead_context
        self.bidirectional = bidirectional

        # "Same"-style padding: half the kernel size in each dimension.
        # BUGFIX: the time padding was `ksize[0] // 1` (a no-op divide), which
        # padded the time axis by the full *frequency* kernel size (41/21)
        # even though the time kernel is only ksize[1]; it should be
        # `ksize[1] // 2` (e.g. 5 for an 11-wide time kernel).
        cnn1_psize = (cnn1_ksize[0] // 2, cnn1_ksize[1] // 2)
        cnn2_psize = (cnn2_ksize[0] // 2, cnn2_ksize[1] // 2)

        self.conv = MaskConv(nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=cnn1_ksize, stride=cnn1_stride, padding=cnn1_psize),
            nn.BatchNorm2d(32),
            nn.Hardtanh(0, 20, inplace=True),
            nn.Conv2d(32, 32, kernel_size=cnn2_ksize, stride=cnn2_stride, padding=cnn2_psize),
            nn.BatchNorm2d(32),
            nn.Hardtanh(0, 20, inplace=True)
        ))
        # Frequency bins surviving the convolutions, using the conv output
        # formula (size + 2*padding - kernel) // stride + 1, times 32 channels.
        rnn_input_size = (input_size + 2 * cnn1_psize[0] - cnn1_ksize[0]) // cnn1_stride[0] + 1
        rnn_input_size = (rnn_input_size + 2 * cnn2_psize[0] - cnn2_ksize[0]) // cnn2_stride[0] + 1
        rnn_input_size *= 32

        self.rnns = nn.Sequential(
            # First layer skips batch norm: the conv stack already normalized.
            BatchRNN(
                input_size=rnn_input_size,
                hidden_size=self.hidden_size,
                rnn_type=self.rnn_type,
                bidirectional=self.bidirectional,
                batch_norm=False,
                input_sorted=input_sorted
            ),
            *(
                BatchRNN(
                    input_size=self.hidden_size,
                    hidden_size=self.hidden_size,
                    rnn_type=self.rnn_type,
                    bidirectional=self.bidirectional,
                    input_sorted=input_sorted
                ) for _ in range(self.rnn_hidden_layers - 1)
            )
        )

        # Unidirectional models get future context from a lookahead conv;
        # bidirectional models already see the future, so it is skipped.
        self.lookahead = nn.Sequential(
            Lookahead(self.hidden_size, context=self.lookahead_context),
            nn.Hardtanh(0, 20, inplace=True)
        ) if not self.bidirectional else None

        fully_connected = nn.Sequential(
            nn.BatchNorm1d(self.hidden_size),
            nn.Linear(self.hidden_size, output_size, bias=False)
        )
        self.fc = nn.Sequential(
            SequenceWise(fully_connected),
        )

    def forward(self, x, lengths):
        """Run the model.

        :param x: batch of spectrograms; assumed (N, T, F) — after the
            unsqueeze/transpose below it becomes (N, 1, F, T) for the convs.
            TODO confirm layout against the caller.
        :param lengths: 1D tensor of valid input lengths per sample.
        :return: (logits of shape (N, T', output_size), downsampled lengths)
        """
        x = torch.unsqueeze(x, 1).transpose(2, 3)

        lengths = lengths.cpu().int()
        # Lengths after conv downsampling along the time axis.
        output_lengths = get_seq_lens(self.conv.modules(), lengths)
        if x.is_cuda:
            output_lengths = output_lengths.cuda()
        x, _ = self.conv(x, output_lengths)

        sizes = x.size()
        x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3])  # Collapse channel x freq into one feature dim
        x = x.transpose(1, 2).transpose(0, 1).contiguous()  # TxNxH

        for rnn in self.rnns:
            x = rnn(x, output_lengths)

        if not self.bidirectional:  # no need for lookahead layer in bidirectional
            x = self.lookahead(x)

        x = self.fc(x)
        x = x.transpose(0, 1)  # back to batch-first (N, T', output_size)
        return x, output_lengths

    def get_seq_lens(self, input_length):
        """
        Given a 1D Tensor or Variable containing integer sequence lengths, return a 1D tensor or variable
        containing the size sequences that will be output by the network.

        NOTE(review): unused by forward() (which calls the imported
        src.net.get_seq_lens instead); kept for backward compatibility.
        :param input_length: 1D Tensor
        :return: 1D Tensor scaled by model
        """
        seq_len = input_length
        for m in self.conv.modules():
            if isinstance(m, nn.Conv2d):
                # Conv output-length formula along the time axis (index 1).
                seq_len = ((seq_len + 2 * m.padding[1] - m.dilation[1] * (m.kernel_size[1] - 1) - 1) // m.stride[1] + 1)
        return seq_len.int()
