#coding:utf-8
import torch
from torch import Tensor
from torch import nn
from torchsummary import summary

class Wav2Letter(nn.Module):
    r"""Wav2Letter model architecture from the `Wav2Letter an End-to-End ConvNet-based Speech Recognition System`_.

    .. _Wav2Letter an End-to-End ConvNet-based Speech Recognition System: https://arxiv.org/abs/1609.03193

     :math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}`

    Args:
        num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
        input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
         or ``mfcc`` (Default: ``waveform``).
        num_features (int, optional): Number of input features that the network will receive (Default: ``1``).

    Raises:
        ValueError: If ``input_type`` is not one of ``waveform``, ``power_spectrum`` or ``mfcc``.
    """

    def __init__(self, num_classes=40, input_type="waveform", num_features=1):
        super(Wav2Letter, self).__init__()

        # Fail fast: the original code left self.acoustic_model undefined for an
        # unknown input_type, which only surfaced as an AttributeError in forward().
        if input_type not in ("waveform", "power_spectrum", "mfcc"):
            raise ValueError(
                "input_type must be 'waveform', 'power_spectrum' or 'mfcc', "
                "got {!r}".format(input_type)
            )

        # Raw waveforms go through a strided front-end conv (below) that projects
        # onto 250 channels, so the shared acoustic stack always sees 250 inputs.
        acoustic_num_features = 250 if input_type == "waveform" else num_features

        # k=48, s=1, p=23: L_out = L + 2*23 - 48 + 1 = L - 1.  The k=32/p=16 conv
        # further down adds that frame back, so the stack preserves time length.
        layers = [
            nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=1, padding=23),
            nn.ReLU(inplace=True),
        ]
        # Seven identical k=7/p=3 conv blocks; time length is unchanged by each.
        for _ in range(7):
            layers += [
                nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
                nn.ReLU(inplace=True),
            ]
        layers += [
            nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
            nn.ReLU(inplace=True),
            nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            # Per-frame 1x1 classifier head.
            nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
        ]
        acoustic_model = nn.Sequential(*layers)

        if input_type == "waveform":
            # Strided feature-extraction front end: one output frame per 160
            # input samples.  NOTE(review): presumably a 10 ms hop at 16 kHz —
            # confirm against the data pipeline.
            waveform_model = nn.Sequential(
                nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
                nn.ReLU(inplace=True)
            )
            self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
        else:
            # power_spectrum / mfcc inputs feed the acoustic stack directly.
            self.acoustic_model = acoustic_model

    def forward(self, x):
        r"""
        Args:
            x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length).

        Returns:
            Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
        """
        return self.acoustic_model(x)

class RNN(nn.Module):
    """Recurrent acoustic model: stacked (bi)directional RNN plus a Linear+ReLU head.

    Args:
        num_classes (int, optional): Number of output classes per time step. (Default: ``40``)
        num_features (int, optional): Size of each input feature vector. (Default: ``80``)
        model_type (str, optional): One of ``lstm``/``blstm``/``rnn``/``brnn``/``gru``/``bgru``;
            a leading ``b`` selects a bidirectional network. (Default: ``"lstm"``)
        hidden_size (int, optional): Hidden state size of each recurrent layer. (Default: ``512``)
        num_layers (int, optional): Number of stacked recurrent layers. (Default: ``4``)

    Raises:
        ValueError: If ``model_type`` does not end with ``lstm``, ``gru`` or ``rnn``.
    """

    def __init__(self, num_classes=40, num_features=80, model_type="lstm",
                 hidden_size=512, num_layers=4):
        super(RNN, self).__init__()
        self.use_bidirection = model_type.startswith("b")
        self.num_direction = 2 if self.use_bidirection else 1
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        args = {
            "input_size": num_features,
            "hidden_size": self.hidden_size,
            "num_layers": self.num_layers,
            "batch_first": True,
            "bidirectional": self.use_bidirection,
        }
        if model_type.endswith("lstm"):
            self.rnn_layer = nn.LSTM(**args)
        elif model_type.endswith("gru"):
            self.rnn_layer = nn.GRU(**args)
        elif model_type.endswith("rnn"):
            self.rnn_layer = nn.RNN(**args)
        else:
            raise ValueError("No such model type is supported: {!r}".format(model_type))
        # Bidirectional RNNs concatenate both directions on the feature axis,
        # hence the hidden_size * num_direction input to the head.
        self.output = nn.Sequential(
            nn.Linear(self.hidden_size * self.num_direction, num_classes),
            nn.ReLU(inplace=True))

    def forward(self, x):
        """Map a (batch, time, num_features) tensor to (batch, time, num_classes)."""
        x, _ = self.rnn_layer(x)
        return self.output(x)
        

if __name__ == "__main__":
    # # Waveform smoke test (kept for reference).
    # from scipy.io import wavfile
    # import numpy as np
    # num = 4
    # net = Wav2Letter(num_classes=40, input_type="waveform", num_features=1)
    # summary(net, input_size=(1, 16000))
    # input_path = "C:/Users/zhangpengshen/Desktop/SA1.WAV.wav"
    # sample_rate, signal = wavfile.read(input_path)
    # print(np.ceil((len(signal)//num)/(sample_rate*0.02)))
    # signal = np.reshape(signal.astype(np.float32)[:len(signal)//num*num], (num, 1, len(signal)//num))
    # output = net(torch.from_numpy(signal).cpu())
    # print(output)

    # MFCC smoke test: build the model, initialize conv parameters, run one batch.
    net = Wav2Letter(num_classes=40, input_type="mfcc", num_features=80)
    for m in net.modules():
        if isinstance(m, nn.Conv1d):
            nn.init.kaiming_normal_(m.weight.data)  # conv weight initialization
            nn.init.zeros_(m.bias.data)
    #net = RNN(num_classes=40, num_features=80, model_type="blstm")

    # Fall back to CPU when no GPU is present so the script stays runnable;
    # torchsummary defaults to "cuda", so pass the chosen device explicitly.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    net = net.to(device)
    summary(net, input_size=(80, 80), device=device)
    input_ = torch.randn(3, 80, 80).to(device)
    output_ = net(input_)
    print(output_.size())
    net.eval()
    torch.save(net.state_dict(), "tmp.pt")
    #for i in range(3):
    #    print(input_[i].cpu().numpy())
    #    print(output_[i])
    #    print("-----------------------")
