# This module is from [WeNet](https://github.com/wenet-e2e/wenet).

# ## Citations

# ```bibtex
# @inproceedings{yao2021wenet,
#   title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit},
#   author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin},
#   booktitle={Proc. Interspeech},
#   year={2021},
#   address={Brno, Czech Republic},
#   organization={IEEE}
# }
# @article{zhang2022wenet,
#   title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit},
#   author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei},
#   journal={arXiv preprint arXiv:2203.15455},
#   year={2022}
# }
# ```
"""ConvolutionModule definition.""" | |
from typing import Tuple | |
import torch | |
from torch import nn | |
class ConvolutionModule(nn.Module): | |
"""ConvolutionModule in Conformer model.""" | |

    def __init__(
        self,
        channels: int,
        kernel_size: int = 15,
        activation: nn.Module = nn.ReLU(),
        norm: str = "batch_norm",
        causal: bool = False,
        bias: bool = True,
        adaptive_scale: bool = False,
        init_weights: bool = False,
    ):
"""Construct an ConvolutionModule object. | |
Args: | |
channels (int): The number of channels of conv layers. | |
kernel_size (int): Kernel size of conv layers. | |
causal (int): Whether use causal convolution or not | |
""" | |
        super().__init__()
        self.bias = bias
        self.channels = channels
        self.kernel_size = kernel_size
        self.adaptive_scale = adaptive_scale
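        # Learnable per-channel input scale and bias; they are only trained
        # when adaptive_scale=True, otherwise they stay fixed at 1 and 0.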
        self.ada_scale = torch.nn.Parameter(
            torch.ones([1, 1, channels]), requires_grad=adaptive_scale
        )
        self.ada_bias = torch.nn.Parameter(
            torch.zeros([1, 1, channels]), requires_grad=adaptive_scale
        )
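        # The first pointwise conv doubles the channel dimension so that the
        # GLU in forward() can split it into value and gate halves.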
        self.pointwise_conv1 = nn.Conv1d(
            channels,
            2 * channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=bias,
        )
        # self.lorder is used to distinguish if it's a causal convolution,
        # if self.lorder > 0: it's a causal convolution, the input will be
        # padded with self.lorder frames on the left in forward.
        # else: it's a symmetrical convolution
        if causal:
            padding = 0
            self.lorder = kernel_size - 1
        else:
            # kernel_size should be an odd number for non-causal convolution
            assert (kernel_size - 1) % 2 == 0
            padding = (kernel_size - 1) // 2
            self.lorder = 0
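        # Depthwise conv: groups=channels applies one kernel_size filter per
        # channel, with no mixing across channels.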
        self.depthwise_conv = nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=padding,
            groups=channels,
            bias=bias,
        )
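        # BatchNorm1d normalizes (batch, channels, time) directly, while
        # LayerNorm expects channels last, hence the transposes in forward().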
assert norm in ["batch_norm", "layer_norm"] | |
if norm == "batch_norm": | |
self.use_layer_norm = False | |
self.norm = nn.BatchNorm1d(channels) | |
else: | |
self.use_layer_norm = True | |
self.norm = nn.LayerNorm(channels) | |
self.pointwise_conv2 = nn.Conv1d( | |
channels, | |
channels, | |
kernel_size=1, | |
stride=1, | |
padding=0, | |
bias=bias, | |
) | |
self.activation = activation | |
if init_weights: | |
self.init_weights() | |

    def init_weights(self):
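        # Uniform init with bound 1/sqrt(fan_in): fan_in is `channels` for the
        # pointwise convs and `kernel_size` for the depthwise conv
        # (groups=channels means each filter only sees one channel).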
        pw_max = self.channels**-0.5
        dw_max = self.kernel_size**-0.5
        torch.nn.init.uniform_(self.pointwise_conv1.weight.data, -pw_max, pw_max)
        if self.bias:
            torch.nn.init.uniform_(self.pointwise_conv1.bias.data, -pw_max, pw_max)
        torch.nn.init.uniform_(self.depthwise_conv.weight.data, -dw_max, dw_max)
        if self.bias:
            torch.nn.init.uniform_(self.depthwise_conv.bias.data, -dw_max, dw_max)
        torch.nn.init.uniform_(self.pointwise_conv2.weight.data, -pw_max, pw_max)
        if self.bias:
            torch.nn.init.uniform_(self.pointwise_conv2.bias.data, -pw_max, pw_max)

    def forward(
        self,
        x: torch.Tensor,
        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        cache: torch.Tensor = torch.zeros((0, 0, 0)),
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute convolution module. | |
Args: | |
x (torch.Tensor): Input tensor (#batch, time, channels). | |
mask_pad (torch.Tensor): used for batch padding (#batch, 1, time), | |
(0, 0, 0) means fake mask. | |
cache (torch.Tensor): left context cache, it is only | |
used in causal convolution (#batch, channels, cache_t), | |
(0, 0, 0) meas fake cache. | |
Returns: | |
torch.Tensor: Output tensor (#batch, time, channels). | |
""" | |
        if self.adaptive_scale:
            x = self.ada_scale * x + self.ada_bias
        # exchange the temporal dimension and the feature dimension
        x = x.transpose(1, 2)  # (#batch, channels, time)
        # mask batch padding
        if mask_pad.size(2) > 0:  # time > 0
            x.masked_fill_(~mask_pad, 0.0)
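        # Causal path: left-pad with zeros on the first chunk, or prepend the
        # cached left context when streaming, then keep the last `lorder`
        # frames as the cache for the next chunk.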
        if self.lorder > 0:
            if cache.size(2) == 0:  # cache_t == 0
                x = nn.functional.pad(x, (self.lorder, 0), "constant", 0.0)
            else:
                assert cache.size(0) == x.size(0)  # equal batch
                assert cache.size(1) == x.size(1)  # equal channel
                x = torch.cat((cache, x), dim=2)
            assert x.size(2) > self.lorder
            new_cache = x[:, :, -self.lorder:]
        else:
            # It's better we just return None if no cache is required,
            # However, for JIT export, here we just fake one tensor instead of
            # None.
            new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
        # GLU mechanism
        x = self.pointwise_conv1(x)  # (batch, 2*channels, time)
        x = nn.functional.glu(x, dim=1)  # (batch, channels, time)
        # 1D Depthwise Conv
        x = self.depthwise_conv(x)
        if self.use_layer_norm:
            x = x.transpose(1, 2)
        x = self.activation(self.norm(x))
        if self.use_layer_norm:
            x = x.transpose(1, 2)
        x = self.pointwise_conv2(x)
        # mask batch padding
        if mask_pad.size(2) > 0:  # time > 0
            x.masked_fill_(~mask_pad, 0.0)
        return x.transpose(1, 2), new_cache
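

# The block below is not part of the original WeNet module: it is a minimal
# smoke test sketching how the module is typically driven. All shapes and
# hyperparameters here are illustrative assumptions, not values from the
# source. It runs one full forward pass for a non-causal module, then a
# chunk-by-chunk streaming pass for a causal module that feeds the returned
# `new_cache` back in as the left context.
if __name__ == "__main__":
    torch.manual_seed(0)
    batch, time, channels = 2, 16, 8

    # Non-causal module: the whole utterance in a single call.
    conv = ConvolutionModule(channels=channels, kernel_size=15, causal=False)
    x = torch.randn(batch, time, channels)
    mask = torch.ones(batch, 1, time, dtype=torch.bool)
    y, _ = conv(x, mask_pad=mask)
    assert y.shape == (batch, time, channels)

    # Causal module: process two chunks, carrying the conv cache between them.
    causal_conv = ConvolutionModule(channels=channels, kernel_size=15, causal=True)
    causal_conv.eval()  # eval-mode BatchNorm keeps chunking deterministic
    with torch.no_grad():
        full, _ = causal_conv(x)
        cache = torch.zeros((0, 0, 0))
        chunks = []
        for chunk in x.split(8, dim=1):
            out, cache = causal_conv(chunk, cache=cache)
            chunks.append(out)
        streamed = torch.cat(chunks, dim=1)
        # Streaming with the cache should reproduce the full utterance output.
        assert torch.allclose(full, streamed, atol=1e-5)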