import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable


class CausalConv1d(nn.Conv1d):
    """Causal 1-D convolution: output at time t depends only on inputs up to t.

    The base class pads symmetrically on both ends; this subclass then trims
    the trailing padded positions so no information leaks from the future.
    With the defaults (kernel_size=2, padding=1, dilation=1) the output
    length equals the input length.
    """

    def __init__(self, in_channels, out_channels, kernel_size=2, stride=1, padding=1, dilation=1, groups=1, bias=False):
        # `dilation` controls the spacing between the kernel points; also known as
        # the "a trous" algorithm. For causality with kernel_size=2, callers
        # should use padding == dilation.
        super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)

    def forward(self, inputs):
        outputs = super().forward(inputs)
        # Fix/generalization: the original hard-coded `outputs[:, :, :-1]`, which
        # only removes the correct amount of future context when padding=1.
        # Trim exactly the trailing `padding` positions instead (identical
        # behavior at the default padding=1; also correct for dilated causal
        # convs where padding == dilation, and a no-op when padding=0).
        trim = self.padding[0]
        return outputs[:, :, :-trim] if trim > 0 else outputs


class DilatedConv1d(nn.Conv1d):
    """1-D convolution with dilation and no padding.

    Differs from CausalConv1d only in the default padding (0 instead of 1),
    so the temporal length shrinks by dilation * (kernel_size - 1).
    """

    def __init__(self, in_channels, out_channels, kernel_size=2, stride=1, padding=0, dilation=1, groups=1, bias=False):
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
        )

    def forward(self, inputs):
        # Plain pass-through to the base convolution.
        return super().forward(inputs)


class ResidualBlock(nn.Module):
    """WaveNet residual block: gated dilated convolution followed by 1x1 projections.

    Given inputs of shape (batch, res_channels, n), the dilated convolutions
    shorten the time axis by `dilation` (kernel_size=2, no padding). The block
    returns (res_out, skip_out) where res_out has res_channels and skip_out
    has skip_channels, both of length n - dilation.
    """

    def __init__(self, res_channels, skip_channels, dilation):
        super().__init__()
        # Two parallel dilated convs feed the gated activation unit.
        self.filter_conv = DilatedConv1d(in_channels=res_channels, out_channels=res_channels, dilation=dilation)
        self.gate_conv = DilatedConv1d(in_channels=res_channels, out_channels=res_channels, dilation=dilation)
        # 1x1 projection up to the skip width, then back down to the residual width.
        # NOTE: residual_conv consumes skip_out (skip_channels in), per the
        # original author's comment — not the gated output directly.
        self.skip_conv = nn.Conv1d(in_channels=res_channels, out_channels=skip_channels, kernel_size=1)
        self.residual_conv = nn.Conv1d(in_channels=skip_channels, out_channels=res_channels, kernel_size=1)

    def forward(self, inputs):
        # Gated activation: tanh(filter) * sigmoid(gate).
        # Each conv shortens the last dim from n to n - dilation.
        gated = torch.tanh(self.filter_conv(inputs)) * torch.sigmoid(self.gate_conv(inputs))

        # (batch, res_channels, m) -> (batch, skip_channels, m), kernel_size=1.
        skip_out = self.skip_conv(gated)
        # (batch, skip_channels, m) -> (batch, res_channels, m).
        res_out = self.residual_conv(skip_out)

        # Residual connection: slice the trailing m positions of the inputs so
        # the lengths line up with the shortened conv output.
        tail = inputs[:, :, -res_out.size(2):]
        return res_out + tail, skip_out


class WaveNet(nn.Module):
    """Stack of WaveNet residual blocks over an embedded token sequence.

    Input: (batch, n) integer ids in [0, in_depth). Output: (batch, in_depth, m)
    logits, where m is the length remaining after all dilated convolutions.
    """

    def __init__(self, in_depth=256, res_channels=32, skip_channels=512, dilation_depth=10, n_repeat=5):
        super().__init__()
        # Powers of two repeated: [1, 2, ..., 2**(dilation_depth-1)] * n_repeat,
        # i.e. dilation_depth * n_repeat entries (50 with the defaults).
        self.dilations = [2 ** i for i in range(dilation_depth)] * n_repeat
        self.main = nn.ModuleList(
            [ResidualBlock(res_channels, skip_channels, d) for d in self.dilations]
        )
        # Embedding table of in_depth vectors, each res_channels wide: maps every
        # incoming integer id to its vector (adds a res_channels dimension).
        self.pre = nn.Embedding(in_depth, res_channels)
        # self.pre_conv = CausalConv1d(in_channels=res_channels, out_channels=res_channels)
        self.post = nn.Sequential(
            nn.ReLU(),
            nn.Conv1d(skip_channels, skip_channels, 1),
            nn.ReLU(),
            nn.Conv1d(skip_channels, in_depth, 1),
        )

    def forward(self, inputs):
        hidden = self.preprocess(inputs)

        skips = []
        for block in self.main:
            hidden, skip = block(hidden)
            skips.append(skip)

        # Trim every layer's skip tensor to the final length and sum across all
        # layers: shape (batch, skip_channels, m).
        final_len = hidden.size(2)
        summed = sum(s[:, :, -final_len:] for s in skips)

        # (batch, skip_channels, m) -> (batch, in_depth, m)
        return self.post(summed)

    def preprocess(self, inputs):
        # (batch, n) ids -> (batch, n, res_channels) -> (batch, res_channels, n)
        embedded = self.pre(inputs)
        # out = self.pre_conv(out)  # optional causal pre-conv, disabled in the original
        return embedded.transpose(1, 2)
