import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from torch.nn.parameter import Parameter

#
# 本模型中的PreLayer 和StartLayer 在一定程度上实现了MFCC.
# MFCC 的整个过程包括：
# 1. 时域滤波
# 2. FFT 并计算功率谱
# 3. 计算Mel-spaced filterbank
# 4. 对mel功率谱取对数
# 5. 离散余弦变换
#
# 第一步和第二步在进入PreLayer之前完成。若将功率谱输入表示为x, 则第三步到第五步
# 可以表示为 D * log(M * x), 其中D是DCT矩阵，M 为 Mel-spaced filterbank的等效
# 矩阵。本模型中，PreLayer 实现log(M*x), StartLayer 实现了DCT。
#
# 矩阵M的一个重要性质是每行的值均为非负数，在PreLayer中，对weight的每一行首先做
# softmax运算，保证了非负的性质。PreLayer的bias为1.0，这样可以保证log的输入值
# 始终大于0，避免发生log(0)的情况。
#
# DCT 矩阵作为正交矩阵，除第一行外，其余行的每行元素的和均为0。StartLayer 利用
# 这一性质，将weight选定为行的和为0的矩阵。

class PreLayer(nn.Module):
    """Emulate the mel-filterbank + log stage of MFCC: ``log(M @ x + 1)``.

    The weight matrix is passed through a row-wise softmax before use, which
    guarantees every entry is non-negative (a property of mel filterbanks).
    The bias is fixed at 1.0 for every output, so the argument of ``log`` is
    always strictly positive and ``log(0)`` can never occur.
    """

    def __init__(self, in_features, out_features):
        super(PreLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        # Fixed (non-trainable) all-ones bias.  Registered as a buffer so it
        # follows the module across .to(device)/.cuda() and is saved in
        # state_dict — the previous plain-tensor attribute stayed on the CPU
        # and broke forward() on GPU.
        self.register_buffer('bias', torch.ones(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the raw (pre-softmax) weight uniformly in [-0.1, 0.1)."""
        self.weight.data.uniform_(-0.1, 0.1)

    def forward(self, x):
        # Row-wise softmax -> rows are non-negative and sum to 1.  The result
        # is cached on the module because dump_param() may export it later.
        self.weight_s = F.softmax(self.weight, dim=1)
        x = F.linear(x, self.weight_s, self.bias)
        x = torch.log(x)
        # (batch, time, features) -> (batch, features, time) for Conv1d layers.
        x = x.permute(0, 2, 1)
        return x


class StartLayer(nn.Module):
    """Trainable stand-in for the DCT step of MFCC.

    A DCT matrix is orthogonal, and every row except the first sums to zero.
    This layer enforces the zero-sum property by subtracting each row's mean
    from the weight before the convolution, so the effective kernel always
    sums to zero over the input channels.
    """

    def __init__(self, in_features, out_features, kernel_size):
        super(StartLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.weight = Parameter(torch.Tensor(out_features, in_features, kernel_size))
        self.bias = Parameter(torch.Tensor(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weight uniformly in [-0.1, 0.1) and bias to zero."""
        self.weight.data.uniform_(-0.1, 0.1)
        # BUG FIX: the bias was previously left uninitialized —
        # torch.Tensor(n) returns uninitialized memory, which made training
        # non-reproducible and could contain arbitrary garbage values.
        self.bias.data.zero_()

    def forward(self, x):
        # Subtract each row's mean so, for every (out_channel, kernel_pos),
        # the effective weight sums to zero over the input channels —
        # mimicking the DCT row property.  Cached for dump_param() export.
        weight_row_average = torch.mean(self.weight, 1, keepdim=True)
        self.weight_row_sum_zeroed = self.weight - weight_row_average.repeat(1, self.in_features, 1)
        x = F.conv1d(x, self.weight_row_sum_zeroed, self.bias)
        x = torch.sigmoid(x)
        return x

class TemporalBlock(nn.Module):
    """A single dilated 1-D convolution with weight normalization.

    No padding is used, so the time axis shrinks by
    ``dilation * (kernel_size - 1)`` samples per block.  ``active`` is an
    optional element-wise activation applied after the convolution
    (``None`` means no activation).
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, dilation, active):
        super(TemporalBlock, self).__init__()
        conv = nn.Conv1d(n_inputs, n_outputs, kernel_size,
                         stride=1, dilation=dilation, bias=True)
        self.conv = weight_norm(conv)
        self.active = active
        self.init_weights()

    def init_weights(self):
        # NOTE(review): weight_norm recomputes .weight from weight_g/weight_v
        # in a pre-forward hook, so this normal_() on .weight may be
        # overwritten at the first forward pass — confirm whether the
        # intent was to initialize weight_v instead.
        self.conv.weight.data.normal_(0, 0.01)
        self.conv.bias.data.uniform_(-0.1, 0.1)

    def forward(self, x):
        out = self.conv(x)
        return out if self.active is None else self.active(out)

class Net(nn.Module):
    """MFCC-inspired network: PreLayer (log-mel stage) -> StartLayer
    (DCT-like stage) -> four dilated temporal convolution blocks.

    Input:  ``(batch, time, in_features)`` power-spectrum frames.
    Output: ``(batch, out_features, time - 62)`` — the unpadded dilated
            convolutions shrink the time axis by 2 + 4 + 8 + 16 + 32 frames.
    """

    def __init__(self, in_features, out_features):
        super(Net, self).__init__()
        self.layerPre = PreLayer(in_features=in_features, out_features=40)
        self.layer0 = StartLayer(in_features=40, out_features=31, kernel_size=3)
        self.layer1 = TemporalBlock(n_inputs=31, n_outputs=31, kernel_size=3, dilation=2, active=torch.sigmoid)
        self.layer2 = TemporalBlock(n_inputs=31, n_outputs=21, kernel_size=3, dilation=4, active=torch.sigmoid)
        self.layer3 = TemporalBlock(n_inputs=21, n_outputs=21, kernel_size=3, dilation=8, active=torch.sigmoid)
        self.layer4 = TemporalBlock(n_inputs=21, n_outputs=out_features, kernel_size=3, dilation=16, active=None)

    def forward(self, x):
        x = self.layerPre(x)
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    @staticmethod
    def _dump_param(tensor, name_for_sw, fd):
        """Write *tensor* as a C ``float`` array initializer to stream *fd*."""
        # detach().cpu() so the dump also works for CUDA tensors and tensors
        # that require grad — the previous .data.numpy() raised on CUDA.
        model_node = np.ndarray.flatten(tensor.detach().cpu().numpy()).tolist()
        print("float {name}[{length}] = {{{value}}};".format(name=name_for_sw, length=len(model_node), value=", ".join("{:.10f}".format(f) for f in model_node)), file=fd)

    def dump_param(self, f):
        """Dump all effective weights and biases as C arrays into file *f*.

        The derived matrices (row-softmaxed PreLayer weight, zero-row-sum
        StartLayer weight) are recomputed here instead of read from the
        attributes cached by forward(), so dumping no longer raises
        AttributeError when called before any forward pass.  The values are
        identical to what forward() uses.
        """
        weight_s = F.softmax(self.layerPre.weight, dim=1)
        w0_zeroed = self.layer0.weight - torch.mean(self.layer0.weight, 1, keepdim=True)
        with open(f, "w") as fd:
            self._dump_param(weight_s, 'Wpre', fd)
            self._dump_param(w0_zeroed.permute(0, 2, 1), 'W0', fd)
            self._dump_param(self.layer1.conv.weight.permute(0, 2, 1), 'W1', fd)
            self._dump_param(self.layer2.conv.weight.permute(0, 2, 1), 'W2', fd)
            self._dump_param(self.layer3.conv.weight.permute(0, 2, 1), 'W3', fd)
            self._dump_param(self.layer4.conv.weight.permute(0, 2, 1), 'W4', fd)
            self._dump_param(self.layerPre.bias, 'Bpre', fd)
            self._dump_param(self.layer0.bias, 'B0', fd)
            self._dump_param(self.layer1.conv.bias, 'B1', fd)
            self._dump_param(self.layer2.conv.bias, 'B2', fd)
            self._dump_param(self.layer3.conv.bias, 'B3', fd)
            self._dump_param(self.layer4.conv.bias, 'B4', fd)
