import torch
import numpy as np
from torch import nn
from torch import einsum

class PfnnParameter(nn.Module):
    """Phase-blended linear-layer parameters (PFNN-style expert mixing).

    Holds the weight matrices and bias vectors of ``num_control`` expert
    linear layers.  Given a per-sample ``phase``, four cyclically adjacent
    experts are blended with a Catmull-Rom spline to produce one weight
    matrix and one bias vector per sample, which are then applied to the
    input as a per-sample linear layer.
    """

    def __init__(self,
                 shape,
                 ):
        """
        Args:
            shape: tuple ``(num_experts, out_dim, in_dim)`` describing the
                bank of expert weight matrices (e.g. ``(4, 311, 342)``).
        """
        super().__init__()
        # shape bookkeeping
        self.num_control = shape[0]   # number of experts (spline control points)
        self.weight_shape = shape     # (num_experts, out_dim, in_dim)
        self.bias_shape = shape[:-1]  # (num_experts, out_dim)

        # alpha: expert weight bank, beta: expert bias bank.
        # NOTE: nn.Parameter defaults to requires_grad=True, so the explicit
        # flag used previously was redundant.
        self.alpha = nn.Parameter(self.initial_alpha())
        self.beta = nn.Parameter(self.initial_beta())

    def forward(self, x, phase):
        """Apply the phase-blended linear transform.

        Args:
            x: ``(batch, in_dim)`` input features.
            phase: ``(batch, 1)`` phase value per sample; interpreted
                cyclically over the experts (phase 1.0 wraps back to 0.0).

        Returns:
            ``(batch, out_dim)`` tensor ``W(phase) @ x + b(phase)``.
        """
        pindex_1, bamount, wamount = self.getExpertWeight(phase)
        # The four consecutive control-point experts around the anchor,
        # wrapped cyclically.
        pindex_0 = (pindex_1 - 1) % self.num_control
        pindex_2 = (pindex_1 + 1) % self.num_control
        pindex_3 = (pindex_1 + 2) % self.num_control

        # Blend the selected experts into one weight/bias per sample.
        weight = self.average_weight(pindex_0, pindex_1, pindex_2, pindex_3, wamount)
        bias = self.average_bias(pindex_0, pindex_1, pindex_2, pindex_3, bamount)
        # Per-sample matrix-vector product: (b, m, n) x (b, n) -> (b, m).
        x = einsum('bmn,bn->bm', weight, x)
        x = x + bias
        return x

    def initial_alpha(self):
        """Uniform init of the expert weights with bound sqrt(6 / (out_dim * in_dim)).

        (Glorot-flavoured bound computed over the product of the last two
        dims, matching the original PFNN reference implementation.)
        """
        shape = self.weight_shape
        alpha_bound = np.sqrt(6. / np.prod(shape[-2:]))
        alpha = np.asarray(
            np.random.uniform(low=-alpha_bound, high=alpha_bound,
                              size=shape).astype(np.float32)
        )
        return torch.from_numpy(alpha)

    def initial_beta(self):
        '''
        Zero initialize the bias
        '''
        return torch.zeros(self.bias_shape)

    def getExpertWeight(self, phase):
        """Map a phase to (anchor expert index, bias blend, weight blend).

        Args:
            phase: ``(batch, 1)`` tensor of phase values.

        Returns:
            phase_index_1: ``(batch, 1)`` long tensor; index k of the expert
                such that the sample lies between experts k and k+1.
            bias_amount: ``(batch, 1)`` interpolation parameter mu in [0, 1).
            weight_amount: ``(batch, 1, 1)`` same mu, shaped to broadcast
                against ``(batch, out_dim, in_dim)`` weight matrices.
        """
        num_experts = self.num_control
        phase_scale = num_experts * phase
        phase_amount = phase_scale % 1.0  # floor-based fractional part
        # BUGFIX: use floor (not `.long()` truncation toward zero) so that
        # the index stays consistent with the floor-based `%` above for
        # negative phases: e.g. phase_scale = -0.4 now yields mu = 0.6 and
        # anchor floor(-0.4) = -1 -> num_experts - 1, whereas truncation
        # gave anchor 0 and blended the wrong experts.
        phase_index_1 = torch.floor(phase_scale).long() % num_experts

        bias_amount = phase_amount
        # Extra trailing dim so mu broadcasts over (out_dim, in_dim).
        weight_amount = bias_amount.unsqueeze(1)
        return phase_index_1, bias_amount, weight_amount

    def average_weight(self,
                       pindex_0,
                       pindex_1,
                       pindex_2,
                       pindex_3,
                       mu):
        """Catmull-Rom blend of the four per-sample expert weight matrices.

        Args:
            pindex_*: ``(batch, 1)`` long tensors of expert indices k-1..k+2.
            mu: ``(batch, 1, 1)`` interpolation parameter.

        Returns:
            ``(batch, out_dim, in_dim)`` blended weight matrices.
        """
        # Advanced indexing alpha[(batch,)] selects one (out_dim, in_dim)
        # matrix per sample -- equivalent to the former expand+gather
        # formulation (same values, same gradients), just simpler.
        w0 = self.alpha[pindex_0.view(-1)]
        w1 = self.alpha[pindex_1.view(-1)]
        w2 = self.alpha[pindex_2.view(-1)]
        w3 = self.alpha[pindex_3.view(-1)]
        return self.cubic(w0, w1, w2, w3, mu)

    def average_bias(self,
                     pindex_0,
                     pindex_1,
                     pindex_2,
                     pindex_3,
                     mu):
        """Catmull-Rom blend of the four per-sample expert bias vectors.

        Args:
            pindex_*: ``(batch, 1)`` long tensors of expert indices k-1..k+2.
            mu: ``(batch, 1)`` interpolation parameter.

        Returns:
            ``(batch, out_dim)`` blended bias vectors.
        """
        b0 = self.beta[pindex_0.view(-1)]
        b1 = self.beta[pindex_1.view(-1)]
        b2 = self.beta[pindex_2.view(-1)]
        b3 = self.beta[pindex_3.view(-1)]
        return self.cubic(b0, b1, b2, b3, mu)

    @staticmethod
    def cubic(y0, y1, y2, y3, mu):
        '''
        Catmull-Rom spline through control points y0..y3, evaluated at
        mu in [0, 1].  Interpolates the middle segment: returns y1 at
        mu = 0 and y2 at mu = 1.
        '''
        return 0.5 * ((-y0 + 3.*y1 - 3.*y2 + y3) * mu ** 3 +
            (2.*y0 - 5.*y1 + 4.*y2 -y3) * mu ** 2 +
            (-y0 + y2) * mu +
            2*y1)


if __name__ == "__main__":
    # Smoke test: push a batch of zero inputs through a 4-expert layer
    # at phase 0 and print the result.
    model = PfnnParameter((4, 311, 342))
    features = torch.zeros(16, 342)
    phases = torch.zeros(16, 1)
    result = model(features, phases)
    print(result)