# requirements
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Module, Linear
import numpy as np


class SirenLayer(Module):
  """One SIREN layer: a linear map followed by a sin(w0 * x) activation.

  Args:
    in_f: input feature size.
    out_f: output feature size.
    w0: sine frequency scale (30 as in the SIREN paper).
    is_first: use the wider first-layer weight init.
    is_last: skip the sine activation and return the raw linear output.
  """

  def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False):
    super().__init__()
    self.in_f = in_f
    self.w0 = w0
    self.linear = nn.Linear(in_f, out_f)
    self.is_first = is_first
    self.is_last = is_last
    self.init_weights()

  def init_weights(self):
    # SIREN init: U(-1/fan_in, 1/fan_in) for the first layer,
    # U(-sqrt(6/fan_in)/w0, sqrt(6/fan_in)/w0) for the rest.
    if self.is_first:
      bound = 1 / self.in_f
    else:
      bound = np.sqrt(6 / self.in_f) / self.w0
    with torch.no_grad():
      self.linear.weight.uniform_(-bound, bound)

  def forward(self, x):
    out = self.linear(x)
    if self.is_last:
      return out
    return torch.sin(self.w0 * out)


def gon_model(dimensions):
  """Build a SIREN MLP from a list of layer widths.

  Args:
    dimensions: widths, e.g. [in, h1, ..., out]; the first layer uses
      the first-layer init and the last layer has no sine activation.

  Returns:
    nn.Sequential of SirenLayer modules.
  """
  layers = [SirenLayer(dimensions[0], dimensions[1], is_first=True)]
  layers.extend(
      SirenLayer(d_in, d_out)
      for d_in, d_out in zip(dimensions[1:-2], dimensions[2:-1]))
  layers.append(SirenLayer(dimensions[-2], dimensions[-1], is_last=True))
  return nn.Sequential(*layers)


def Classifier(dimensions):
  """Build an MLP classifier: Linear/ReLU pairs, no activation at the end.

  Args:
    dimensions: layer widths, e.g. [in, hidden, ..., num_classes].

  Returns:
    nn.Sequential ending in a bare Linear layer.
  """
  layers = []
  hidden_count = len(dimensions) - 2
  for i, (d_in, d_out) in enumerate(zip(dimensions[:-1], dimensions[1:])):
    layers.append(Linear(d_in, d_out))
    # ReLU after every layer except the final (logit-producing) one.
    if i < hidden_count:
      layers.append(nn.ReLU(inplace=False))
  return nn.Sequential(*layers)


def get_mgrid(sidelen, dim=2):
  """Return a flattened coordinate grid covering [-1, 1]^dim.

  Args:
    sidelen: number of points per axis.
    dim: number of axes.

  Returns:
    Tensor of shape [1, sidelen**dim, dim] with coordinates in [-1, 1].
  """
  axes = tuple(dim * [torch.linspace(-1, 1, steps=sidelen)])
  # indexing="ij" is the historical default; passing it explicitly
  # silences the deprecation warning raised by torch >= 1.10 without
  # changing the produced grid.
  mgrid = torch.stack(torch.meshgrid(*axes, indexing="ij"), dim=-1)
  mgrid = mgrid.reshape([1, -1, dim])
  return mgrid


# model size is not controlled
class MultiGON(Module):
  """Multi-head SIREN decoder for three modalities (language/audio/vision).

  A shared SIREN trunk maps [3-dim one-hot modality tag + 1-D coordinate
  + latent vector] to a hidden representation, and one output head per
  modality produces that modality's reconstruction.
  """

  def __init__(self, hyp_params, latent_dim) -> None:
    """
    Args:
      hyp_params: object exposing orig_d_l/a/v, l/a/v_len, nlayers,
        batch_size and output_dim attributes.
      latent_dim: size of the per-sample latent vector.
    """
    super(MultiGON, self).__init__()

    # model input/output size control
    self.orig_d_l, self.orig_d_a, self.orig_d_v = hyp_params.orig_d_l, hyp_params.orig_d_a, hyp_params.orig_d_v
    self.l_len, self.a_len, self.v_len = hyp_params.l_len, hyp_params.a_len, hyp_params.v_len
    self.nlayers = hyp_params.nlayers
    self.batch_size = hyp_params.batch_size
    output_dim = hyp_params.output_dim  # This is actually not a hyperparameter :-)

    # multi-head SIREN: input is [tag(3) + coord(1) + latent].
    self.latent_dim = latent_dim
    self.dims = [1 + 3 + self.latent_dim, 256, 256, 256]
    self.input_layer = SirenLayer(self.dims[0], self.dims[1], is_first=True)
    self.hidden_layer = SirenLayer(self.dims[1], self.dims[2])
    self.hidden_layer_2 = SirenLayer(self.dims[2], self.dims[3])
    self.output_layer_a = SirenLayer(self.dims[3], self.orig_d_a, is_last=True)
    self.output_layer_v = SirenLayer(self.dims[3], self.orig_d_v, is_last=True)
    self.output_layer_l = SirenLayer(self.dims[3], self.orig_d_l, is_last=True)

    # Frozen per-modality input grids (one-hot modality tag ++ coordinate).
    self.grid_a = self._modality_grid([1., 0., 0.], self.a_len)
    self.grid_v = self._modality_grid([0., 1., 0.], self.v_len)
    self.grid_l = self._modality_grid([0., 0., 1.], self.l_len)

  @staticmethod
  def _modality_grid(one_hot, length):
    """Build a frozen [1, length, 4] grid: modality one-hot ++ 1-D coord.

    Kept as a non-trainable nn.Parameter (not a buffer) to preserve the
    original module's state_dict/parameters() behavior.
    """
    tag = torch.tensor([[one_hot]]).repeat([1, length, 1])
    return nn.Parameter(
        torch.cat([tag, get_mgrid(length, dim=1)], dim=-1),
        requires_grad=False)

  def _decode(self, grid, latent_vec, seq_len, head):
    """Run one modality's grid + latent through the shared trunk and head."""
    batch_size = latent_vec.shape[0]
    z = torch.cat([
        grid.repeat([batch_size, 1, 1]),
        latent_vec.repeat([1, seq_len, 1])
    ],
                  dim=-1)
    rep = self.input_layer(z)
    rep = self.hidden_layer(rep)
    rep = self.hidden_layer_2(rep)
    return head(rep)

  def forward(self, latent_vec):
    """
    input: latent_vec, [batch_size, 1, latent_dim], requires_grad

    Returns:
      (rec_l, rec_a, rec_v) reconstructions of shape
      [batch_size, *_len, orig_d_*].
    """
    rec_a = self._decode(self.grid_a, latent_vec, self.a_len,
                         self.output_layer_a)
    rec_v = self._decode(self.grid_v, latent_vec, self.v_len,
                         self.output_layer_v)
    rec_l = self._decode(self.grid_l, latent_vec, self.l_len,
                         self.output_layer_l)

    return rec_l, rec_a, rec_v


if __name__ == '__main__':
  # Smoke test: one GON inner step followed by classification.
  from hyparams import hyparams
  hyp = hyparams()
  bs = hyp.batch_size
  latent_dim = 128
  device = torch.device('cpu')

  transformer_model = MultiGON(hyp, latent_dim).to(device)
  classifier = Classifier([latent_dim, 256, 8]).to(device)

  # Random stand-ins for the three modality targets.
  a_rand = torch.rand([bs, hyp.a_len, hyp.orig_d_a]).to(device)
  v_rand = torch.rand([bs, hyp.v_len, hyp.orig_d_v]).to(device)
  l_rand = torch.rand([bs, hyp.l_len, hyp.orig_d_l]).to(device)

  # GON trick: decode from a zero latent, then use the negated gradient of
  # the inner loss w.r.t. that latent as the latent code.
  # Move to the device BEFORE requires_grad_() so z stays a leaf tensor;
  # the original order (.requires_grad_().to(device)) yields a non-leaf
  # copy on non-CPU devices.
  z = torch.zeros([bs, 1, latent_dim]).to(device).requires_grad_()
  l_rec, a_rec, v_rec = transformer_model(z)

  # Mean (over batch) of per-sequence summed squared error, averaged
  # across the three modalities.
  L_inner = (((l_rec - l_rand)**2).sum(1).mean() +
             ((a_rec - a_rand)**2).sum(1).mean() +
             ((v_rec - v_rand)**2).sum(1).mean()) / 3
  # create_graph=True keeps this step differentiable for outer-loop training.
  z = -torch.autograd.grad(
      L_inner, [z], create_graph=True, retain_graph=True)[0]

  prediction = classifier(z)

  print(z.shape)