import torch
from torch import nn
from torch.nn import functional as F
from typing import Tuple
from .hexplane import HexPlaneField

class MLP(nn.Module):
    """Plain fully-connected stack: `num_layers` Linear layers with a
    LeakyReLU between consecutive layers (no activation after the last).

    Layer widths: dim_in -> dim_hidden -> ... -> dim_hidden -> dim_out.
    """

    def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers

        layers = []
        for idx in range(num_layers):
            in_features = dim_in if idx == 0 else dim_hidden
            out_features = dim_out if idx == num_layers - 1 else dim_hidden
            layers.append(nn.Linear(in_features, out_features, bias=bias))
        self.net = nn.ModuleList(layers)

    def forward(self, x):
        last = self.num_layers - 1
        for idx, layer in enumerate(self.net):
            x = layer(x)
            if idx != last:
                x = F.leaky_relu(x, inplace=True)
        return x

class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Projects q/k/v with per-model linear layers, splits into `n_head`
    heads, attends, and re-projects the concatenated heads.
    Returns both the attended output and the attention weights.
    """

    def __init__(self, d_model, n_head):
        super(MultiHeadAttention, self).__init__()
        self.n_head = n_head
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_concat = nn.Linear(d_model, d_model)
        self.softmax = nn.Softmax(dim=-1)

    def split(self, tensor):
        """(B, L, d_model) -> (B, n_head, L, d_model // n_head)."""
        batch_size, length, d_model = tensor.size()
        d_tensor = d_model // self.n_head
        tensor = tensor.view(batch_size, length, self.n_head, d_tensor).transpose(1, 2)
        return tensor

    def concat(self, tensor):
        """Inverse of split: (B, n_head, L, d_tensor) -> (B, L, d_model)."""
        batch_size, head, length, d_tensor = tensor.size()
        d_model = head * d_tensor
        tensor = tensor.transpose(1, 2).contiguous().view(batch_size, length, d_model)
        return tensor

    def forward(self, q, k, v, mask=None):
        """Attend q over k/v.

        mask: optional tensor broadcastable to (B, H, L_q, L_k); positions
        where mask == 0 are excluded from attention.
        Returns (output, attention_weights).
        """
        # 1. dot product with weight matrices
        q, k, v = self.w_q(q), self.w_k(k), self.w_v(v)
        # 2. split tensor by number of heads
        q, k, v = self.split(q), self.split(k), self.split(v)
        # 3. scaled dot-product attention
        B, H, L, D = k.shape
        k_t = k.transpose(2, 3)
        score = torch.matmul(q, k_t) / (D ** 0.5)
        if mask is not None:
            # Bug fix: `mask` was accepted but silently ignored. Masked
            # positions get a large negative score so softmax ~ zeroes them.
            score = score.masked_fill(mask == 0, -1e9)
        weight = self.softmax(score)
        out = weight @ v
        # 4. concat heads and pass through the output projection
        out = self.concat(out)
        out = self.w_concat(out)
        return out, weight
        

class AttentionLayer(nn.Module):
    """One post-norm cross-attention block.

    Cross-attention (query = x, key/value = enc_source) with a residual
    connection and LayerNorm, followed by a feed-forward sub-block with
    its own residual and LayerNorm. Returns features and attention map.
    """

    def __init__(self, d_model, nhead, hidden, drop_prob=0.):
        super().__init__()

        self.enc_dec_attention = MultiHeadAttention(d_model, nhead)
        self.norm1 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(p=drop_prob)

        self.ffn = nn.Sequential(
            nn.Linear(d_model, hidden),
            nn.LeakyReLU(),
            nn.Linear(hidden, d_model),
            nn.Dropout(drop_prob),
        )
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(drop_prob)

    def forward(self, x, enc_source) -> Tuple[torch.Tensor, torch.Tensor]:
        residual = x
        attended, att = self.enc_dec_attention(q=x, k=enc_source, v=enc_source, mask=None)
        x = self.norm1(self.dropout1(attended) + residual)

        residual = x
        x = self.norm2(self.dropout2(self.ffn(x)) + residual)
        return x, att
    

class AttentionModule(nn.Module):
    """Stack of `n_layer` AttentionLayer blocks.

    Returns the final features together with the head-averaged attention
    map of every layer, stacked along dim 1.
    """

    def __init__(self, n_layer, d_model, nhead, hidden, drop_prob=0.):
        super().__init__()
        self.layers = nn.ModuleList(
            AttentionLayer(d_model, nhead, hidden, drop_prob) for _ in range(n_layer)
        )

    def forward(self, x, enc_source):
        per_layer_maps = []
        for layer in self.layers:
            x, att = layer(x, enc_source)
            # average over heads, keep a singleton layer axis: B, 1, N, x
            per_layer_maps.append(att.mean(dim=1, keepdim=True))
        return x, torch.cat(per_layer_maps, dim=1)  # B, n_layer, N, x

class DeformNetwork(nn.Module):
    """Deformation field for Gaussian parameters.

    A HexPlane tri-plane encodes per-point (xyz) features. Condition
    vectors (shape / expression / pose coefficients, plus an optional
    512-d "ave" feature) are encoded into attention tokens; a
    cross-attention transformer mixes them into per-point latents that
    are decoded either into static Gaussian parameters or into per-frame
    deformation offsets.
    """

    def __init__(self,
                 d_latent=128,
                 max_sh_degree=3,
                 bounds=0.2,
                 ):
        super(DeformNetwork, self).__init__()

        # triplane feature
        kplanes_config = {
            'grid_dimensions': 2,
            'input_coordinate_dim': 3,    # xyz
            'output_coordinate_dim': 32,  # output dim = 32 * 2
            'resolution': [64, 64, 64],
        }
        multires = [1, 2]
        self.init_bounds = bounds
        self.bounds = bounds
        self.triplane = HexPlaneField(bounds=self.bounds, planeconfig=kplanes_config, multires=multires)

        # parameters
        self.d_triplane = kplanes_config['grid_dimensions'] * kplanes_config['output_coordinate_dim']
        self.d_attn = d_attn = self.d_triplane  # attention width == tri-plane feature width
        self.d_latent = d_latent
        # number of SH coefficients for degrees 0..max_sh_degree
        self.shs_coef_num = shs_coef_num = sum(2 * i + 1 for i in range(max_sh_degree + 1))

        # Multi-Head Attention stack: Layer_Num, D_model, Head_Num, D_hidden
        self.transformer = AttentionModule(3, d_attn, 4, d_latent)

        # MLP encoders for the condition vectors (keys and values of the attention)
        self.dynamic_modules = nn.ModuleDict({
            "ave": MLP(512, 4 * d_attn, d_latent, 4),  # emits 4 tokens worth of features
            "shape": MLP(300, d_attn, d_latent, 2),
            "expr": MLP(100, d_attn, d_latent, 2),
            "global_pose": MLP(3, d_attn, d_latent, 2),
            "neck_pose": MLP(3, d_attn, d_latent, 2),
            "jaw_pose": MLP(3, d_attn, d_latent, 2),
            "eye_pose": MLP(6, d_attn, d_latent, 2),
        })

        # static heads (xyz -> feature -> params)
        self.static_triplane   = MLP(self.d_triplane, self.d_triplane, self.d_latent, 7)
        self.static_scaling    = MLP(self.d_triplane, 2, self.d_latent, 3)
        self.static_quaternion = MLP(self.d_triplane, 4, self.d_latent, 3)
        self.static_opacity    = MLP(self.d_triplane, 1, self.d_latent, 3)
        self.static_feature    = MLP(self.d_triplane, shs_coef_num * 3, self.d_latent, 3)

        # deform heads (xyz -> feature -> d_params)
        self.deform_xyz        = MLP(self.d_attn, 3, self.d_latent, 3)
        self.deform_scaling    = MLP(self.d_attn, 2, self.d_latent, 3)
        self.deform_quaternion = MLP(self.d_attn, 4, self.d_latent, 3)
        self.deform_opacity    = MLP(self.d_attn, 1, self.d_latent, 3)
        self.deform_feature    = MLP(self.d_attn, shs_coef_num * 3, self.d_latent, 3)

        # init parameters
        self.apply(self.init_weights)

    @staticmethod
    def init_weights(m):
        """Xavier-uniform weights and zero biases for every Linear layer."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight, gain=1)
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    @property
    def get_aabb(self):
        """(aabb_max, aabb_min) of the tri-plane grid."""
        return self.triplane.aabb[0], self.triplane.aabb[1]

    def set_aabb(self, xyz_max, xyz_min):
        self.triplane.set_aabb(xyz_max, xyz_min)

    @torch.no_grad()
    def set_scale(self, scale):
        # NOTE(review): the aabb is recomputed from init_bounds * scale while
        # self.bounds accumulates multiplicatively — the two diverge if this
        # is called more than once. Confirm intended usage.
        self.set_aabb(self.init_bounds * scale, - self.init_bounds * scale)
        self.bounds = self.bounds * scale

    def forward_static(self,
                       xyz: torch.Tensor,  # B, N, 3
                       ):
        """Decode static Gaussian parameters at the given points.

        Returns (xyz, scale, quaternion, opacity, feature) where feature
        has shape (B, N, shs_coef_num, 3).
        """
        batch_size = xyz.size(0)
        assert batch_size == 1  # the static path expects a single sample
        xyz_tri_feat = self.triplane.forward(xyz.view(-1, 3))
        xyz_tri_feat = self.static_triplane(xyz_tri_feat)
        latent = F.leaky_relu(xyz_tri_feat).view(batch_size, -1, self.d_triplane)
        scale      = self.static_scaling.forward(latent)
        quaternion = self.static_quaternion.forward(latent)
        opacity    = self.static_opacity.forward(latent)
        feature    = self.static_feature.forward(latent).view(batch_size, -1, self.shs_coef_num, 3)
        return xyz, scale, quaternion, opacity, feature

    def forward_dynamic(self,
                        xyz: torch.Tensor,  # B, N, 3
                        **kwargs,
                        ):
        """Predict per-point deformation offsets, conditioned on **kwargs.

        Recognized keys: "ave", "shape", "expr", "global_pose",
        "neck_pose", "jaw_pose", "eye_pose". Unknown keys and None
        values are ignored. Returns
        (d_xyz, d_scale, d_quaternion, d_opacity, d_feature).
        """
        batch_size = xyz.size(0)
        enc_pts      = self.triplane.forward(xyz.view(-1, 3))
        enc_tri_feat = F.leaky_relu(self.static_triplane(enc_pts)).view(batch_size, -1, self.d_triplane)  # B, N, d_attn

        # Optional "ave" feature, zeros when absent.
        # Bug fix: ave_feat was read before ever being assigned (NameError).
        ave_feat = kwargs.pop("ave", None)
        if ave_feat is None:
            ave_feat = torch.zeros(batch_size, 512, device=enc_tri_feat.device, dtype=enc_tri_feat.dtype)
        # The "ave" encoder emits 4 * d_attn features; treat them as 4
        # attention tokens of width d_attn (matches the "4 head" note at
        # construction — TODO confirm intended token count).
        enc_ave = self.dynamic_modules["ave"](ave_feat).view(batch_size, 4, self.d_attn)
        extra_feats = [enc_ave]

        for key, value in kwargs.items():
            if key not in self.dynamic_modules or value is None:
                continue
            enc_value: torch.Tensor = self.dynamic_modules[key](value)
            if enc_value.dim() == 2:
                enc_value = enc_value.unsqueeze(1)  # B, 1, d_attn
            if enc_value.dim() == 3 and enc_value.size(-1) == self.d_attn:
                extra_feats.append(enc_value)

        enc_pattern     = torch.cat(extra_feats, dim=1)  # B, tokens, d_attn
        latent, attnmap = self.transformer(enc_tri_feat, enc_pattern)
        d_xyz        = self.deform_xyz.forward(latent).view(batch_size, -1, 3)
        d_scale      = self.deform_scaling.forward(latent).view(batch_size, -1, 2)
        d_quaternion = self.deform_quaternion.forward(latent).view(batch_size, -1, 4)
        d_opacity    = self.deform_opacity.forward(latent).view(batch_size, -1, 1)
        d_feature    = self.deform_feature.forward(latent).view(batch_size, -1, self.shs_coef_num, 3)
        return d_xyz, d_scale, d_quaternion, d_opacity, d_feature

    def forward(self,
                xyz,
                shape=None,
                expr=None,
                global_pose=None,
                neck_pose=None,
                jaw_pose=None,
                eye_pose=None,
                ave=None,
                ):
        """Static path when no condition is given, dynamic path otherwise."""
        if shape is None:
            return self.forward_static(xyz)
        # Bug fix: forward_dynamic only accepts keyword conditions
        # (**kwargs); the previous positional call raised a TypeError.
        return self.forward_dynamic(
            xyz,
            ave=ave,
            shape=shape,
            expr=expr,
            global_pose=global_pose,
            neck_pose=neck_pose,
            jaw_pose=jaw_pose,
            eye_pose=eye_pose,
        )
# %%
if __name__ == '__main__':
  # Smoke test: one dynamic forward pass on random inputs.
  device = 'cuda' if torch.cuda.is_available() else 'cpu'  # don't hard-require a GPU
  model = DeformNetwork().to(device)
  B = 2       # batch size
  M = 10000   # number of query points per batch
  # condition vectors (attention keys & values)
  # Bug fix: the "shape" encoder is MLP(300, ...) — input must be 300-dim (was 400)
  shape = torch.randn(B, 300, device=device)
  expr = torch.randn(B, 100, device=device)
  global_pose = torch.randn(B, 3, device=device)
  neck_pose = torch.randn(B, 3, device=device)
  jaw_pose = torch.randn(B, 3, device=device)
  eye_pose = torch.randn(B, 6, device=device)
  # query points
  xyz = torch.randn(B, M, 3, device=device)  # (B, M, 3)
  output = model(xyz, shape, expr, global_pose, neck_pose, jaw_pose, eye_pose)