
import torch
import torch.nn as nn
import math
import numpy as np
from pointutil import Conv1d,  Conv2d, PointNetSetAbstraction, BiasConv1d, square_distance, index_points_group, GRUMappingGCN0
import torch.nn.functional as F
from convNeXT.resnetUnet import convNeXTUnetBig
from dataloader.loader import loader
from fuse import FusionAwareInterp, SKFusion

def smooth_l1_loss(input, target, sigma=10., reduce=True, normalizer=1.0):
    """Smooth-L1 (Huber-style) loss with transition point ``beta = 1/sigma**2``.

    Elements whose absolute residual is below ``beta`` use the quadratic
    branch ``0.5 * d**2 / beta``; the rest use the linear branch
    ``d - 0.5 * beta``.

    Returns the total loss divided by ``normalizer`` when ``reduce`` is True,
    otherwise the per-row sum over dim 1 divided by ``normalizer``.
    """
    beta = 1. / (sigma * sigma)
    residual = (input - target).abs()
    quadratic = 0.5 * residual * residual / beta
    linear = residual - 0.5 * beta
    elementwise = torch.where(residual < beta, quadratic, linear)
    if not reduce:
        return elementwise.sum(dim=1) / normalizer
    return elementwise.sum() / normalizer
# Default criterion alias — presumably imported by external training scripts;
# nothing in this file uses it. TODO(review): confirm against callers.
criterion = smooth_l1_loss

# convNeXT variant table: name -> (block depths per stage, channel widths per
# stage). NOTE(review): unused in this file — likely consumed by the backbone
# builder in convNeXT.resnetUnet; verify before removing.
model_list = {
          'tiny': ([3, 3, 9, 3], [96, 192, 384, 768]),
          'small': ([3, 3, 27, 3], [96, 192, 384, 768]),
          'base': ([3, 3, 27, 3], [128, 256, 512, 1024]),
          'large': ([3, 3, 27, 3], [192, 384, 768, 1536])
          }
# Official convNeXT ImageNet-1k pretrained checkpoint URLs (per variant).
# NOTE(review): unused here — the backbone is built with pretrain='1k', so the
# download is presumably handled inside convNeXTUnetBig.
weight_url_1k = {
    'tiny': "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224.pth",
    'small': "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224.pth",
    'base': "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224.pth",
    'large': "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224.pth"
}

# Official convNeXT ImageNet-22k pretrained checkpoint URLs (per variant).
weight_url_22k = {
    'tiny': "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth",
    'small': "https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth",
    'base': "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
    'large': "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth"
}

class HandModel(nn.Module):
    """Iterative 2D/3D-fusion network regressing 3D hand joint positions.

    A convNeXT U-Net encodes the depth image while a PointNet++ set-abstraction
    stack encodes the point cloud. An initial joint estimate is regressed from
    the concatenated global codes, then refined for ``iters`` steps: each step
    scatters the joint embeddings back onto the image plane, fuses them with
    the 2D feature map, gathers the fused features onto the points, and runs a
    graph-GRU update followed by a residual joint regression.
    """

    def __init__(self, joints=21, iters=10):
        """joints: number of hand joints to regress; iters: refinement steps."""
        super(HandModel, self).__init__()

        self.backbone = convNeXTUnetBig('small', pretrain='1k', deconv_dim=128)

        # Point-cloud encoder: two set-abstraction levels, then a shared MLP
        # + max-pool yielding one 512-d global point-cloud code per sample.
        self.encoder_1 = PointNetSetAbstraction(npoint=512, radius=0.1, nsample=64, in_channel=3, mlp=[32, 32, 128])
        self.encoder_2 = PointNetSetAbstraction(npoint=128, radius=0.3, nsample=64, in_channel=128, mlp=[64, 64, 256])
        self.encoder_3 = nn.Sequential(Conv1d(in_channels=256 + 3, out_channels=128, bn=True, bias=False),
                                       Conv1d(in_channels=128, out_channels=128, bn=True, bias=False),
                                       Conv1d(in_channels=128, out_channels=512, bn=True, bias=False),
                                       nn.MaxPool1d(128, stride=1))

        # Per-joint biased MLP turning the fused global code into one 512-d
        # latent per joint, plus a 1x1 conv head for the initial joints.
        self.fold1 = nn.Sequential(BiasConv1d(bias_length=joints, in_channels=512 + 768, out_channels=512, bn=True),
                                   BiasConv1d(bias_length=joints, in_channels=512, out_channels=512, bn=True),
                                   BiasConv1d(bias_length=joints, in_channels=512, out_channels=512, bn=True))
        self.regress_1 = nn.Conv1d(in_channels=512, out_channels=3, kernel_size=1)

        # Refinement machinery: graph GRU over the joint graph, residual joint
        # regressor, joint->image interpolation and 2D feature fusion.
        self.gru = GRUMappingGCN0(nsample=64, in_channel=128 + 128, latent_channel=512, graph_width=joints, mlp=[512, 512, 512], mlp2=None)
        self.regress = nn.Conv1d(in_channels=512, out_channels=3, kernel_size=1)
        self.interp = FusionAwareInterp(512, 4, None)
        self.fuse2d = SKFusion(128, 512, 128, 'nchw', None, 1)
        self.fc_layer = nn.Linear(384, 256)
        # NOTE(review): fc_layer_hot fed a per-pixel joint heat-map (sigmoid)
        # that was computed in the refinement loops but never consumed, and its
        # output width (14) does not track ``joints`` (default 21). The dead
        # computation was removed; the module is kept so existing checkpoints
        # still load.
        self.fc_layer_hot = nn.Linear(128, 14)
        self.iters = iters
        self.joints = joints

    def encode(self, pc, feat, img, loader, center, M, cube, cam_para):
        """Encode image + point cloud and regress the initial joint estimate.

        Args (shapes per the original author's notes — assumes B = batch):
            pc, feat: B x 3 x 1024 point coordinates / input features.
            img:      B x 1 x 128 x 128 depth image.
            loader:   dataset helper providing 2D<->3D index mappings.
            center, M, cube, cam_para: camera/crop parameters forwarded to it.

        Returns:
            latents       B x 512 x joints per-joint latent codes
            joints        B x 3 x joints   initial joint positions
            pc1, feat1    level-1 points (B x 3 x 512) and fused features
            img_xyz       back-projected image points
            pc_img_feat   B x 128 x 64 x 64 dense image feature map
            pcl_index, pcl_closeness  point->pixel gather indices / weights
        """
        # Two set-abstraction levels, then a global 512-d point-cloud code.
        pc1, feat1 = self.encoder_1(pc, feat)    # B x 3 x 512, B x 128 x 512
        pc2, feat2 = self.encoder_2(pc1, feat1)  # B x 3 x 128, B x 256 x 128
        code = self.encoder_3(torch.cat((pc2, feat2), 1))  # B x 512 x 1

        # Image branch: dense feature map plus a global max-pooled image code.
        pc_img_feat, c4 = self.backbone(img)  # B x 128 x 64 x 64
        # canonical kwarg is keepdim (original used the numpy-style alias)
        img_code = torch.max(c4.view(c4.size(0), c4.size(1), -1), -1, keepdim=True)[0]  # B x 768 x 1
        B, C, H, W = pc_img_feat.size()
        img_down = F.interpolate(img, [H, W])  # depth resampled to feature-map size
        B, _, N = pc1.size()

        # For every level-1 point, find its nearest image pixels and blend
        # their 2D features with the closeness weights.
        pcl_closeness, pcl_index, img_xyz = loader.img2pcl_index(
            pc1.transpose(1, 2).contiguous(), img_down, center, M, cube, cam_para, select_num=4)
        pcl_feat_index = pcl_index.view(B, 1, -1).repeat(1, C, 1)  # B x C x (N*4)
        pcl_feat = torch.gather(pc_img_feat.view(B, C, -1), -1, pcl_feat_index).view(B, C, N, -1)
        pcl_feat = torch.sum(pcl_feat * pcl_closeness.unsqueeze(1), dim=-1)  # B x 128 x N
        feat1 = torch.cat((feat1, pcl_feat), 1)  # B x 256 x N

        # Broadcast both global codes across the joint axis, fold them into
        # per-joint latents, and regress the initial joint positions.
        code = code.expand(code.size(0), code.size(1), self.joints)
        img_code = img_code.expand(img_code.size(0), img_code.size(1), self.joints)
        latents = self.fold1(torch.cat((code, img_code), 1))
        joints = self.regress_1(latents)

        return latents, joints, pc1, feat1, img_xyz, pc_img_feat, pcl_index, pcl_closeness

    def _refine_step(self, joint, embed, pc1, feat1, pc_img_feat,
                     pcl_index, pcl_closeness, loader, center, M, cube, cam_para):
        """One refinement iteration (shared by forward and get_loss).

        Returns the updated ``(joint, embed, pc_img_feat, feat1)`` tuple that
        feeds the next iteration.
        """
        # Map current joints onto the image grid to get per-pixel neighbors.
        _, distance_index = loader.pcl2img_index(
            joint.transpose(1, 2).contiguous(), pc_img_feat.shape[2],
            center, M, cube, cam_para, 4)
        # Scatter joint embeddings back onto the image plane (detached so the
        # interpolation does not backprop into the previous step's features)
        # and fuse them with the current 2D feature map.
        feat_3d_interp = self.interp(joint[:, 0:2, :], pc_img_feat.detach(), embed.detach(), distance_index)
        pc_img_feat = self.fuse2d(pc_img_feat, feat_3d_interp)  # B x 128 x H x W

        B, C, _, _ = pc_img_feat.size()
        _, _, N = pc1.size()
        # Gather the refreshed 2D features onto the level-1 points using the
        # fixed point->pixel correspondence computed in encode().
        pcl_feat_index = pcl_index.view(B, 1, -1).repeat(1, C, 1)
        pcl_feat = torch.gather(pc_img_feat.view(B, C, -1), -1, pcl_feat_index).view(B, C, N, -1)
        pcl_feat = torch.sum(pcl_feat * pcl_closeness.unsqueeze(1), dim=-1)  # B x 128 x N
        feat1 = torch.cat((feat1, pcl_feat), 1)  # B x 384 x N
        feat1 = self.fc_layer(feat1.transpose(1, 2)).transpose(1, 2)  # back to B x 256 x N

        # GRU update in joint-embedding space; regress a residual joint offset
        # from the embedding delta.
        embed_new = self.gru(joint, pc1, embed, feat1)
        joint = self.regress(embed_new - embed) + joint
        return joint, embed_new, pc_img_feat, feat1

    def forward(self, pc, feat, img, loader, center, M, cube, cam_para):
        """Run encode + ``iters`` refinement steps; returns B x 3 x joints."""
        embed, joint, pc1, feat1, _, pc_img_feat, pcl_index, pcl_closeness = self.encode(
            pc, feat, img, loader, center, M, cube, cam_para)
        for _ in range(self.iters):
            joint, embed, pc_img_feat, feat1 = self._refine_step(
                joint, embed, pc1, feat1, pc_img_feat,
                pcl_index, pcl_closeness, loader, center, M, cube, cam_para)
        return joint

    def get_loss(self, pc, feat, img, loader, center, M, cube, cam_para, gt):
        """Sum of smooth-L1 losses between ``gt`` and every joint estimate
        (the initial one plus one per refinement iteration)."""
        embed, joint, pc1, feat1, _, pc_img_feat, pcl_index, pcl_closeness = self.encode(
            pc, feat, img, loader, center, M, cube, cam_para)
        loss = smooth_l1_loss(joint, gt)
        for _ in range(self.iters):
            joint, embed, pc_img_feat, feat1 = self._refine_step(
                joint, embed, pc1, feat1, pc_img_feat,
                pcl_index, pcl_closeness, loader, center, M, cube, cam_para)
            loss += smooth_l1_loss(joint, gt)
        return loss

