import os,sys
sys.path.append(os.getcwd())
from PIL import Image
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision.utils import make_grid, save_image
from torchvision.ops import RoIPool

from InitPose.models.DSANet import DSANet
from InitPose.lib.utils import quaternion_to_matrix
from InitPose.lib.loss.loss import _LABEL2MASK_THRESHOL, loss_cross_entropy, loss_Rotation\
    , IOUselection, HoughVoting, loss_rotation

from InitPose.models.fftformerNet import LayerNorm

class FeatureExtraction(nn.Module):
    """
    Feature embedding module: wraps a DSANet backbone and projects its first
    feature map down to a 128-channel, stride-2 representation.

    NOTE(review): ``pretrained_model`` is accepted but never used; it is kept
    only for interface compatibility with existing callers.
    """

    def __init__(self, pretrained_model, num_res):
        super(FeatureExtraction, self).__init__()
        # Backbone producing two feature maps from the input RGB image.
        self.embedding = DSANet(num_res)
        # 3x3 stride-2 projection: 64 -> 128 channels, halves spatial size.
        self.outConv = nn.Conv2d(in_channels=64, out_channels=128,
                                 kernel_size=3, stride=2, padding=1)

    def forward(self, datadict):
        """
        Args:
            datadict: dict holding the input image under key 'rgb'.

        Returns:
            (feature1, feature2): feature1 is the first backbone map after the
            stride-2 projection; feature2 is the second backbone map, passed
            through unchanged.  Presumably both end up at [bs, 128, H/4, W/4]
            -- TODO confirm against DSANet's output strides.
        """
        raw_feature, feature2 = self.embedding(datadict['rgb'])
        feature1 = self.outConv(raw_feature)
        return feature1, feature2
    

class SegmentationBranch(nn.Module):
    """
    Instance segmentation head for PoseCNN.

    Fuses the two feature streams, upsamples by 4x back to input resolution,
    and predicts a per-pixel distribution over ``num_classes`` object classes
    plus background (class index 0).
    """

    def __init__(self, num_classes = 1, hidden_layer_dim = 64):
        """
        Args:
            num_classes: number of foreground object classes.
            hidden_layer_dim: channel width of the hidden 1x1 conv layer.
        """
        super(SegmentationBranch, self).__init__()
        seg_branch = []
        seg_branch.append(nn.Conv2d(128, hidden_layer_dim, 1, bias=True))  # 1x1 projection
        seg_branch.append(nn.ReLU(inplace=False))

        # +1 accounts for the background class.
        self.num_classes = num_classes + 1

        # Kaiming init for every conv weight, zero bias.
        def init_weights(m):
          if isinstance(m, nn.Conv2d):
            torch.nn.init.kaiming_normal_(m.weight)
            m.bias.data.fill_(0.0)

        self.seg_branch = nn.Sequential(*seg_branch)
        self.seg_branch.apply(init_weights)

        # BUGFIX: input width must follow hidden_layer_dim (was hard-coded to
        # 64, which crashed at forward time for any non-default width).
        self.conv_class = nn.Conv2d(hidden_layer_dim, num_classes + 1, 1, bias=True)

        torch.nn.init.kaiming_normal_(self.conv_class.weight)
        self.conv_class.bias.data.fill_(0.0)

        self.relu = nn.ReLU(inplace=False)

    def forward(self, feature1, feature2):
        """
        Args:
            feature1, feature2: [bs, 128, H/4, W/4] feature maps.

        Returns:
            probability: [bs, num_classes+1, H, W] softmax class scores.
            segmentation: [bs, H, W] argmax label map.
            bbx: [N, 6] boxes (batch_id, x1, y1, x2, y2, cls) from the labels.
        """
        # Element-wise fusion of the two feature streams.
        fused = self.seg_branch(feature1) + self.seg_branch(feature2)

        # Features are at stride 4; upsample back to input resolution.
        up_cat = torch.nn.Upsample(scale_factor=4)
        logits = self.conv_class(up_cat(fused))
        probability = nn.functional.softmax(logits, dim=1)
        segmentation = torch.max(probability, dim=1)[1]
        bbx = self.label2bbx(segmentation)
        return probability, segmentation, bbx

    def label2bbx(self, label):
        """
        Derive one tight bounding box per (batch, class) pair from a label map.

        Args:
            label: [bs, H, W] integer label map (0 = background).

        Returns:
            [N, 6] tensor of (batch_id, x1, y1, x2, y2, cls); classes covering
            fewer than _LABEL2MASK_THRESHOL pixels are skipped.
        """
        bbx = []
        bs, H, W = label.shape
        device = label.device
        # Per-class boolean masks: mask[b, c] == (label[b] == c).
        label_repeat = label.view(bs, 1, H, W).repeat(1, self.num_classes, 1, 1).to(device)
        label_target = torch.linspace(0, self.num_classes - 1, steps = self.num_classes).view(1, -1, 1, 1).repeat(bs, 1, H, W).to(device)
        mask = (label_repeat == label_target)
        for batch_id in range(mask.shape[0]):
            for cls_id in range(mask.shape[1]):
                if cls_id != 0:
                    # cls_id == 0 is the background
                    y, x = torch.where(mask[batch_id, cls_id] != 0)
                    if y.numel() >= _LABEL2MASK_THRESHOL:
                        bbx.append([batch_id, torch.min(x).item(), torch.min(y).item(),
                                    torch.max(x).item(), torch.max(y).item(), cls_id])
        return torch.tensor(bbx).to(device)

class TranslationBranch(nn.Module):
    """
    3D translation (center-map) regression head for PoseCNN.

    Fuses the two feature streams, upsamples by 4x and regresses one
    (x, y, z) map per object class.
    """

    def __init__(self, num_classes = 1, hidden_layer_dim = 128, using_conv=False):
        """
        Args:
            num_classes: number of foreground object classes.
            hidden_layer_dim: channel width fed into the output conv.
            using_conv: if True, prepend a 1x1 conv and fix the hidden
                width to 64 (overriding ``hidden_layer_dim``).
        """
        super(TranslationBranch, self).__init__()
        self.hidden_layer_dim = hidden_layer_dim
        layers = []
        if using_conv:
            # With the extra conv, the hidden width is fixed at 64.
            self.hidden_layer_dim = 64
            layers.append(nn.Conv2d(128, self.hidden_layer_dim, 1, bias=True))
        layers.append(nn.ReLU(inplace=False))

        # Kaiming init for every conv weight, zero bias.
        def init_weights(m):
          if isinstance(m, nn.Conv2d):
            torch.nn.init.kaiming_normal_(m.weight)
            m.bias.data.fill_(0.0)

        self.trans_branch = nn.Sequential(*layers)
        self.trans_branch.apply(init_weights)

        # One (x, y, z) map per class.
        self.conv_class = nn.Conv2d(self.hidden_layer_dim, 3 * num_classes, 1, bias=True)

        torch.nn.init.kaiming_normal_(self.conv_class.weight)
        self.conv_class.bias.data.fill_(0.0)

        self.relu = nn.ReLU(inplace=False)

    def forward(self, feature1, feature2):
        """
        Args:
            feature1, feature2: [bs, 128, H/4, W/4] feature maps.

        Returns:
            [bs, 3 * num_classes, H, W] translation/center map.
        """
        fused = self.trans_branch(feature1) + self.trans_branch(feature2)
        upsample = torch.nn.Upsample(scale_factor=4)
        return self.conv_class(upsample(fused))


class RotationBranch(nn.Module):
    """
    3D rotation regression head for PoseCNN.

    RoI-pools both feature streams, optionally layer-normalizes them, sums
    them, and regresses one rotation per class: a quaternion (4 values) in
    "4d" mode, or the 6D continuous representation (default "6d" mode).
    """

    def __init__(self,
                 feature_dim = 128,
                 roi_shape = 7,
                 hidden_dim = 4096,
                 num_classes = 1,
                 using_normal=True,
                 LayerNorm_type="WithBias",
                 rotation_mode = "6d"):
        """
        Args:
            feature_dim: channels of the incoming feature maps.
            roi_shape: spatial size of the RoI-pooled patch.
            hidden_dim: width of the two hidden linear layers.
            num_classes: number of foreground object classes.
            using_normal: apply LayerNorm to each pooled stream before fusion.
            LayerNorm_type: passed through to the project LayerNorm.
            rotation_mode: "4d" (quaternion) or "6d" (6D representation).
        """
        super(RotationBranch, self).__init__()
        self.rotation_mode = rotation_mode
        self.using_normal = using_normal
        if self.using_normal:
            self.layer_norm1 = LayerNorm(feature_dim, LayerNorm_type=LayerNorm_type)
            self.layer_norm2 = LayerNorm(feature_dim, LayerNorm_type=LayerNorm_type)
        # RoI pooling at the two feature strides (1/8 and 1/16 of the input).
        self.ROI_1 = RoIPool((roi_shape, roi_shape), 1/8)
        self.ROI_2 = RoIPool((roi_shape, roi_shape), 1/16)
        # 4 outputs per class for quaternions, 6 for the 6D representation.
        out_per_class = 4 if self.rotation_mode == "4d" else 6
        # BUGFIX: the hidden width now honours ``hidden_dim`` -- it was
        # hard-coded to 4096, silently ignoring the parameter.
        self.quat = nn.Sequential(
          nn.Linear(in_features = feature_dim * roi_shape * roi_shape, out_features=hidden_dim),
          nn.ReLU(inplace=False),
          nn.Linear(in_features=hidden_dim, out_features=hidden_dim),
          nn.ReLU(inplace=False),
          nn.Linear(in_features=hidden_dim, out_features=out_per_class * num_classes)
        )

        # Zero every linear bias; weights keep PyTorch's default init.
        def init_weights(m):
          if isinstance(m, nn.Linear):
            m.bias.data.fill_(0.0)

        self.quat.apply(init_weights)

    def forward(self, feature1, feature2, bbx):
        """
        Args:
            feature1, feature2: the two backbone feature maps.
            bbx: [N, 5] RoIs as (batch_id, x1, y1, x2, y2).

        Returns:
            [N, out_per_class * num_classes] raw rotation regression output.
        """
        rois = bbx.to(feature1.dtype)
        ft_1_roi = self.ROI_1(feature1, rois)
        ft_2_roi = self.ROI_2(feature2, rois)
        if self.using_normal:
            ft_1_roi = self.layer_norm1(ft_1_roi)
            ft_2_roi = self.layer_norm2(ft_2_roi)
        ft_cat = ft_1_roi + ft_2_roi

        return self.quat(ft_cat.flatten(1))

       
class PoseAttention(nn.Module):
    """
    Full pose-estimation network: feature extraction plus segmentation,
    translation and rotation heads.

    In training mode ``forward`` returns a dict of losses; in eval mode it
    returns (pose dict, segmentation map).
    """

    def __init__(self, pretrained_backbone,
                 models_pcd,
                 cam_intrinsic,
                 using_loss_model,
                 num_res,
                 using_normal,
                 LayerNorm_type,
                 using_conv,
                 rotation_mode = "6d"):
        """
        Args:
            pretrained_backbone: forwarded to FeatureExtraction (currently unused there).
            models_pcd: per-class 3D model point clouds for the rotation loss.
            cam_intrinsic: 3x3 camera intrinsic matrix (numpy).
            using_loss_model: if True, use the point-cloud rotation loss.
            num_res: number of residual blocks for the DSANet backbone.
            using_normal, LayerNorm_type, rotation_mode: forwarded to RotationBranch.
            using_conv: forwarded to TranslationBranch.
        """
        super(PoseAttention, self).__init__()
        self.using_loss_model = using_loss_model
        self.rotation_mode = rotation_mode

        # Minimum IoU against a GT box for a predicted box to be used
        # for rotation supervision.
        self.iou_threshold = 0.7
        self.models_pcd = models_pcd
        self.cam_intrinsic = cam_intrinsic

        self.FeatureExtraction = FeatureExtraction(pretrained_backbone, num_res)
        self.SegmentationBranch = SegmentationBranch()
        self.TranslationBranch = TranslationBranch(using_conv=using_conv)
        self.RotationBranch = RotationBranch(using_normal=using_normal, LayerNorm_type=LayerNorm_type, rotation_mode=self.rotation_mode)

    def forward(self, input_dict):
        """
        Training: returns {"loss_segmentation", "loss_centermap", "loss_R"}.
        Eval: returns (pose dict from generate_pose, segmentation map).
        """
        if self.training:
            loss_dict = {
                "loss_segmentation": 0,
                "loss_centermap": 0,
                "loss_R": 0
            }

            gt_bbx = self.getGTbbx(input_dict)

            feature1, feature2 = self.FeatureExtraction(input_dict)
            probs, seg, bbx = self.SegmentationBranch(feature1, feature2)

            translation = self.TranslationBranch(feature1, feature2)
            gt_labels = input_dict["label"]
            gt_center = input_dict["centermaps"]
            loss_dict["loss_segmentation"] = loss_cross_entropy(probs, gt_labels)

            # L1 loss between predicted and ground-truth center maps.
            # NOTE(review): an alternative per-object translation loss via
            # estimateTrans/gtTrans exists below but is not used here.
            center_loss = torch.nn.L1Loss()
            loss_dict["loss_centermap"] = center_loss(translation, gt_center)

            # Keep only predicted boxes that overlap a GT box enough.
            filtered_bbx = IOUselection(bbx, gt_bbx, self.iou_threshold)

            # BUGFIX: was `len(filtered_bbx != 0)` (length of a boolean
            # tensor), and the RotationBranch was fed the *unfiltered* boxes,
            # misaligning its rows with the filtered boxes enumerated by
            # gtRotation/estimateRotation whenever IOUselection drops boxes.
            if len(filtered_bbx) != 0:
                quaternion_map = self.RotationBranch(feature1, feature2, filtered_bbx[:, :5])
                gt_rot = self.gtRotation(filtered_bbx, input_dict)
                Pred_rot, labels = self.estimateRotation(quaternion_map, filtered_bbx)
                if self.using_loss_model:  # use the 3D point-cloud models
                    rot_loss = loss_Rotation(Pred_rot, gt_rot, labels, self.models_pcd)
                else:
                    rot_loss = loss_rotation(Pred_rot, gt_rot)
                loss_dict["loss_R"] = rot_loss

            return loss_dict
        else:
            output_dict = None
            segmentation = None

            with torch.no_grad():

                feature1, feature2 = self.FeatureExtraction(input_dict)
                probs, segmentation, bbx = self.SegmentationBranch(feature1, feature2)
                translation_map = self.TranslationBranch(feature1, feature2)

                # Hough voting recovers per-class 2D centers and depths.
                pred_centers, pred_depths = HoughVoting(segmentation, translation_map)

                quaternion_map = self.RotationBranch(feature1, feature2, bbx[:, :5])
                pred_Rs, lbl = self.estimateRotation(quaternion_map, bbx)

                output_dict = self.generate_pose(pred_Rs, pred_centers, pred_depths, bbx)

            return output_dict, segmentation

    def estimateTrans(self, translation_map, filter_bbx, pred_label):
        """
        Average the predicted translation map over each object's mask.

        Args:
            translation_map: [batch_size, num_classes * 3, H, W].
            filter_bbx: [N, 6] boxes (batch_id, x1, y1, x2, y2, cls).
            pred_label: [batch_size, H, W] predicted label map.

        Returns:
            [N, 3] per-box mean translation.
        """
        N_filter_bbx = filter_bbx.shape[0]
        pred_Ts = torch.zeros(N_filter_bbx, 3)
        for idx, bbx in enumerate(filter_bbx):
            batch_id = int(bbx[0].item())
            cls = int(bbx[5].item())
            # Channels (cls-1)*3 .. cls*3 hold this class's (x, y, z) map.
            trans_map = translation_map[batch_id, (cls-1) * 3 : cls * 3, :]
            label = (pred_label[batch_id] == cls).detach()
            pred_T = trans_map[:, label].mean(dim=1)
            pred_Ts[idx] = pred_T
        return pred_Ts

    def gtTrans(self, filter_bbx, input_dict):
        """Gather the GT translation (last column of RT) for each box."""
        N_filter_bbx = filter_bbx.shape[0]
        gt_Ts = torch.zeros(N_filter_bbx, 3)
        for idx, bbx in enumerate(filter_bbx):
            batch_id = int(bbx[0].item())
            cls = int(bbx[5].item())
            gt_Ts[idx] = input_dict['RTs'][batch_id][cls - 1][:3, [3]].T
        return gt_Ts

    def getGTbbx(self, input_dict):
        """
        Build the ground-truth box tensor, [N, 6] as
        (batch_id, x1, y1, x2, y2, cls).
        """
        gt_bbx = []
        objs_id = input_dict['objs_id']
        device = objs_id.device
        bbxes = input_dict['bbx']
        for batch_id in range(bbxes.shape[0]):
            for idx, obj_id in enumerate(objs_id[batch_id]):
                if obj_id.item() != 0:
                    # obj_id == 0 means the object is absent from this image.
                    bbx = bbxes[batch_id][idx]
                    gt_bbx.append([batch_id, bbx[0].item(), bbx[1].item(),
                                  bbx[2].item(), bbx[3].item(), obj_id.item()])
        return torch.tensor(gt_bbx).to(device=device, dtype=torch.int16)

    def estimateRotation(self, quaternion_map, filter_bbx):
        """
        Decode per-box rotation matrices from the rotation head's output.

        Args:
            quaternion_map: [N, 4*num_classes] ("4d") or [N, 6*num_classes]
                ("6d") raw head output; row i must correspond to
                filter_bbx row i.
            filter_bbx: [N, 6] boxes (batch_id, x1, y1, x2, y2, cls).

        Returns:
            (pred_Rs [N, 3, 3], label [N]) — rotation matrices and class ids.
        """
        N_filter_bbx = filter_bbx.shape[0]
        pred_Rs = torch.zeros(N_filter_bbx, 3, 3)
        label = []
        for idx, bbx in enumerate(filter_bbx):
            batch_id = int(bbx[0].item())
            cls = int(bbx[5].item())
            if self.rotation_mode == "4d":
                quaternion = quaternion_map[idx, (cls-1) * 4 : cls * 4]
                # Unit-normalize before converting to a matrix.
                quaternion = nn.functional.normalize(quaternion, dim=0)
                pred_Rs[idx] = quaternion_to_matrix(quaternion)
            else:
                quaternion = quaternion_map[idx, (cls-1) * 6 : cls * 6]
                pred_Rs[idx] = self.rotation_6d_to_matrix(quaternion)
            label.append(cls)
        label = torch.tensor(label)
        return pred_Rs, label

    def rotation_6d_to_matrix(self, rot_6d):
        """
        Convert a single 6D rotation representation to a 3x3 rotation matrix
        in SO(3) via Gram-Schmidt orthogonalisation.

        For details: https://openaccess.thecvf.com/content_CVPR_2019/papers/Zhou_On_the_Continuity_of_Rotation_Representations_in_Neural_Networks_CVPR_2019_paper.pdf

        NOTE(review): the final ``view(3, 3)`` assumes exactly one 6D input.
        """
        rot_6d = rot_6d.view(-1, 6)
        m1 = rot_6d[:, 0:3]
        m2 = rot_6d[:, 3:6]

        x = F.normalize(m1, p=2, dim=1)
        # z is orthogonal to both x and m2; y completes the right-handed frame.
        z = torch.cross(x, m2, dim=1)
        z = F.normalize(z, p=2, dim=1)
        y = torch.cross(z, x, dim=1)
        rot_matrix = torch.cat((x.view(-1, 3, 1), y.view(-1, 3, 1), z.view(-1, 3, 1)), 2)
        rot_matrix = rot_matrix.view(3, 3)
        return rot_matrix

    def gtRotation(self, filter_bbx, input_dict):
        """Gather the GT rotation (upper-left 3x3 of RT) for each box."""
        N_filter_bbx = filter_bbx.shape[0]
        gt_Rs = torch.zeros(N_filter_bbx, 3, 3)
        for idx, bbx in enumerate(filter_bbx):
            batch_id = int(bbx[0].item())
            cls = int(bbx[5].item())
            gt_Rs[idx] = input_dict['RTs'][batch_id][cls - 1][:3, :3]
        return gt_Rs

    def generate_pose(self, pred_Rs, pred_centers, pred_depths, bbxs):
        """
        Assemble 4x4 pose matrices from rotations, centers and depths.

        Args:
            pred_Rs: [N, 3, 3] rotation matrices (CPU tensors).
            pred_centers: [batch_size, num_classes, 2] 2D centers (pixels).
            pred_depths: [batch_size, num_classes] depths.
            bbxs: [N, 6] boxes (batch_id, x1, y1, x2, y2, cls).

        Returns:
            {batch_id: {obj_id: 4x4 numpy pose}} — objects whose predicted
            center is (0, 0) (no Hough vote) are skipped.
        """
        output_dict = {}
        for idx, bbx in enumerate(bbxs):
            bs, _, _, _, _, obj_id = bbx
            R = pred_Rs[idx].numpy()
            center = pred_centers[bs, obj_id - 1].numpy()
            depth = pred_depths[bs, obj_id - 1].numpy()
            if (center**2).sum().item() != 0:
                # Back-project the 2D center to 3D: K^-1 [u, v, 1]^T * depth.
                T = np.linalg.inv(self.cam_intrinsic) @ np.array([center[0], center[1], 1]) * depth
                T = T[:, np.newaxis]
                if bs.item() not in output_dict:
                    output_dict[bs.item()] = {}
                output_dict[bs.item()][obj_id.item()] = np.vstack((np.hstack((R, T)), np.array([[0, 0, 0, 1]])))
        return output_dict

if __name__ == "__main__":
    # Smoke-test for the 6D -> rotation-matrix conversion: Gram-Schmidt
    # orthogonalisation of the two 3-vectors packed into a 6D output.
    # See: https://openaccess.thecvf.com/content_CVPR_2019/papers/Zhou_On_the_Continuity_of_Rotation_Representations_in_Neural_Networks_CVPR_2019_paper.pdf
    rot_6d = torch.randn(6)
    six_d = rot_6d.view(-1, 6)
    a1, a2 = six_d[:, 0:3], six_d[:, 3:6]

    # First basis vector: normalised a1.
    b1 = F.normalize(a1, p=2, dim=1)
    # Third basis vector: orthogonal to b1 and a2.
    b3 = F.normalize(torch.cross(b1, a2, dim=1), p=2, dim=1)
    # Second basis vector completes the right-handed frame.
    b2 = torch.cross(b3, b1, dim=1)

    # Stack as columns to form the rotation matrix in SO(3).
    rot_matrix = torch.cat((b1.view(-1, 3, 1), b2.view(-1, 3, 1), b3.view(-1, 3, 1)), 2)
    rot_matrix = rot_matrix.view(3, 3)
    print(rot_matrix)
    print(rot_matrix.shape)