# change from https://colab.research.google.com/drive/1tqdYbjmFq4PK3Di7sLONd0RkKS0hBgId?usp=sharing

import os
import os.path as osp
from typing import Optional, Tuple, Any
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import trunc_normal_
# IDE "unresolved import" (red-underline) warnings for sam2 can be ignored
from sam2.modeling.sam2_base import SAM2Base


class SAM2ImageEncoder(nn.Module):
    """ONNX-exportable wrapper around a SAM2 model's image encoder.

    Runs the image backbone, applies the mask decoder's high-resolution
    convolutions to the two finest FPN levels, and returns the three feature
    maps the decoder consumes (two high-res features + the image embedding).
    """

    def __init__(self, sam_model: SAM2Base) -> None:
        super().__init__()
        self.model = sam_model
        print('self.model.image_size: {}'.format(self.model.image_size))
        self.image_encoder = sam_model.image_encoder
        self.no_mem_embed = sam_model.no_mem_embed

    def forward(self, x: torch.Tensor) -> tuple[Any, Any, Any]:
        """Encode an image batch into the three decoder feature maps.

        Args:
            x: input image tensor, NxCxHxW at the model's fixed resolution.

        Returns:
            (high_res_feats_0, high_res_feats_1, image_embed) as NxCxHxW tensors.
        """
        backbone_out = self.image_encoder(x)

        # Project the two finest FPN levels with the mask decoder's convs,
        # in place in the backbone output dict (mirrors SAM2's own pipeline).
        fpn = backbone_out["backbone_fpn"]
        fpn[0] = self.model.sam_mask_decoder.conv_s0(fpn[0])
        fpn[1] = self.model.sam_mask_decoder.conv_s1(fpn[1])

        num_levels = self.model.num_feature_levels
        feature_maps = fpn[-num_levels:]
        pos_embeds = backbone_out["vision_pos_enc"][-num_levels:]

        # Spatial size of each level, taken from the positional encodings.
        feat_sizes = [(pe.shape[-2], pe.shape[-1]) for pe in pos_embeds]

        # Flatten NxCxHxW -> (HW)xNxC for the token-style "no memory" add.
        vision_feats = [fm.flatten(2).permute(2, 0, 1) for fm in feature_maps]

        # Add the "no memory" embedding to the coarsest (last) level only.
        vision_feats[-1] = vision_feats[-1] + self.no_mem_embed

        # Restore each level to 1xCxHxW for the decoder.
        feats = []
        for tokens, (height, width) in zip(vision_feats, feat_sizes):
            feats.append(tokens.permute(1, 2, 0).reshape(1, -1, height, width))

        return feats[0], feats[1], feats[2]


class SAM2ImageDecoder(nn.Module):
    """ONNX-exportable wrapper around SAM2's prompt encoder and mask decoder.

    Consumes precomputed image features (from SAM2ImageEncoder) plus point /
    mask prompts, and produces low-resolution mask logits and IoU predictions.
    """

    def __init__(
            self,
            sam_model: SAM2Base,
            multimask_output: bool
    ) -> None:
        super().__init__()
        self.mask_decoder = sam_model.sam_mask_decoder
        self.prompt_encoder = sam_model.sam_prompt_encoder
        self.model = sam_model
        print('self.model.image_size: {}'.format(self.model.image_size))
        # True -> return the 3 multi-mask outputs; False -> pick one best mask.
        self.multimask_output = multimask_output

    @torch.no_grad()
    def forward(
            self,
            image_embed: torch.Tensor,
            high_res_feats_0: torch.Tensor,
            high_res_feats_1: torch.Tensor,
            point_coords: torch.Tensor,
            point_labels: torch.Tensor,
            mask_input: torch.Tensor,
            has_mask_input: torch.Tensor,
    ):
        """Decode masks from image features and prompts.

        NOTE(review): `masks` / `iou_predictions` are overwritten on every
        loop iteration, so only the LAST batch element's results are returned.
        The exporter drives this with batch size 1 (see export_onnx); fixing
        true multi-batch support requires restructuring this loop.
        """
        for b in range(image_embed.shape[0]):
            sparse_embedding = self._embed_points(point_coords[b], point_labels[b])
            dense_embedding = self._embed_masks(mask_input[b], has_mask_input[b])

            masks, iou_predictions, _, _ = self.mask_decoder.predict_masks(
                # Fixed: out-of-place unsqueeze. The original `unsqueeze_(0)`
                # mutated the metadata of the view into the caller's tensor,
                # which is unnecessary and error-prone.
                image_embeddings=image_embed[b].unsqueeze(0),
                image_pe=self.prompt_encoder.get_dense_pe(),
                sparse_prompt_embeddings=sparse_embedding,
                dense_prompt_embeddings=dense_embedding,
                repeat_image=False,
                high_res_features=[high_res_feats_0[b], high_res_feats_1[b]],
            )

            if self.multimask_output:
                # Drop index 0 (the single-mask slot), keep the 3 multi-mask outputs.
                masks = masks[:, 1:, :, :]
                iou_predictions = iou_predictions[:, 1:]
            else:
                # Select one mask via SAM2's stability-based fallback.
                masks, iou_predictions = self.mask_decoder._dynamic_multimask_via_stability(masks, iou_predictions)

        return masks, iou_predictions

    def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
        """Embed prompt points into sparse prompt embeddings.

        Args:
            point_coords: (num_labels, num_points, 2) pixel coordinates.
            point_labels: (num_labels, num_points) labels; -1 marks padding.

        Returns:
            Sparse point embeddings with one padding point appended.
        """
        # Shift to pixel centers, then append one padding "not a point" entry
        # (label -1), matching the SAM prompt-encoder convention.
        point_coords = point_coords + 0.5

        padding_point = torch.zeros((point_coords.shape[0], 1, 2), device=point_coords.device)
        padding_label = -torch.ones((point_labels.shape[0], 1), device=point_labels.device)
        point_coords = torch.cat([point_coords, padding_point], dim=1)
        point_labels = torch.cat([point_labels, padding_label], dim=1)

        # Normalize to [0, 1]; input is square, so one divisor serves both axes.
        point_coords[:, :, 0] = point_coords[:, :, 0] / self.model.image_size
        point_coords[:, :, 1] = point_coords[:, :, 1] / self.model.image_size

        point_embedding = self.prompt_encoder.pe_layer._pe_encoding(point_coords)
        point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)

        # Zero out padding points, then add the "not a point" embedding there...
        point_embedding = point_embedding * (point_labels != -1)
        point_embedding = point_embedding + self.prompt_encoder.not_a_point_embed.weight * (
                point_labels == -1
        )

        # ...and the label-specific embedding for each real label value.
        for i in range(self.prompt_encoder.num_point_embeddings):
            point_embedding = point_embedding + self.prompt_encoder.point_embeddings[i].weight * (point_labels == i)

        return point_embedding

    def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
        """Embed a mask prompt as a dense embedding.

        Blends the downscaled mask with the learned "no mask" embedding,
        weighted by has_mask_input (expected 0 or 1).
        """
        mask_embedding = has_mask_input * self.prompt_encoder.mask_downscaling(input_mask)
        mask_embedding = mask_embedding + (
                1 - has_mask_input
        ) * self.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
        return mask_embedding

    def docoder_postprocess(self, masks, iou_predictions, img_size):
        """Clamp mask logits and upsample them to img_size (H, W).

        Args:
            masks: low-resolution mask logits.
            iou_predictions: passed through unchanged.
            img_size: target (height, width).
        """
        # Clamp logits before interpolation — presumably for numerical
        # stability of the exported graph; confirm against upstream SAM2.
        masks = torch.clamp(masks, -32.0, 32.0)

        masks = F.interpolate(masks, (img_size[0], img_size[1]), mode="bilinear", align_corners=False)

        return masks, iou_predictions

    # Correctly-spelled alias; the misspelled name above is kept so existing
    # callers (e.g. export_onnx) keep working.
    decoder_postprocess = docoder_postprocess


def obtain_model_info(model_cfg_dir: str, model_type: str = 'sam2.1_hiera_small'):
    """Resolve export settings and the config path for a SAM2 model variant.

    Args:
        model_cfg_dir: directory containing the sam2.1 *.yaml config files.
        model_type: one of "sam2.1_hiera_tiny", "sam2.1_hiera_small",
            "sam2.1_hiera_large", "sam2.1_hiera_base_plus". Previously a
            hard-coded local; now a parameter with the same default.

    Returns:
        (model_type, input_size, multimask_output, model_cfg_path).
    """
    # Fixed at 1024 because the loaded SAM2Base uses image_size == 1024
    # (SAM2Transforms in sam2/sam2_image_predictor.py is built from
    # model.image_size).
    input_size = 1024
    multimask_output = False

    cfg_by_type = {
        "sam2.1_hiera_tiny": "sam2.1_hiera_t.yaml",
        "sam2.1_hiera_small": "sam2.1_hiera_s.yaml",
        "sam2.1_hiera_base_plus": "sam2.1_hiera_b+.yaml",
    }
    # Any other value (including "sam2.1_hiera_large") falls back to the
    # large config — same behavior as the original if/elif chain.
    model_cfg = osp.join(model_cfg_dir, cfg_by_type.get(model_type, "sam2.1_hiera_l.yaml"))
    return model_type, input_size, multimask_output, model_cfg


def export_onnx(model_type : str, input_size : int, multimask_output : bool, model_cfg : str, model_dir : str):
    """Export the SAM2 encoder and decoder wrappers to ONNX files.

    Loads "<model_dir>/<model_type>.pt", runs dummy forward passes to obtain
    concrete shapes, then writes "<model_type>_encoder.onnx" and
    "<model_type>_decoder.onnx" into model_dir.

    Args:
        model_type: checkpoint stem, e.g. "sam2.1_hiera_small".
        input_size: square input resolution (1024 for released SAM2 models).
        multimask_output: forwarded to SAM2ImageDecoder.
        model_cfg: path to the model's yaml config.
        model_dir: directory holding the .pt checkpoint and receiving output.
    """
    from sam2.build_sam import build_sam2

    sam2_checkpoint = osp.join(model_dir, f"{model_type}.pt")
    sam2_model = build_sam2(model_cfg, sam2_checkpoint, device="cpu")

    # Dummy image at the fixed model resolution.
    img = torch.randn(1, 3, input_size, input_size).cpu()

    # ---- encoder export ----
    sam2_encoder = SAM2ImageEncoder(sam2_model).cpu()
    high_res_feats_0, high_res_feats_1, image_embed = sam2_encoder(img)
    print('encode')
    print(img.shape)
    print(high_res_feats_0.shape)
    print(high_res_feats_1.shape)
    print(image_embed.shape)

    torch.onnx.export(sam2_encoder,
                      img,
                      osp.join(model_dir, f"{model_type}_encoder.onnx"),
                      export_params=True,
                      opset_version=17,
                      do_constant_folding=True,
                      input_names=['image'],
                      output_names=['high_res_feats_0', 'high_res_feats_1', 'image_embed'],
                      # Dynamic batch for the encoder is simple (uncomment below);
                      # the decoder is the hard part — see the note on its export.
                      #   dynamic_axes={'image': {0: "batch"},
                      #                 'high_res_feats_0': {0: "batch"},
                      #                 'high_res_feats_1': {0: "batch"},
                      #                 'image_embed': {0: "batch"},}
                      )

    # ---- decoder export ----
    sam2_decoder = SAM2ImageDecoder(sam2_model, multimask_output=multimask_output).cpu()

    # Image-embedding grid size; mask prompts are 4x that resolution.
    embed_size = (sam2_model.image_size // sam2_model.backbone_stride, sam2_model.image_size // sam2_model.backbone_stride)
    mask_input_size = [4 * x for x in embed_size]

    # Dummy prompts: 1 batch, 1 label set, 5 points + one low-res mask.
    point_coords = torch.randint(low=0, high=input_size, size=(1, 1, 5, 2), dtype=torch.float)
    point_labels = torch.randint(low=0, high=1, size=(1, 1, 5), dtype=torch.float)
    mask_input = torch.randn(1, 1, *mask_input_size, dtype=torch.float)
    has_mask_input = torch.tensor([[1]], dtype=torch.float)
    orig_im_size = torch.tensor([input_size, input_size], dtype=torch.int32)

    masks, scores = sam2_decoder(image_embed, high_res_feats_0, high_res_feats_1, point_coords, point_labels, mask_input, has_mask_input)

    masks, scores = sam2_decoder.docoder_postprocess(masks, scores, orig_im_size)

    print('decode')
    print(image_embed.shape)
    print(high_res_feats_0.shape)
    print(high_res_feats_1.shape)
    print(point_coords.shape)
    print(point_labels.shape)
    print(mask_input.shape)
    print(has_mask_input.shape)
    print(orig_im_size.shape)
    print(masks.shape)
    print(scores.shape)

    # NOTE(review): decoder uses opset 16 while the encoder uses 17 — kept
    # as-is; confirm whether both can share one opset version.
    torch.onnx.export(sam2_decoder,
                      (image_embed, high_res_feats_0, high_res_feats_1, point_coords, point_labels, mask_input, has_mask_input),
                      # Fixed: name the decoder like the encoder
                      # ("{model_type}_decoder.onnx" instead of the bare
                      # "decoder.onnx"), so multiple model variants can
                      # coexist in model_dir.
                      osp.join(model_dir, f"{model_type}_decoder.onnx"),
                      export_params=True,
                      opset_version=16,
                      do_constant_folding=True,
                      input_names=['image_embed', 'high_res_feats_0', 'high_res_feats_1', 'point_coords', 'point_labels', 'mask_input', 'has_mask_input'],
                      output_names=['masks', 'iou_predictions'],
                      dynamic_axes={
                                    # NOTE: SAM2ImagePredictor.predict_batch is only a for-loop;
                                    # a true multi-batch ONNX needs SAM2ImageDecoder changes
                                    # (its loop returns only the last element's masks).
                                    'image_embed': {0: 'batch'},
                                    'high_res_feats_0': {0: 'batch'},
                                    'high_res_feats_1': {0: 'batch'},
                                    "point_coords": {0: 'batch', 1: "num_labels", 2: "num_points"},
                                    "point_labels": {0: 'batch', 1: "num_labels", 2: "num_points"},
                                    "mask_input": {0: 'batch', 1: "num_labels"},
                                    "has_mask_input": {0: 'batch', 1: "num_labels"}
                                    }
                      )


def main(model_dir: str = '/weights', model_cfg_dir: str = '/opt/sam2/sam2/configs/sam2.1') -> None:
    """Entry point: resolve model settings and export encoder/decoder ONNX.

    Args:
        model_dir: directory containing "<model_type>.pt" and receiving the
            exported .onnx files. Previously hard-coded; default unchanged.
        model_cfg_dir: directory with the sam2.1 *.yaml model configs.
            Previously hard-coded (with a stray leading '//', now fixed —
            a leading '//' is implementation-defined per POSIX).
    """
    model_type, input_size, multimask_output, model_cfg = obtain_model_info(model_cfg_dir)
    export_onnx(model_type, input_size, multimask_output, model_cfg, model_dir)
    print('finish')


# Run the full ONNX export when invoked as a script.
if __name__ == '__main__':
    main()
