import torch
import torch.nn as nn
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor
import cv2
import numpy as np
from torchvision.transforms import functional as F
from torchvision import transforms


class SAM2(nn.Module):
    """Thin nn.Module wrapper around a SAM2 model/predictor pair.

    Exposes the segment-anything pipeline as separate, reusable steps
    (image encoding, prompt preprocessing/encoding, mask decoding,
    postprocessing) and wires them together in ``forward``.
    """

    def __init__(self, config_file, ckpt_path):
        """Build the SAM2 model from a config file and checkpoint, and wrap
        it in an image predictor used for preprocessing/feature caching."""
        super().__init__()
        self.sam2_model = build_sam2(config_file=config_file, ckpt_path=ckpt_path)
        self.sam2_predictor = SAM2ImagePredictor(self.sam2_model)

    def preprocess_prompts(self, points, labels, box, mask_logit, normalize_coords=True, img_idx=-1):
        """Turn raw prompts into the inputs expected by the prompt encoder.

        Returns ``(concat_points, mask_input)`` where ``concat_points`` is a
        ``(coords, labels)`` pair merging point and box prompts (box corners
        are tagged with the special labels 2 and 3), or ``None`` when no
        sparse prompt was given; ``mask_input`` is the dense mask prompt.
        """
        mask_input, unnorm_coords, labels, unnorm_box = self.sam2_predictor._prep_prompts(
            points, labels, box, mask_logit, normalize_coords, img_idx
        )
        concat_points = None if unnorm_coords is None else (unnorm_coords, labels)
        if unnorm_box is not None:
            # Flatten the box into corner points: (1, 2*num_boxes, 2).
            corner_coords = unnorm_box.reshape(1, -1, 2)
            corner_labels = torch.tensor([2, 3], dtype=torch.int, device=unnorm_box.device)
            corner_labels = corner_labels.repeat(corner_coords.size(1) // 2).unsqueeze(0)
            if concat_points is None:
                concat_points = (corner_coords, corner_labels)
            else:
                # Box corners go first, then the user-supplied points.
                concat_points = (
                    torch.cat([corner_coords, concat_points[0]], dim=1),
                    torch.cat([corner_labels, concat_points[1]], dim=1),
                )
        # Box and point prompts are merged into a single sparse prompt tuple;
        # the dense prompt is the (optional) mask input.
        return concat_points, mask_input

    def image_encoder(self, image):
        """Run the predictor's image setup and return the cached embeddings.

        Returns ``(image_embed, high_res_feats)`` pulled from the predictor's
        feature cache populated by ``set_image``.
        """
        self.sam2_predictor.set_image(image)
        features = self.sam2_predictor._features
        return features["image_embed"], features["high_res_feats"]

    def prompts_encoder(self, point_bbox_embeds, mask_embeds):
        """Encode sparse (points/boxes) and dense (mask) prompts.

        Boxes are already folded into ``point_bbox_embeds`` by
        ``preprocess_prompts``, so ``boxes=None`` is passed through here.
        """
        return self.sam2_model.sam_prompt_encoder(
            points=point_bbox_embeds, boxes=None, masks=mask_embeds
        )

    def mask_decoder(self, image_embeds, sparse_embeds, dense_embeds, high_res_features, multimask_output, batched_mode, img_idx=-1):
        """Decode low-resolution masks for the image at ``img_idx``.

        Returns ``(low_res_masks, iou_predictions)``; the decoder's token and
        object-score outputs are discarded.
        """
        decoder_out = self.sam2_model.sam_mask_decoder(
            image_embeddings=image_embeds[img_idx].unsqueeze(0),
            image_pe=self.sam2_model.sam_prompt_encoder.get_dense_pe(),
            sparse_prompt_embeddings=sparse_embeds,
            dense_prompt_embeddings=dense_embeds,
            multimask_output=multimask_output,
            repeat_image=batched_mode,
            high_res_features=high_res_features,
        )
        low_res_masks, iou_predictions = decoder_out[0], decoder_out[1]
        return low_res_masks, iou_predictions

    def postprocess_masks(self, low_res_masks, org_size):
        """Upscale decoder masks back to the original image resolution."""
        return self.sam2_predictor._transforms.postprocess_masks(low_res_masks, org_size)

    def forward(self,
                image,
                points=None,
                labels=None,
                box=None,
                mask_logit=None,
                ):
        """Full pipeline: encode image and prompts, decode one mask per
        prompt set, upscale to the original resolution, and return per-pixel
        sigmoid scores in [0, 1]."""
        # Encode the image.
        image_embeds, high_res_feats = self.image_encoder(image)
        # Preprocess the prompts.
        point_box_embeds, mask_embeds = self.preprocess_prompts(
            points=points, labels=labels, box=box, mask_logit=mask_logit, normalize_coords=True
        )
        # Encode the prompts.
        sparse_embeds, dense_embeds = self.prompts_encoder(
            point_bbox_embeds=point_box_embeds, mask_embeds=mask_embeds
        )
        # More than one sparse prompt in the batch -> the decoder must repeat
        # the image features across the prompt batch.
        is_batched = point_box_embeds is not None and point_box_embeds[0].shape[0] > 1
        # Decode the masks, feeding only the last image's high-res features.
        low_res_masks, iou_predictions = self.mask_decoder(
            image_embeds=image_embeds,
            sparse_embeds=sparse_embeds,
            dense_embeds=dense_embeds,
            high_res_features=[level[-1].unsqueeze(0) for level in high_res_feats],
            multimask_output=False,
            batched_mode=is_batched,
        )
        # Restore to the original resolution.
        masks = self.postprocess_masks(low_res_masks, self.sam2_predictor._orig_hw[-1])
        # Squash logits to per-pixel probabilities.
        return torch.sigmoid(masks)
