import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

import sys
import os.path as osp
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from sam_pth.common_tools import *


def _scan_frame_names(video_dir):
    """Return the JPEG frame filenames in `video_dir`, sorted by integer index.

    Frames are expected to be named `<frame_index>.jpg` (any JPEG extension,
    upper or lower case).
    """
    frame_names = [
        p for p in os.listdir(video_dir)
        if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]
    ]
    frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
    return frame_names


def _show_or_save(save_dir, filename):
    """Show the current matplotlib figure, or save it as `filename` under `save_dir`.

    NOTE(review): `show_image` is assumed to be a boolean flag provided by the
    wildcard import of `sam_pth.common_tools` -- confirm.
    """
    if show_image:
        plt.show()
    else:
        plt.savefig(osp.join(save_dir, filename))


def _propagate(predictor, inference_state):
    """Propagate all prompts through the video.

    Returns a dict {frame_idx: {obj_id: boolean mask (numpy array)}} with the
    per-frame segmentation results.
    """
    video_segments = {}
    for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state):
        video_segments[out_frame_idx] = {
            out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy()
            for i, out_obj_id in enumerate(out_obj_ids)
        }
    return video_segments


def _render_segments(video_segments, video_dir, frame_names, save_dir, stem, stride=30):
    """Render the segmentation results every `stride` frames.

    Bug fix: the original code called `savefig` once *after* the render loop,
    so in save mode only the last rendered frame was written and every earlier
    figure was silently discarded.  Now each rendered frame is saved as
    `<stem>-<frame_idx>.jpg`; interactive mode still shows all figures.
    """
    plt.close("all")
    for out_frame_idx in range(0, len(frame_names), stride):
        plt.figure(figsize=(6, 4))
        plt.title(f"frame {out_frame_idx}")
        plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx])))
        for out_obj_id, out_mask in video_segments[out_frame_idx].items():
            show_mask_video(out_mask, plt.gca(), obj_id=out_obj_id)
        if not show_image:
            plt.savefig(osp.join(save_dir, f'{stem}-{out_frame_idx}.jpg'))
    if show_image:
        plt.show()


def main():
    """SAM2 interactive video segmentation demo.

    Covers:
      1. adding clicks (or a box) on one frame to obtain and refine a
         masklet (spatio-temporal mask);
      2. propagating the clicks (or box) to obtain masklets across the
         entire video;
      3. segmenting and tracking multiple objects simultaneously.
    """
    device = device_select()
    sam2_checkpoint = "/weights/sam2.1_hiera_small.pt"
    # Bug fix: the original path started with a doubled slash ("//opt/...").
    model_cfg = "/opt/sam2/sam2/configs/sam2.1/sam2.1_hiera_s.yaml"
    predictor = init_video_predictor(sam2_checkpoint, model_cfg, device)

    # `video_dir` is a directory of JPEG frames with filenames like `<frame_index>.jpg`
    video_dir = "/opt/sam2/notebooks/videos/bedroom"
    save_dir = '/data'

    frame_names = _scan_frame_names(video_dir)

    # Take a look at the first video frame.
    frame_idx = 0
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {frame_idx}")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[frame_idx])))
    _show_or_save(save_dir, 'video-1.jpg')

    # SAM2 needs stateful inference for interactive video segmentation, so we
    # initialize an inference state for this video.  During initialization it
    # loads all the JPEG frames in `video_path` and stores their pixels in
    # `inference_state`.
    inference_state = predictor.init_state(video_path=video_dir)

    # Example 1: segment & track one object.
    # If this inference_state was used for any tracking before, reset it first.
    predictor.reset_state(inference_state)

    # Step 1: add the first click on a frame.
    # Try to segment the child on the left by sending a positive click
    # (label 1) at (x, y) = (210, 350) to the `add_new_points_or_box` API.
    # Label 1 means a positive click (adds a region); label 0 means a
    # negative click (removes a region).
    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_id = 1  # a unique id for each object we interact with (any integer)
    points = np.array([[210, 350]], dtype=np.float32)
    labels = np.array([1], np.int32)

    _, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box(
        inference_state=inference_state,
        frame_idx=ann_frame_idx,
        obj_id=ann_obj_id,
        points=points,
        labels=labels,
    )

    # Show the results on the current (interacted) frame.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx}")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_points(points, labels, plt.gca())
    show_mask_video((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0])
    _show_or_save(save_dir, 'video-2.jpg')

    # Step 2: add a second click to refine the prediction.
    # A single click is ambiguous about the target object (the model only
    # predicted the shorts), so add another positive click (label 1) at
    # (x, y) = (250, 220) on the child's shirt to expand the mask.
    # Note: `add_new_points_or_box` must receive ALL clicks and their labels
    # so far, not just the latest click.
    ann_frame_idx = 0
    ann_obj_id = 1
    points = np.array([[210, 350], [250, 220]], dtype=np.float32)
    labels = np.array([1, 1], np.int32)

    _, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box(
        inference_state=inference_state,
        frame_idx=ann_frame_idx,
        obj_id=ann_obj_id,
        points=points,
        labels=labels,
    )

    # Show the results on the current (interacted) frame.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx}")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_points(points, labels, plt.gca())
    show_mask_video((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0])
    _show_or_save(save_dir, 'video-3.jpg')

    # With this second refinement click we now get the whole child on frame 0.
    # Step 3: propagate the prompts with `propagate_in_video` to get masklets
    # across the entire video, then render every few frames.
    video_segments = _propagate(predictor, inference_state)
    _render_segments(video_segments, video_dir, frame_names, save_dir, 'video-4')

    # Step 4: add new prompts to further refine the masklet.
    # The masklet on frame 150 has minor boundary imperfections; refine it
    # with a negative click (label 0) on that frame.  A different `frame_idx`
    # tells `add_new_points_or_box` which frame to refine.
    ann_frame_idx = 150
    ann_obj_id = 1

    # Show the segment before further refinement.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx} -- before refinement")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_mask_video(video_segments[ann_frame_idx][ann_obj_id], plt.gca(), obj_id=ann_obj_id)

    # NOTE(review): the original comment said (82, 415) but the code uses
    # (82, 410); the code value is kept.
    points = np.array([[82, 410]], dtype=np.float32)
    labels = np.array([0], np.int32)

    _, _, out_mask_logits = predictor.add_new_points_or_box(
        inference_state=inference_state,
        frame_idx=ann_frame_idx,
        obj_id=ann_obj_id,
        points=points,
        labels=labels,
    )

    # Show the segment after the further refinement.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx} -- after refinement")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_points(points, labels, plt.gca())
    # Consistency fix: index the first (and only) object's logits as at every
    # other call site, instead of passing the whole logits batch.
    show_mask_video((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=ann_obj_id)
    _show_or_save(save_dir, 'video-5.jpg')

    # Step 5: propagate the prompts again (after the refinement click above)
    # to get an updated masklet throughout the entire video.
    video_segments = _propagate(predictor, inference_state)
    _render_segments(video_segments, video_dir, frame_names, save_dir, 'video-6')

    # Example 2: segment an object using a box prompt.
    predictor.reset_state(inference_state)

    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_id = 4  # a unique id for each object we interact with (any integer)

    # Add a box at (x_min, y_min, x_max, y_max) = (300, 0, 500, 400) to start.
    box = np.array([300, 0, 500, 400], dtype=np.float32)
    _, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box(
        inference_state=inference_state,
        frame_idx=ann_frame_idx,
        obj_id=ann_obj_id,
        box=box,
    )

    # Show the results on the current (interacted) frame.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx}")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_box(box, plt.gca())
    show_mask_video((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0])
    _show_or_save(save_dir, 'video-7.jpg')

    # Although the box does not fit the object perfectly, SAM2 still gets a
    # decent mask for the whole child.  As before, an imperfect mask from a
    # box prompt can be refined with positive/negative clicks: here a positive
    # click (label 1) at (x, y) = (460, 60) expands the segment around the
    # child's hair.  Note: to refine a box prompt, the original box must be
    # sent together with ALL subsequent refinement clicks and labels.
    ann_frame_idx = 0
    ann_obj_id = 4
    points = np.array([[460, 60]], dtype=np.float32)
    labels = np.array([1], np.int32)
    box = np.array([300, 0, 500, 400], dtype=np.float32)

    _, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box(
        inference_state=inference_state,
        frame_idx=ann_frame_idx,
        obj_id=ann_obj_id,
        points=points,
        labels=labels,
        box=box,
    )

    # Show the results on the current (interacted) frame.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx}")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_box(box, plt.gca())
    show_points(points, labels, plt.gca())
    show_mask_video((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0])
    _show_or_save(save_dir, 'video-8.jpg')

    video_segments = _propagate(predictor, inference_state)
    _render_segments(video_segments, video_dir, frame_names, save_dir, 'video-9')

    # Example 3: segment multiple objects simultaneously.
    predictor.reset_state(inference_state)
    prompts = {}  # hold all the clicks we add, for visualization

    # Add the first object (id 2) with a positive click at (x, y) = (200, 300).
    ann_frame_idx = 0  # the frame index we interact with
    ann_obj_id = 2  # a unique id for each object we interact with (any integer)
    points = np.array([[200, 300]], dtype=np.float32)
    labels = np.array([1], np.int32)  # 1 = positive click, 0 = negative click
    prompts[ann_obj_id] = points, labels

    _, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box(
        inference_state=inference_state,
        frame_idx=ann_frame_idx,
        obj_id=ann_obj_id,
        points=points,
        labels=labels,
    )

    # Show the results on the current (interacted) frame.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx}")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_points(points, labels, plt.gca())
    for i, out_obj_id in enumerate(out_obj_ids):
        show_points(*prompts[out_obj_id], plt.gca())
        show_mask_video((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id)
    _show_or_save(save_dir, 'video-10.jpg')

    # Keep only the child's shirt: refine the first object with a 2nd,
    # negative click at (x, y) = (275, 175), sending all clicks (and their
    # labels) to `add_new_points_or_box`.
    ann_frame_idx = 0
    ann_obj_id = 2
    points = np.array([[200, 300], [275, 175]], dtype=np.float32)
    labels = np.array([1, 0], np.int32)
    prompts[ann_obj_id] = points, labels

    _, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box(
        inference_state=inference_state,
        frame_idx=ann_frame_idx,
        obj_id=ann_obj_id,
        points=points,
        labels=labels,
    )

    # Show the results on the current (interacted) frame.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx}")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_points(points, labels, plt.gca())
    for i, out_obj_id in enumerate(out_obj_ids):
        show_points(*prompts[out_obj_id], plt.gca())
        show_mask_video((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id)
    _show_or_save(save_dir, 'video-11.jpg')

    # Add the second object we want to track (object id 3) with a positive
    # click at (x, y) = (400, 150).
    ann_frame_idx = 0
    ann_obj_id = 3
    points = np.array([[400, 150]], dtype=np.float32)
    labels = np.array([1], np.int32)
    prompts[ann_obj_id] = points, labels

    # `add_new_points_or_box` returns masks for ALL objects added so far on
    # this interacted frame.
    _, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box(
        inference_state=inference_state,
        frame_idx=ann_frame_idx,
        obj_id=ann_obj_id,
        points=points,
        labels=labels,
    )

    # Show the results on the current (interacted) frame for all objects.
    plt.figure(figsize=(9, 6))
    plt.title(f"frame {ann_frame_idx}")
    plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))
    show_points(points, labels, plt.gca())
    for i, out_obj_id in enumerate(out_obj_ids):
        show_points(*prompts[out_obj_id], plt.gca())
        show_mask_video((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id)
    _show_or_save(save_dir, 'video-12.jpg')

    # Track every object through the whole video.
    video_segments = _propagate(predictor, inference_state)
    _render_segments(video_segments, video_dir, frame_names, save_dir, 'video-13')


# Script entry point: run the full SAM2 video-segmentation demo.
if __name__ == '__main__':
    main()
