import copy
import os
import shutil
from argparse import ArgumentParser
from contextlib import nullcontext  # used by main(); previously only available via wildcard imports
from os import path

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch.utils.data import DataLoader
from tqdm import tqdm

from inference.data.test_datasets import LongTestDataset, DAVISTestDataset, YouTubeVOSTestDataset
from inference.data.mask_mapper import MaskMapper
from model.network import XMem
from inference.inference_core import InferenceCore
from inference.interact.interactive_utils import *
from inference.interact.interaction import *
from inference.interact.resource_manager import ResourceManager
from inference.interact.gui_utils import *


# Inference-only script: build no autograd graphs anywhere.
torch.set_grad_enabled(False)

# Choose the best available backend in priority order: CUDA > Apple MPS > CPU.
_backend = "cpu"
if torch.cuda.is_available():
    _backend = "cuda"
elif torch.backends.mps.is_available():
    _backend = "mps"
device = torch.device(_backend)


# Hard-coded dataset selection: exactly one image_path/mask_path pair is
# active; the commented blocks below are alternative datasets to switch to.

# # Original image directory
# image_path = "/home/t/workspace/tld_-xmem/workspace/177661140-f690156b-1775-4cd7-acd7-1738a5c92f30/images"
# # Mask folder path
# mask_path = "/home/t/langsam_for_tracking/logs/mask0/"

# Original image directory
image_path = "/home/t/workspace/tld_-xmem/workspace/飞书20240520-194847/images"
# Mask folder path
mask_path = "/home/t/langsam_for_tracking/logs/mask_sub2/"

# # Original image directory
# image_path = "/home/t/workspace/tld_-xmem/workspace/飞书20240520-220058/images"
# # Mask folder path
# mask_path = "/home/t/langsam_for_tracking/logs/mask_bike1/"

# # Original image directory
# image_path = "/home/t/workspace/tld_-xmem/workspace/bird/images"
# # Mask folder path
# mask_path = "/home/t/langsam_for_tracking/logs/mask_birds/"

# # Original image directory
# image_path = "/home/t/workspace/tld_-xmem/workspace/pigs/images/"
# # Mask folder path
# mask_path = "/home/t/workspace/langsam_for_tracking/logs/pigs/"

def load_masks_and_images(image_path, mask_path):
    """Load every '.jpg' frame in *image_path* with its per-object masks.

    For each ``<stem>.jpg``, masks are read from ``<mask_path>/<stem>/*.png``.
    Each mask is binarized (any value > 0 becomes 1) and paired with an
    axis-aligned bounding box ``[cmin, rmin, cmax, rmax]``.

    Returns:
        (masks_list, bboxes_list, images_list) — for each frame that has a
        mask directory: an (N, H, W) uint array of binary masks, an (N, 4)
        array of boxes, and the BGR image loaded with cv2. Frames without a
        mask directory are skipped (but still advance the progress bar).
    """
    image_files = sorted(f for f in os.listdir(image_path) if f.endswith('.jpg'))
    masks_list = []
    bboxes_list = []
    images_list = []

    # Progress bar over frames; the context manager guarantees it is closed
    # (the original left the bar open, corrupting later tqdm output).
    with tqdm(total=len(image_files), unit='image') as pbar:
        for image_file in image_files:
            mask_dir = os.path.splitext(image_file)[0]
            mask_dir_path = os.path.join(mask_path, mask_dir)

            if os.path.isdir(mask_dir_path):
                mask_files = sorted(f for f in os.listdir(mask_dir_path) if f.endswith('.png'))
                masks = []
                bboxes = []

                for mask_file in mask_files:
                    # Context manager releases the file handle even if
                    # decoding raises (the original closed it manually and
                    # shadowed the loop variable with the Image object).
                    with Image.open(os.path.join(mask_dir_path, mask_file)) as mask_img:
                        mask = np.array(mask_img)
                    # Binarize: any non-zero pixel counts as foreground.
                    mask = np.where(mask > 0, 1, 0)
                    masks.append(mask)

                    # Bounding box [cmin, rmin, cmax, rmax]; an all-zero mask
                    # would make the fancy indexing raise IndexError, so guard
                    # it and emit a degenerate box instead.
                    rows = np.any(mask, axis=1)
                    cols = np.any(mask, axis=0)
                    if rows.any():
                        rmin, rmax = np.where(rows)[0][[0, -1]]
                        cmin, cmax = np.where(cols)[0][[0, -1]]
                        bboxes.append([cmin, rmin, cmax, rmax])
                    else:
                        bboxes.append([0, 0, 0, 0])

                masks_list.append(np.array(masks))
                bboxes_list.append(np.array(bboxes))

                images_list.append(cv2.imread(os.path.join(image_path, image_file)))

            pbar.update(1)

    return masks_list, bboxes_list, images_list

def load_masks_and_image(image_path, mask_path, frame_index):
    """Load one frame (the *frame_index*-th '.jpg' in sorted order) plus masks.

    Masks are read from ``<mask_path>/<frame stem>/*.png``; each is binarized
    (any value > 0 becomes 1) and paired with a bounding box
    ``[cmin, rmin, cmax, rmax]``. If the frame has no mask directory, the
    mask/bbox lists come back empty.

    Returns:
        (masks, bboxes, image) — lists of binary masks and boxes, and the
        BGR image loaded with cv2.
    """
    image_files = sorted(f for f in os.listdir(image_path) if f.endswith('.jpg'))
    image_file = image_files[frame_index]

    mask_dir_path = os.path.join(mask_path, os.path.splitext(image_file)[0])

    masks = []
    bboxes = []

    if os.path.isdir(mask_dir_path):
        mask_files = sorted(f for f in os.listdir(mask_dir_path) if f.endswith('.png'))
        for mask_file in mask_files:
            # Context manager releases the file handle even if decoding
            # raises (the original closed it manually and shadowed the loop
            # variable with the Image object).
            with Image.open(os.path.join(mask_dir_path, mask_file)) as mask_img:
                mask = np.array(mask_img)
            # Binarize: any non-zero pixel counts as foreground.
            mask = np.where(mask > 0, 1, 0)
            masks.append(mask)

            # Bounding box [cmin, rmin, cmax, rmax]; an all-zero mask would
            # make the fancy indexing raise IndexError, so guard it and emit
            # a degenerate box instead.
            rows = np.any(mask, axis=1)
            cols = np.any(mask, axis=0)
            if rows.any():
                rmin, rmax = np.where(rows)[0][[0, -1]]
                cmin, cmax = np.where(cols)[0][[0, -1]]
                bboxes.append([cmin, rmin, cmax, rmax])
            else:
                bboxes.append([0, 0, 0, 0])

    image = cv2.imread(os.path.join(image_path, image_file))

    return masks, bboxes, image

def get_num_frames(image_path):
    """Return how many '.jpg' frames the directory *image_path* contains."""
    return sum(1 for name in os.listdir(image_path) if name.endswith('.jpg'))

def compute_intersection_ratio(mask1, mask2):
    """Return the intersection-over-union of two binary masks.

    Returns 0 when both masks are entirely empty (union has no pixels),
    avoiding a division by zero.
    """
    union_area = np.sum(np.logical_or(mask1, mask2))
    if union_area == 0:
        return 0
    return np.sum(np.logical_and(mask1, mask2)) / union_area

# Shared state between main()'s drawing loop (which renders every bbox with
# its index) and the OpenCV mouse callback that records user clicks.
selected_bbox_index = -1  # index of the most recently clicked bbox (-1 = none yet)
selected_bbox_indices = []  # every distinct bbox index clicked so far, in click order
mouse_pos = (0, 0)  # last known cursor position in window coordinates

def main():
    """Interactive multi-object tracking with XMem.

    Pipeline as implemented below:
      1. Parse CLI args and build the XMem config dict.
      2. Show the first frame with all candidate bounding boxes in an
         OpenCV window; the user clicks the boxes to track and presses
         'q' to confirm the selection.
      3. Seed the XMem InferenceCore with the merged mask of the selected
         objects, then propagate through every frame.
      4. Every `sync_every` frames, replace the tracker's masks with the
         best-overlapping externally detected masks ("detector intervention").
      5. Draw boxes/translucent masks per frame and save the result to
         ./workspace/output.mp4.
    """

    start_frame = 0
    end_frame = 900  # NOTE(review): unused — the tracking loop runs to num_frames
    obj_id = 3  # placeholder; overwritten by args.obj_id after parsing

    # Arguments parsing
    parser = ArgumentParser()
    parser.add_argument('--model', default='./saves/XMem.pth')
    parser.add_argument('--s2m_model', default='saves/s2m.pth')
    parser.add_argument('--fbrs_model', default='saves/fbrs.pth')

    parser.add_argument('--obj_id', type=int, default=0)

    """
    Priority 1: If a "images" folder exists in the workspace, we will read from that directory
    Priority 2: If --images is specified, we will copy/resize those images to the workspace
    Priority 3: If --video is specified, we will extract the frames to the workspace (in an "images" folder) and read from there

    In any case, if a "masks" folder exists in the workspace, we will use that to initialize the mask
    That way, you can continue annotation from an interrupted run as long as the same workspace is used.
    """
    parser.add_argument('--images', help='Folders containing input images.', default=None)
    parser.add_argument('--video', help='Video file readable by OpenCV.', default=None)
    parser.add_argument('--workspace', help='directory for storing buffered images (if needed) and output masks', default=None)

    parser.add_argument('--buffer_size', help='Correlate with CPU memory consumption', type=int, default=100)
    
    parser.add_argument('--num_objects', type=int, default=1)

    # Long-memory options
    # Defaults. Some can be changed in the GUI.
    parser.add_argument('--max_mid_term_frames', help='T_max in paper, decrease to save memory', type=int, default=10)
    parser.add_argument('--min_mid_term_frames', help='T_min in paper, decrease to save memory', type=int, default=5)
    parser.add_argument('--max_long_term_elements', help='LT_max in paper, increase if objects disappear for a long time', 
                                                    type=int, default=10000)
    parser.add_argument('--num_prototypes', help='P in paper', type=int, default=128) 

    parser.add_argument('--top_k', type=int, default=30)
    parser.add_argument('--mem_every', type=int, default=10)
    parser.add_argument('--deep_update_every', help='Leave -1 normally to synchronize with mem_every', type=int, default=-1)
    parser.add_argument('--no_amp', help='Turn off AMP', action='store_true')
    parser.add_argument('--size', default=480, type=int, 
            help='Resize the shorter side to this size. -1 to use original resolution. ')
    args = parser.parse_args()

    # create temporary workspace if not specified
    config = vars(args)
    config['enable_long_term'] = True
    config['enable_long_term_count_usage'] = True

    obj_id = args.obj_id

    # AMP autocast on CUDA, no-op context otherwise.
    # NOTE(review): `nullcontext` is not imported explicitly in this file —
    # presumably supplied by one of the wildcard imports; confirm it is
    # contextlib.nullcontext (or import it from contextlib).
    with torch.cuda.amp.autocast(enabled=not args.no_amp) if device.type == 'cuda' else nullcontext():

        num_frames = get_num_frames(image_path)
        # Load the first frame of the sequence with its masks and bboxes
        masks, bboxes, image0 = load_masks_and_image(image_path, mask_path, start_frame)
        height, width = image0.shape[:2]

        print("num_frames:", num_frames)
        print("height:", height)
        print("width:", width)

        def mouse_callback(event, x, y, flags, param):
            # OpenCV mouse handler: a left click selects the bbox under the
            # cursor (appended once to selected_bbox_indices); mouse movement
            # only updates the cursor position used for hover highlighting.
            global selected_bbox_index, mouse_pos, selected_bbox_indices
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_pos = (x, y)
                for i, bbox in enumerate(bboxes):
                    if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
                        selected_bbox_index = i
                        if i not in selected_bbox_indices:
                            selected_bbox_indices.append(i)
                        print(f"Selected bbox index: {selected_bbox_index}")
                        break
            elif event == cv2.EVENT_MOUSEMOVE:
                mouse_pos = (x, y)

        cv2.namedWindow("vis_frame")
        cv2.setMouseCallback("vis_frame", mouse_callback)

        vis_frames = []

        # Interactive selection loop: hovered bbox drawn red, others green,
        # already-selected boxes purple. Press 'q' to confirm and continue.
        while True:
            vis_frame = image0.copy()
            for i, bbox in enumerate(bboxes):
                if bbox[0] < mouse_pos[0] < bbox[2] and bbox[1] < mouse_pos[1] < bbox[3]:
                    vis_frame = cv2.rectangle(vis_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
                else:
                    vis_frame = cv2.rectangle(vis_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
                if i in selected_bbox_indices:
                    vis_frame = cv2.rectangle(vis_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (200, 0, 200), 4)
                
                vis_frame = cv2.putText(vis_frame, str(i), (bbox[0], bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
            vis_frame = cv2.circle(vis_frame, mouse_pos, 10, (255, 0, 0), -1, cv2.LINE_AA)
            # vis_frames.append(vis_frame)
            cv2.imshow("vis_frame", vis_frame)
            if cv2.waitKey(30) & 0xFF == ord('q'):
                break


        if selected_bbox_indices:
            print(f"--------------Selected bbox indices: {selected_bbox_indices}")

        num_objects = len(selected_bbox_indices)

        # Load our checkpoint
        network = XMem(config, args.model, map_location=device).to(device).eval()
        if args.model is not None:
            model_weights = torch.load(args.model)
            network.load_weights(model_weights, init_as_zero_if_needed=True)
        else:
            print('No model loaded.')
        processor = InferenceCore(network, config)
        processor.set_all_labels(list(range(1, num_objects+1)))
        # current frame info
        curr_frame_dirty = False
        current_image = np.zeros((height, width, 3), dtype=np.uint8)
        current_image_torch = None
        current_mask = np.zeros((height, width), dtype=np.uint8)
        seperated_masks = np.zeros((num_objects, height, width), dtype=np.uint8)
        # Allocate current_prob on the GPU
        # NOTE(review): .cuda() assumes CUDA is available — on the mps/cpu
        # fallback chosen at module level this raises; consider .to(device).
        current_prob = torch.zeros((num_objects, height, width), dtype=torch.float).cuda()
        # Check that current_prob lives on the GPU
        print("current_prob device:", current_prob.device)

        print("shape of current_mask:", current_mask.shape)
        print("num_objects:", num_objects)

        # Initialize current_image and current_mask
        # NOTE(review): selected_bbox_index stays -1 if the user never clicked,
        # which silently picks the last mask here — confirm intended.
        current_image = image0.copy()
        current_mask = masks[selected_bbox_index].copy()
        selected_masks = np.array(masks)[selected_bbox_indices]
        selected_masks = np.array(selected_masks)
        print("shape of selected_masks:", selected_masks.shape)

        # Index of the max value at each pixel, giving 1-based object labels
        merged_mask = np.argmax(selected_masks, axis=0) + 1
        # Find the positions where every channel is 0
        zero_mask = np.all(selected_masks == 0, axis=0)
        # Set merged_mask to 0 (background) at those positions
        merged_mask[zero_mask] = 0
        merged_mask = merged_mask.astype(np.uint8)
        print("shape of merged_mask:", merged_mask.shape)
        # Print the max value of merged_mask
        print("max of merged_mask:", np.max(merged_mask))
        
        current_image_torch, current_image_torch_no_norm = image_to_torch(current_image)
        current_prob = index_numpy_to_one_hot_torch(merged_mask, num_objects+1).cuda()

        print("shape of current_prob:", current_prob.shape)
        print("shape of current_image_torch:", current_image_torch.shape)

        # Seed the tracker with the merged mask (channel 0 is background).
        current_prob = processor.step(current_image_torch, current_prob[1:])

        display_duration_ms = 1

        sync_every = 1

        # tqdm progress bar over the tracked frames
        pbar = tqdm(total=num_frames, unit='frame')

        for t in range(start_frame, num_frames):
            
            # advance the progress bar
            pbar.update(1)

            detector_updated = False

            # current
            current_image_masks, _, current_image = load_masks_and_image(image_path, mask_path, t)
            current_image_torch = None
            current_image_torch, current_image_torch_no_norm = image_to_torch(current_image)
            current_prob = processor.step(current_image_torch)

            # Collapse the tracker probabilities into an integer label mask
            current_mask = torch_prob_to_numpy_mask(current_prob)

            #################### Detector intervention
            # Split current_mask into one binary mask per object (values 0/1),
            # stored in seperated_masks
            seperated_masks = np.zeros((num_objects, height, width), dtype=np.uint8)
            segmented_masks = np.zeros((num_objects, height, width), dtype=np.uint8)
            valid_labels = []
            for i in range(num_objects):
                seperated_masks[i] = np.where(current_mask == i+1, 1, 0)

            for i in range(num_objects):
                _current_mask = seperated_masks[i]

                sum_of_current_mask = np.sum(_current_mask)

                # Pick from current_image_masks the detector mask with the
                # highest overlap (IoU) with the tracked mask. A candidate is
                # accepted only if: IoU beats the running best and the
                # threshold, the tracked mask is big enough, and the area
                # ratio falls OUTSIDE (1±diff) — i.e. only re-sync when the
                # two masks' sizes genuinely disagree.
                # NOTE(review): the division below raises on an all-zero
                # detector mask — confirm upstream masks are never empty.
                max_intersection_ratio = 0
                max_intersection_mask = None
                diff_of_intersection = 0.1
                mask_low_threshold = 400
                intersection_ratio_threshold = 0.4
                for mask in current_image_masks:
                    intersection_ratio = compute_intersection_ratio(mask, _current_mask)
                    sum_of_mask = np.sum(mask)
                    inflation_ratio = sum_of_current_mask / sum_of_mask
                    if intersection_ratio > max_intersection_ratio and intersection_ratio > intersection_ratio_threshold \
                        and sum_of_current_mask > mask_low_threshold \
                        and not (inflation_ratio < 1+diff_of_intersection and inflation_ratio > 1-diff_of_intersection) :
                        max_intersection_ratio = intersection_ratio
                        max_intersection_mask = mask
                if max_intersection_mask is not None:
                    segmented_masks[i] = max_intersection_mask
                    valid_labels.append(i+1)

            # Re-merge segmented_masks into a single label mask
            merged_segmented_mask = np.argmax(segmented_masks, axis=0) + 1
            # Find the positions where every channel is 0
            zero_mask = np.all(segmented_masks == 0, axis=0)
            # Set the merged mask to 0 (background) at those positions
            merged_segmented_mask[zero_mask] = 0
            merged_segmented_mask = merged_segmented_mask.astype(np.uint8)
            
            # Push the accepted detector masks back into the tracker, but only
            # for the labels that found a confident match
            if t > 0 and t % sync_every == 0 and len(valid_labels) > 0:
                max_intersection_mask_torch = index_numpy_to_one_hot_torch(merged_segmented_mask, num_objects+1).cuda()
                current_prob = processor.step(current_image_torch, max_intersection_mask_torch[1:], valid_labels=valid_labels)
                detector_updated = True

            #################### end of detector intervention

            # First, check whether current_mask contains any object at all
            if np.max(current_mask) > 0:

                # For each object id
                vis_frame = current_image.copy()
                for target in range(0, num_objects):
                    # Build a mask containing only this object's pixels
                    target_mask = (current_mask == target+1)

                    # Skip objects absent from this frame
                    if np.max(target_mask) == 0:
                        continue

                    # Compute the bounding box
                    rows = np.any(target_mask, axis=1)
                    cols = np.any(target_mask, axis=0)
                    rmin, rmax = np.where(rows)[0][[0, -1]]
                    cmin, cmax = np.where(cols)[0][[0, -1]]
                    target_bbox = [cmin, rmin, cmax, rmax]

                    # If this object was re-synced this frame (in valid_labels),
                    # show "SAM-update" at the bbox's top-right corner
                    if target+1 in valid_labels:
                        vis_frame = cv2.putText(vis_frame, "SAM-update", (target_bbox[2], target_bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 200), 2, cv2.LINE_AA)
                        vis_frame = cv2.putText(vis_frame, "SAM-update", (target_bbox[2], target_bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 200, 0), 1, cv2.LINE_AA)
                        # draw the bbox
                        vis_frame = cv2.rectangle(vis_frame, (target_bbox[0], target_bbox[1]), (target_bbox[2], target_bbox[3]), (0, 0, 200), 2)
                    else:
                        # draw the bbox
                        vis_frame = cv2.rectangle(vis_frame, (target_bbox[0], target_bbox[1]), (target_bbox[2], target_bbox[3]), (0, 255, 0), 2)
                    
                    # draw the object id
                    vis_frame = cv2.putText(vis_frame, str(target), (target_bbox[0], target_bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)

                # Also overlay the mask in translucent red
                # First, turn the mask into a three-channel red image
                red_mask = np.zeros((height, width, 3), dtype=np.uint8)
                red_mask[:, :, 2] = current_mask * 255
                # Draw red_mask directly onto a copy of vis_frame
                vis_frame_copy = vis_frame.copy()
                # Wherever red_mask is non-zero, set vis_frame_copy to red
                vis_frame_copy = np.where(red_mask > 0, red_mask, vis_frame_copy)
                vis_frame = cv2.addWeighted(vis_frame, 0.3, vis_frame_copy, 0.7, 0)

                # # If the detector updated this frame, show "segmentation
                # # updated" in the top-right corner of the frame
                # if detector_updated:
                #     # anti-aliased font
                #     vis_frame = cv2.putText(vis_frame, "segmentation updated", (500, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 200, 200), 4, cv2.LINE_AA)
                #     vis_frame = cv2.putText(vis_frame, "segmentation updated", (500, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)

            else:
                # No object tracked this frame: show the raw image
                vis_frame = current_image.copy()
                
            vis_frames.append(vis_frame)
            cv2.imshow("vis_frame", vis_frame)
            _k = cv2.waitKey(display_duration_ms)
            if _k == ord('q'):
                exit(0)

    # Save vis_frames as a 30-fps mp4 video
    out = cv2.VideoWriter('./workspace/output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30, (width, height))
    for vis_frame in vis_frames:
        out.write(vis_frame)
    out.release()


# Script entry point.
if __name__ == '__main__':
    main()