import os
from os import path
from argparse import ArgumentParser
import shutil

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np
from PIL import Image
from tqdm import tqdm

from inference.data.test_datasets import LongTestDataset, DAVISTestDataset, YouTubeVOSTestDataset
from inference.data.mask_mapper import MaskMapper
from model.network import XMem
from inference.inference_core import InferenceCore
import cv2

from inference.interact.interactive_utils import *
from inference.interact.interaction import *
from inference.interact.resource_manager import ResourceManager
from inference.interact.gui_utils import *


# Inference-only script: disable autograd globally.
torch.set_grad_enabled(False)

# Pick the best available compute backend: CUDA, then Apple MPS, then CPU.
if torch.cuda.is_available():
    _backend = "cuda"
elif torch.backends.mps.is_available():
    _backend = "mps"
else:
    _backend = "cpu"
device = torch.device(_backend)


# Directory holding the original frames (.jpg).
image_path = "/home/t/workspace/tld_-xmem/workspace/177661140-f690156b-1775-4cd7-acd7-1738a5c92f30/images"
# Directory holding one sub-folder of per-object masks (.png) per frame.
mask_path = "/home/t/langsam_for_tracking/logs/mask0/"

def load_masks_and_images(image_path, mask_path):
    """Load frames and their per-object masks from disk.

    For every ``*.jpg`` frame in ``image_path`` that has a matching mask
    sub-directory under ``mask_path`` (named after the frame's stem), read
    all ``*.png`` masks in that directory, binarize them, and compute a
    tight bounding box per mask. Frames without a mask directory are
    skipped entirely (they do not appear in any returned list).

    Args:
        image_path: Directory containing the input ``.jpg`` frames.
        mask_path: Directory containing one sub-directory of ``.png`` masks
            per frame (directory name == frame filename without extension).

    Returns:
        Tuple of three parallel lists (one entry per frame with masks):
            masks_list:  list of lists of binary (0/1) numpy masks,
            bboxes_list: list of lists of ``[xmin, ymin, xmax, ymax]``
                         boxes (``[0, 0, 0, 0]`` for an all-zero mask),
            images_list: list of BGR frames as read by ``cv2.imread``.
    """
    image_files = sorted(f for f in os.listdir(image_path) if f.endswith('.jpg'))
    masks_list = []
    bboxes_list = []
    images_list = []

    # Context manager guarantees the progress bar is closed (the original
    # never called pbar.close()).
    with tqdm(total=len(image_files), unit='image') as pbar:
        for image_file in image_files:
            mask_dir = os.path.splitext(image_file)[0]
            mask_dir_path = os.path.join(mask_path, mask_dir)

            if os.path.isdir(mask_dir_path):
                mask_files = sorted(f for f in os.listdir(mask_dir_path) if f.endswith('.png'))
                masks = []
                bboxes = []

                for mask_file in mask_files:
                    mask_file_path = os.path.join(mask_dir_path, mask_file)
                    # Binarize: any non-zero pixel becomes 1.
                    # NOTE(review): assumes the PNG decodes to a single-channel
                    # (H, W) array — a multi-channel mask would break the bbox
                    # axis logic below; confirm against the mask writer.
                    mask = np.where(np.array(Image.open(mask_file_path)) > 0, 1, 0)
                    masks.append(mask)

                    # Tight bounding box around the non-zero region.
                    rows = np.any(mask, axis=1)
                    cols = np.any(mask, axis=0)
                    if rows.any():
                        rmin, rmax = np.where(rows)[0][[0, -1]]
                        cmin, cmax = np.where(cols)[0][[0, -1]]
                        bboxes.append([cmin, rmin, cmax, rmax])
                    else:
                        # FIX: an all-zero mask made the fancy indexing above
                        # raise IndexError; emit a degenerate box instead so
                        # `bboxes` stays aligned with `masks`.
                        bboxes.append([0, 0, 0, 0])

                masks_list.append(masks)
                bboxes_list.append(bboxes)

                images_list.append(cv2.imread(os.path.join(image_path, image_file)))

            pbar.update(1)

    return masks_list, bboxes_list, images_list

def compute_intersection_ratio(mask1, mask2):
    """Return the intersection-over-union (IoU) of two binary masks.

    Both inputs are interpreted element-wise as booleans and must share a
    broadcastable shape. Returns 0 when the union is empty (both masks are
    all-zero), avoiding a division by zero.
    """
    overlap = np.sum(np.logical_and(mask1, mask2))
    combined = np.sum(np.logical_or(mask1, mask2))
    return overlap / combined if combined else 0

def main():
    """Track one object through a frame sequence with XMem.

    Seeds the tracker with one pre-computed mask from the first frame, then
    propagates it frame by frame. On every ``sync_every``-th frame the
    propagated mask is re-seeded from the best-overlapping detection mask
    (gated on IoU, minimum area, and area inflation). Each frame is shown
    with the predicted bbox and translucent red mask overlays.
    """
    # `nullcontext` may also arrive via the wildcard imports at the top of
    # the file, but an explicit import makes this function self-contained.
    from contextlib import nullcontext

    start_frame = 0
    # Index of the seed object's mask within the first frame's mask list.
    obj_id = 3

    # Arguments parsing
    parser = ArgumentParser()
    parser.add_argument('--model', default='./saves/XMem.pth')
    parser.add_argument('--s2m_model', default='saves/s2m.pth')
    parser.add_argument('--fbrs_model', default='saves/fbrs.pth')

    """
    Priority 1: If a "images" folder exists in the workspace, we will read from that directory
    Priority 2: If --images is specified, we will copy/resize those images to the workspace
    Priority 3: If --video is specified, we will extract the frames to the workspace (in an "images" folder) and read from there

    In any case, if a "masks" folder exists in the workspace, we will use that to initialize the mask
    That way, you can continue annotation from an interrupted run as long as the same workspace is used.
    """
    parser.add_argument('--images', help='Folders containing input images.', default=None)
    parser.add_argument('--video', help='Video file readable by OpenCV.', default=None)
    parser.add_argument('--workspace', help='directory for storing buffered images (if needed) and output masks', default=None)

    parser.add_argument('--buffer_size', help='Correlate with CPU memory consumption', type=int, default=100)

    parser.add_argument('--num_objects', type=int, default=1)

    # Long-memory options
    # Defaults. Some can be changed in the GUI.
    parser.add_argument('--max_mid_term_frames', help='T_max in paper, decrease to save memory', type=int, default=10)
    parser.add_argument('--min_mid_term_frames', help='T_min in paper, decrease to save memory', type=int, default=5)
    parser.add_argument('--max_long_term_elements', help='LT_max in paper, increase if objects disappear for a long time',
                                                    type=int, default=10000)
    parser.add_argument('--num_prototypes', help='P in paper', type=int, default=128)

    parser.add_argument('--top_k', type=int, default=30)
    parser.add_argument('--mem_every', type=int, default=10)
    parser.add_argument('--deep_update_every', help='Leave -1 normally to synchronize with mem_every', type=int, default=-1)
    parser.add_argument('--no_amp', help='Turn off AMP', action='store_true')
    parser.add_argument('--size', default=480, type=int,
            help='Resize the shorter side to this size. -1 to use original resolution. ')
    args = parser.parse_args()

    config = vars(args)
    config['enable_long_term'] = True
    config['enable_long_term_count_usage'] = True

    # AMP only applies on CUDA; elsewhere fall back to a no-op context.
    with torch.cuda.amp.autocast(enabled=not args.no_amp) if device.type == 'cuda' else nullcontext():

        # Load our checkpoint.
        network = XMem(config, args.model, map_location=device).to(device).eval()
        if args.model is not None:
            # FIX: map_location keeps a CUDA-trained checkpoint loadable on
            # CPU/MPS. NOTE(review): weights may already have been loaded by
            # the XMem constructor above — confirm whether this second load
            # is intentional.
            model_weights = torch.load(args.model, map_location=device)
            network.load_weights(model_weights, init_as_zero_if_needed=True)
        else:
            print('No model loaded.')

        num_objects = 1
        processor = InferenceCore(network, config)
        processor.set_all_labels(list(range(1, num_objects+1)))

        # Load all frames, per-frame detection masks, and their bboxes.
        masks_list, bboxes_list, images_list = load_masks_and_images(image_path, mask_path)

        # FIX: fail with a clear message instead of an IndexError below.
        if not images_list:
            raise RuntimeError(f'No frames with masks found under {image_path}')

        num_frames = len(images_list)
        height, width, _ = images_list[0].shape[:3]

        print("num_frames:", num_frames)
        print("height:", height)
        print("width:", width)

        # Placeholders for per-frame state (all overwritten below).
        current_image = np.zeros((height, width, 3), dtype=np.uint8)
        current_image_torch = None
        current_mask = np.zeros((height, width), dtype=np.uint8)
        # FIX: use the selected device instead of hard-coded .cuda(), so the
        # MPS/CPU fallback chosen at module level actually works.
        current_prob = torch.zeros((num_objects, height, width), dtype=torch.float).to(device)
        print("current_prob device:", current_prob.device)

        # Seed the tracker with object `obj_id` of the first frame.
        current_image = images_list[start_frame]
        current_mask = masks_list[start_frame][obj_id]
        current_bbox = bboxes_list[start_frame][obj_id]

        vis_frame = current_image.copy()
        vis_frame = cv2.rectangle(vis_frame, (current_bbox[0], current_bbox[1]), (current_bbox[2], current_bbox[3]), (0, 255, 0), 2)

        print("shape of current_mask:", current_mask.shape)
        print("num_objects:", num_objects)

        print("max of current_mask:", np.max(current_mask))
        print("min of current_mask:", np.min(current_mask))
        print(current_mask)

        # NOTE(review): interactive_utils.image_to_torch defaults its device
        # to CUDA; pass the selected device explicitly — confirm the helper's
        # signature accepts it positionally.
        current_image_torch, current_image_torch_no_norm = image_to_torch(current_image, device)
        current_prob = index_numpy_to_one_hot_torch(current_mask, num_objects+1).to(device)

        print("shape of current_prob:", current_prob.shape)
        print("shape of current_image_torch:", current_image_torch.shape)

        # Channel 0 is background; the processor takes object channels only.
        current_prob = processor.step(current_image_torch, current_prob[1:])

        display_duration_ms = 1

        # Re-seed from a detection mask every `sync_every` frames.
        sync_every = 1

        for t in range(start_frame, num_frames):
            print("t:", t)

            current_image = images_list[t]
            current_image_torch, current_image_torch_no_norm = image_to_torch(current_image, device)
            current_prob = processor.step(current_image_torch)
            current_image_masks = masks_list[t]

            current_mask = torch_prob_to_numpy_mask(current_prob)
            sum_of_current_mask = np.sum(current_mask)

            # Pick the detection mask that best overlaps the propagated mask,
            # gated on IoU > 0.4, a minimum predicted area, and the two areas
            # differing by more than ~10% (otherwise re-seeding is pointless).
            max_intersection_ratio = 0
            max_intersection_mask = None
            for mask in current_image_masks:
                sum_of_mask = np.sum(mask)
                if sum_of_mask == 0:
                    # FIX: an empty detection mask divided by zero below.
                    continue
                intersection_ratio = compute_intersection_ratio(mask, current_mask)
                inflation_ratio = sum_of_current_mask / sum_of_mask
                if intersection_ratio > max_intersection_ratio and intersection_ratio > 0.4 \
                        and sum_of_current_mask > 400 \
                        and not (0.9 < inflation_ratio < 1.1):
                    max_intersection_ratio = intersection_ratio
                    max_intersection_mask = mask

            if t > 0 and t % sync_every == 0 and max_intersection_mask is not None:
                # Binarize the chosen detection mask and feed it back to the
                # processor as the new reference.
                max_intersection_mask_ = np.where(max_intersection_mask > 0, 1, 0)
                max_intersection_mask_torch = index_numpy_to_one_hot_torch(max_intersection_mask_, num_objects+1).to(device)
                current_prob = processor.step(current_image_torch, max_intersection_mask_torch[1:])

            # Visualize: bbox + translucent red overlays when a target exists.
            if np.max(current_mask) > 0:
                rows = np.any(current_mask, axis=1)
                cols = np.any(current_mask, axis=0)
                rmin, rmax = np.where(rows)[0][[0, -1]]
                cmin, cmax = np.where(cols)[0][[0, -1]]
                current_bbox = [cmin, rmin, cmax, rmax]
                vis_frame = current_image.copy()
                vis_frame = cv2.rectangle(vis_frame, (current_bbox[0], current_bbox[1]), (current_bbox[2], current_bbox[3]), (0, 255, 0), 2)
                # Overlay the propagated mask in red (BGR channel 2).
                red_mask = np.zeros((height, width, 3), dtype=np.uint8)
                red_mask[:, :, 2] = current_mask * 255
                vis_frame = cv2.addWeighted(vis_frame, 0.5, red_mask, 0.5, 0)

                if max_intersection_mask is not None:
                    # Also overlay the matched detection mask.
                    red_max_intersection_mask = np.zeros((height, width, 3), dtype=np.uint8)
                    red_max_intersection_mask[:, :, 2] = max_intersection_mask * 255
                    vis_frame = cv2.addWeighted(vis_frame, 0.5, red_max_intersection_mask, 0.5, 0)
            else:
                # No target this frame: show the raw image.
                vis_frame = current_image.copy()

            cv2.imshow("vis_frame", vis_frame)
            cv2.waitKey(display_duration_ms)


# Script entry point.
if __name__ == '__main__':
    main()