import os
from os import path
from argparse import ArgumentParser
import shutil

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np
from PIL import Image
from tqdm import tqdm

from inference.data.test_datasets import LongTestDataset, DAVISTestDataset, YouTubeVOSTestDataset
from inference.data.mask_mapper import MaskMapper
from model.network import XMem
from inference.inference_core import InferenceCore
import cv2

from inference.interact.interactive_utils import *
from inference.interact.interaction import *
from inference.interact.resource_manager import ResourceManager
from inference.interact.gui_utils import *
import copy

import tkinter as tk
from tkinter import filedialog
import colorsys
import random
import math
import pickle
from contextlib import nullcontext

# Inference-only script: gradients are never needed.
torch.set_grad_enabled(False)

# Pick the best available compute device.
# NOTE(review): later code calls .cuda() directly, which will fail when this
# resolves to mps/cpu — confirm intended hardware.
if torch.cuda.is_available():
    device = torch.device("cuda")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")


# Path to the original image frames
image_path = "/home/t/workspace/tld_-xmem/workspace/177661140-f690156b-1775-4cd7-acd7-1738a5c92f30/images"
# Path to the per-frame mask folders
mask_path = "/home/t/langsam_for_tracking/logs/mask0/"

# Alternative dataset configurations, kept for convenience:
# # original image path
# image_path = "/home/t/workspace/tld_-xmem/workspace/飞书20240520-194847/images"
# # mask folder path
# mask_path = "/home/t/langsam_for_tracking/logs/mask_sub2/"

# # original image path
# image_path = "/home/t/workspace/tld_-xmem/workspace/飞书20240520-220058/images"
# # mask folder path
# mask_path = "/home/t/langsam_for_tracking/logs/mask_bike1/"

# # original image path
# image_path = "/home/t/workspace/tld_-xmem/workspace/bird/images"
# # mask folder path
# mask_path = "/home/t/langsam_for_tracking/logs/mask_birds/"

# # original image path
# image_path = "/home/t/workspace/tld_-xmem/workspace/pigs/images/"
# # mask folder path
# mask_path = "/home/t/langsam_for_tracking/logs/pigs/"

# Open file chooser dialog
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
print("file_path:", file_path)
# If file_path points to a file, strip the directory and extension to get the bare name.
# NOTE(review): file_name is computed but never used later in this file — confirm purpose.
if path.isfile(file_path):
    file_name = path.basename(file_path)
    file_name = path.splitext(file_name)[0]
    print("file_name:", file_name)

def load_masks_and_images(image_path, mask_path):
    """Load every frame and its per-object binary masks and bounding boxes.

    Args:
        image_path: directory containing the .jpg frames.
        mask_path: directory containing one sub-directory of .png masks per
            frame, named after the frame file (without extension).

    Returns:
        (masks_list, bboxes_list, images_list): per-frame arrays of 0/1 masks,
        per-frame arrays of [xmin, ymin, xmax, ymax] boxes aligned with the
        masks, and the BGR images loaded with cv2. Frames without a mask
        directory are skipped entirely (as before); individual all-background
        mask files are now skipped instead of crashing the bbox computation.
    """
    image_files = sorted(f for f in os.listdir(image_path) if f.endswith('.jpg'))
    masks_list = []
    bboxes_list = []
    images_list = []

    # Context manager guarantees the progress bar is closed (the original
    # never called pbar.close()).
    with tqdm(total=len(image_files), unit='image') as pbar:
        for image_file in image_files:
            mask_dir_path = os.path.join(mask_path, os.path.splitext(image_file)[0])

            if os.path.isdir(mask_dir_path):
                masks = []
                bboxes = []

                for mask_name in sorted(f for f in os.listdir(mask_dir_path) if f.endswith('.png')):
                    # `with` releases the file handle even if np.array raises.
                    with Image.open(os.path.join(mask_dir_path, mask_name)) as mask_img:
                        # Binarize: any non-zero pixel becomes 1.
                        mask = np.where(np.array(mask_img) > 0, 1, 0)

                    rows = np.any(mask, axis=1)
                    cols = np.any(mask, axis=0)
                    if not rows.any():
                        # Empty mask: no foreground pixels, so no bbox exists.
                        # Skip it to keep masks and bboxes aligned.
                        continue
                    rmin, rmax = np.where(rows)[0][[0, -1]]
                    cmin, cmax = np.where(cols)[0][[0, -1]]
                    masks.append(mask)
                    bboxes.append([cmin, rmin, cmax, rmax])

                masks_list.append(np.array(masks))
                bboxes_list.append(np.array(bboxes))
                images_list.append(cv2.imread(os.path.join(image_path, image_file)))

            pbar.update(1)

    return masks_list, bboxes_list, images_list

def load_masks_and_image(image_path, mask_path, frame_index):
    """Load one frame plus its per-object binary masks and bounding boxes.

    Args:
        image_path: directory containing the .jpg frames.
        mask_path: directory containing one sub-directory of .png masks per
            frame, named after the frame file (without extension).
        frame_index: index of the frame within the sorted .jpg listing.

    Returns:
        (masks, bboxes, image): list of 0/1 masks, list of
        [xmin, ymin, xmax, ymax] boxes aligned with `masks`, and the BGR
        image from cv2. All-background mask files are skipped instead of
        crashing the bbox computation (the original raised IndexError).
    """
    image_files = sorted(f for f in os.listdir(image_path) if f.endswith('.jpg'))
    image_file = image_files[frame_index]

    mask_dir_path = os.path.join(mask_path, os.path.splitext(image_file)[0])

    masks = []
    bboxes = []

    if os.path.isdir(mask_dir_path):
        for mask_name in sorted(f for f in os.listdir(mask_dir_path) if f.endswith('.png')):
            # `with` releases the file handle even if np.array raises.
            with Image.open(os.path.join(mask_dir_path, mask_name)) as mask_img:
                # Binarize: any non-zero pixel becomes 1.
                mask = np.where(np.array(mask_img) > 0, 1, 0)

            rows = np.any(mask, axis=1)
            cols = np.any(mask, axis=0)
            if not rows.any():
                # Empty mask: no foreground pixels, so no bbox exists.
                # Skip it to keep masks and bboxes aligned.
                continue
            rmin, rmax = np.where(rows)[0][[0, -1]]
            cmin, cmax = np.where(cols)[0][[0, -1]]
            masks.append(mask)
            bboxes.append([cmin, rmin, cmax, rmax])

    image = cv2.imread(os.path.join(image_path, image_file))

    return masks, bboxes, image

def get_num_frames(image_path):
    """Count the .jpg files in *image_path*; used as the frame count."""
    return sum(1 for name in os.listdir(image_path) if name.endswith('.jpg'))

def compute_intersection_ratio(mask1, mask2):
    """Return the intersection-over-union (IoU) of two binary masks.

    Returns 0 when both masks are empty (union has no pixels).
    """
    union_count = np.count_nonzero(np.logical_or(mask1, mask2))
    if not union_count:
        return 0
    intersection_count = np.count_nonzero(np.logical_and(mask1, mask2))
    return intersection_count / union_count


# 遍历所有 bbox，绘制 bbox，以及他们的编号
selected_bbox_index = -1
selected_bbox_indices = []
mouse_pos = (0, 0)

# 指定随机种子
random.seed(42)

def random_saturated_color():
    """Pick a random hue and return it as a fully saturated, full-value RGB tuple."""
    rgb = colorsys.hsv_to_rgb(random.random(), 1.0, 1.0)
    r, g, b = (int(channel * 255) for channel in rgb)
    return r, g, b

def color_diff(color1, color2):
    """Return the perceptual distance between two RGB colors.

    Uses the low-cost "redmean" approximation:
        sqrt((2 + rm/256)*dR^2 + 4*dG^2 + (2 + (255 - rm)/256)*dB^2)
    where rm is the mean of the two red channels.

    Bug fixed: the previous version used the raw 0-255 red mean as the
    weight, so the blue weight (3 - rm) went negative for bright reds and
    math.sqrt raised ValueError (e.g. (255, 0, 255) vs (255, 0, 0)).
    """
    rm = (color1[0] + color2[0]) / 2.0
    dr = color1[0] - color2[0]
    dg = color1[1] - color2[1]
    db = color1[2] - color2[2]
    return math.sqrt((2 + rm / 256) * dr ** 2 + 4 * dg ** 2 + (2 + (255 - rm) / 256) * db ** 2)

# Palette holder; NOTE(review): main() builds its own local `saturated_colors`,
# shadowing this module-level list — confirm which one is intended.
saturated_colors = []
max_iterations = 1000  # maximum iterations (NOTE(review): unused in this file)

def main():
    """Run the interactive, detector-assisted XMem tracking loop.

    Workflow:
      1. Parse command-line arguments and assemble the XMem inference config.
      2. Show the first frame; the user clicks detector bounding boxes to
         select the objects to track, then presses 'q' to confirm.
      3. Propagate the selected masks through every frame with XMem,
         periodically re-initialising ("detector intervention") from the
         per-frame detector masks when they strongly overlap the tracked masks.
      4. Save the per-frame, per-object bounding boxes to
         ./workspace/bbox_list.pkl with pickle.
    """

    start_frame = 0
    end_frame = 900  # NOTE(review): never used below — confirm whether it should cap the loop
    obj_id = 3       # overwritten by --obj_id once arguments are parsed

    # Arguments parsing
    parser = ArgumentParser()
    parser.add_argument('--model', default='./saves/XMem.pth')
    parser.add_argument('--s2m_model', default='saves/s2m.pth')
    parser.add_argument('--fbrs_model', default='saves/fbrs.pth')

    parser.add_argument('--obj_id', type=int, default=0)

    """
    Priority 1: If a "images" folder exists in the workspace, we will read from that directory
    Priority 2: If --images is specified, we will copy/resize those images to the workspace
    Priority 3: If --video is specified, we will extract the frames to the workspace (in an "images" folder) and read from there

    In any case, if a "masks" folder exists in the workspace, we will use that to initialize the mask
    That way, you can continue annotation from an interrupted run as long as the same workspace is used.
    """
    parser.add_argument('--images', help='Folders containing input images.', default=None)
    parser.add_argument('--video', help='Video file readable by OpenCV.', default=None)
    parser.add_argument('--workspace', help='directory for storing buffered images (if needed) and output masks', default=None)

    parser.add_argument('--buffer_size', help='Correlate with CPU memory consumption', type=int, default=100)
    
    parser.add_argument('--num_objects', type=int, default=1)

    # Long-memory options
    # Defaults. Some can be changed in the GUI.
    parser.add_argument('--max_mid_term_frames', help='T_max in paper, decrease to save memory', type=int, default=10)
    parser.add_argument('--min_mid_term_frames', help='T_min in paper, decrease to save memory', type=int, default=5)
    parser.add_argument('--max_long_term_elements', help='LT_max in paper, increase if objects disappear for a long time', 
                                                    type=int, default=10000)
    parser.add_argument('--num_prototypes', help='P in paper', type=int, default=128) 

    parser.add_argument('--top_k', type=int, default=30)
    parser.add_argument('--mem_every', type=int, default=10)
    parser.add_argument('--deep_update_every', help='Leave -1 normally to synchronize with mem_every', type=int, default=-1)
    parser.add_argument('--no_amp', help='Turn off AMP', action='store_true')
    parser.add_argument('--size', default=480, type=int, 
            help='Resize the shorter side to this size. -1 to use original resolution. ')
    args = parser.parse_args()

    # create temporary workspace if not specified
    config = vars(args)
    config['enable_long_term'] = True
    config['enable_long_term_count_usage'] = True

    obj_id = args.obj_id

    # Generate 100 random high-saturation colors (shadows the module-level list).
    saturated_colors = [random_saturated_color() for _ in range(100)]
    saturated_colors_mat = np.array(saturated_colors)

    # AMP autocast only on CUDA; no-op context elsewhere.
    with torch.cuda.amp.autocast(enabled=not args.no_amp) if device.type == 'cuda' else nullcontext():

        num_frames = get_num_frames(image_path)
        # Load the first frame together with its detector masks / bboxes.
        masks, bboxes, image0 = load_masks_and_image(image_path, mask_path, start_frame)
        height, width = image0.shape[:2]

        print("num_frames:", num_frames)
        print("height:", height)
        print("width:", width)

        def mouse_callback(event, x, y, flags, param):
            # Record clicks that land inside a detector bbox as object selections.
            global selected_bbox_index, mouse_pos, selected_bbox_indices
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_pos = (x, y)
                for i, bbox in enumerate(bboxes):
                    if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
                        selected_bbox_index = i
                        if i not in selected_bbox_indices:
                            selected_bbox_indices.append(i)
                        print(f"Selected bbox index: {selected_bbox_index}")
                        break
            elif event == cv2.EVENT_MOUSEMOVE:
                mouse_pos = (x, y)

        cv2.namedWindow("Segmentation Augmented Tracking")
        cv2.setMouseCallback("Segmentation Augmented Tracking", mouse_callback)

        # Create a flag to track if the button is clicked
        button_clicked = False

        # Button callback function
        def button_callback(event, x, y, flags, param):
            nonlocal button_clicked
            if event == cv2.EVENT_LBUTTONDOWN:
                button_clicked = True
        
        # # Create the button
        # cv2.createButton("Exit", button_callback, None,cv2.QT_PUSH_BUTTON,1)

        vis_frames = []

        # Selection UI loop on the first frame: hovered boxes turn red,
        # selected boxes are outlined purple; press 'q' to confirm and continue.
        while True:
            vis_frame = image0.copy()
            for i, bbox in enumerate(bboxes):
                if bbox[0] < mouse_pos[0] < bbox[2] and bbox[1] < mouse_pos[1] < bbox[3]:
                    vis_frame = cv2.rectangle(vis_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
                else:
                    vis_frame = cv2.rectangle(vis_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
                if i in selected_bbox_indices:
                    vis_frame = cv2.rectangle(vis_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (200, 0, 200), 4)
                
                vis_frame = cv2.putText(vis_frame, str(i), (bbox[0], bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
            vis_frame = cv2.circle(vis_frame, mouse_pos, 10, (255, 0, 0), -1, cv2.LINE_AA)
            # vis_frames.append(vis_frame)
            cv2.imshow("Segmentation Augmented Tracking", vis_frame)
            if cv2.waitKey(30) & 0xFF == ord('q'):
                break

        if selected_bbox_indices:
            print(f"--------------Selected bbox indices: {selected_bbox_indices}")

        # NOTE(review): if the user confirms without selecting anything,
        # num_objects is 0 and the steps below degenerate — confirm handling.
        num_objects = len(selected_bbox_indices)

        target_imgs = [None for _ in range(num_objects)]
        target_kfs = [None for _ in range(num_objects)]

        # Build num_objects solid-color images, one per object color.
        # NOTE(review): color_imgs is never used later — confirm purpose.
        color_imgs = []
        for i in range(num_objects):
            color_img = image0.copy()
            color_img[:, :] = saturated_colors[i]
            color_imgs.append(color_img)

        # Load our checkpoint
        # NOTE(review): the checkpoint appears to be loaded twice — once by the
        # XMem constructor (which receives args.model) and again via load_weights.
        network = XMem(config, args.model, map_location=device).to(device).eval()
        if args.model is not None:
            model_weights = torch.load(args.model)
            network.load_weights(model_weights, init_as_zero_if_needed=True)
        else:
            print('No model loaded.')
        processor = InferenceCore(network, config)
        processor.set_all_labels(list(range(1, num_objects+1)))
        # current frame info
        curr_frame_dirty = False
        current_image = np.zeros((height, width, 3), dtype=np.uint8)
        current_image_torch = None
        current_mask = np.zeros((height, width), dtype=np.uint8)
        seperated_masks = np.zeros((num_objects, height, width), dtype=np.uint8)
        # Allocate current_prob on the GPU.
        # NOTE(review): .cuda() here and below assumes CUDA even though `device`
        # may resolve to mps/cpu above — confirm intended hardware.
        current_prob = torch.zeros((num_objects, height, width), dtype=torch.float).cuda()
        # Confirm where current_prob actually lives.
        print("current_prob device:", current_prob.device)

        print("shape of current_mask:", current_mask.shape)
        print("num_objects:", num_objects)

        # Initialize current_image and current_mask from the first frame.
        current_image = image0.copy()
        # NOTE(review): this uses the *last* clicked index and is immediately
        # superseded by merged_mask below — looks redundant; confirm.
        current_mask = masks[selected_bbox_index].copy()
        selected_masks = np.array(masks)[selected_bbox_indices]
        selected_masks = np.array(selected_masks)
        print("shape of selected_masks:", selected_masks.shape)

        # Per-pixel argmax over the selected masks gives 1-based object labels...
        merged_mask = np.argmax(selected_masks, axis=0) + 1
        # ...then find the positions where every selected mask is 0 (background)...
        zero_mask = np.all(selected_masks == 0, axis=0)
        # ...and force those positions back to label 0.
        merged_mask[zero_mask] = 0
        merged_mask = merged_mask.astype(np.uint8)
        print("shape of merged_mask:", merged_mask.shape)
        # Print the maximum label present in merged_mask.
        print("max of merged_mask:", np.max(merged_mask))
        
        current_image_torch, current_image_torch_no_norm = image_to_torch(current_image)
        current_prob = index_numpy_to_one_hot_torch(merged_mask, num_objects+1).cuda()

        print("shape of current_prob:", current_prob.shape)
        print("shape of current_image_torch:", current_image_torch.shape)

        # Prime XMem with the first frame and its one-hot mask (channel 0 is background).
        current_prob = processor.step(current_image_torch, current_prob[1:])

        display_duration_ms = 1

        sync_every = 1  # re-sync with the detector every N frames

        # Progress bar over all frames.
        pbar = tqdm(total=num_frames, unit='frame')

        ground_truth_bbox = []

        for t in range(start_frame, num_frames):
            
            # Advance the progress bar.
            pbar.update(1)

            detector_updated = False

            # current
            current_image_masks, _, current_image = load_masks_and_image(image_path, mask_path, t)
            current_image_torch = None
            current_image_torch, current_image_torch_no_norm = image_to_torch(current_image)
            current_prob = processor.step(current_image_torch)

            # Merge all detector masks of this frame into a single mask.
            current_mask_all = np.zeros((height, width), dtype=np.uint8)
            current_mask_zero = np.zeros((height, width, 3), dtype=np.uint8)
            for mask in current_image_masks:
                current_mask_all = np.where(mask > 0, mask, current_mask_all)
            current_mask_all = current_mask_all.astype(np.uint8)*255
            # Expand current_mask_all to three channels.
            current_mask_all = cv2.merge([current_mask_all, current_mask_all, current_mask_all])
            current_mask_all_cpy = current_mask_all.copy()

            # Convert the tracker's probability map into a label mask.
            current_mask = torch_prob_to_numpy_mask(current_prob)

            #################### Detector intervention
            # Split current_mask into one binary (0/1) mask per object, stored in seperated_masks.
            seperated_masks = np.zeros((num_objects, height, width), dtype=np.uint8)
            segmented_masks = np.zeros((num_objects, height, width), dtype=np.uint8)
            valid_labels = []
            for i in range(num_objects):
                seperated_masks[i] = np.where(current_mask == i+1, 1, 0)

            for i in range(num_objects):
                _current_mask = seperated_masks[i]

                sum_of_current_mask = np.sum(_current_mask)

                # Pick the detector mask that overlaps this tracked mask the most,
                # accepting it only when the IoU clears the threshold, the tracked
                # mask is large enough, and the area ratio differs noticeably
                # from 1 (i.e. the detector genuinely disagrees with the tracker).
                max_intersection_ratio = 0
                max_intersection_mask = None
                diff_of_intersection = 0.1
                mask_low_threshold = 400
                intersection_ratio_threshold = 0.4
                for mask in current_image_masks:
                    intersection_ratio = compute_intersection_ratio(mask, _current_mask)
                    sum_of_mask = np.sum(mask)
                    inflation_ratio = sum_of_current_mask / sum_of_mask
                    if intersection_ratio > max_intersection_ratio and intersection_ratio > intersection_ratio_threshold \
                        and sum_of_current_mask > mask_low_threshold \
                        and not (inflation_ratio < 1+diff_of_intersection and inflation_ratio > 1-diff_of_intersection) :
                        max_intersection_ratio = intersection_ratio
                        max_intersection_mask = mask
                if max_intersection_mask is not None:
                    segmented_masks[i] = max_intersection_mask
                    valid_labels.append(i+1)

            # Re-merge segmented_masks into a single labeled mask.
            merged_segmented_mask = np.where(np.max(segmented_masks, axis=0) > 0, 
                                                np.argmax(segmented_masks, axis=0) + 1, 
                                                0).astype(np.uint8)
            
            # Feed the accepted detector masks back into XMem for the matched labels.
            if t > 0 and t % sync_every == 0 and len(valid_labels) > 0:
                max_intersection_mask_torch = index_numpy_to_one_hot_torch(merged_segmented_mask, num_objects+1).cuda()
                current_prob = processor.step(current_image_torch, max_intersection_mask_torch[1:], valid_labels=valid_labels)
                detector_updated = True
                current_mask = torch_prob_to_numpy_mask(current_prob)

            # find most similar mask in current_image_masks
            gt_frame = []
            for target in range(0, num_objects):
                # Binary mask containing only this object's pixels.
                target_mask = (current_mask == target+1)

                # Object absent from this frame: record None for its bbox.
                if np.max(target_mask) == 0:
                    gt_frame.append(None)
                    continue

                # Prefer the bbox of the best-overlapping detector mask (IoU > 0.6);
                # fall back to the tracker's own mask bbox otherwise.
                max_intersection_ratio = 0
                max_intersection_mask = None
                mask_low_threshold = 400
                intersection_ratio_threshold = 0.6
                for mask in current_image_masks:
                    intersection_ratio = compute_intersection_ratio(mask, target_mask)
                    if intersection_ratio > max_intersection_ratio and intersection_ratio > intersection_ratio_threshold :
                        max_intersection_ratio = intersection_ratio
                        max_intersection_mask = mask
                if max_intersection_mask is not None:
                    # get bbox of max_intersection_mask
                    rows = np.any(max_intersection_mask, axis=1)
                    cols = np.any(max_intersection_mask, axis=0)
                    rmin, rmax = np.where(rows)[0][[0, -1]]
                    cmin, cmax = np.where(cols)[0][[0, -1]]
                    target_bbox = [cmin, rmin, cmax, rmax]
                    gt_frame.append(target_bbox)
                else:
                    # get bbox of target_mask
                    rows = np.any(target_mask, axis=1)
                    cols = np.any(target_mask, axis=0)
                    rmin, rmax = np.where(rows)[0][[0, -1]]
                    cmin, cmax = np.where(cols)[0][[0, -1]]
                    target_bbox = [cmin, rmin, cmax, rmax]
                    gt_frame.append(target_bbox)

            # print(gt_frame)

            ground_truth_bbox.append(gt_frame)


            # Colorize: wherever current_mask is non-zero, paint the overlays with
            # the color assigned to that object id.
            # Positions of all non-zero pixels in current_mask...
            mask_indices = np.nonzero(current_mask)
            # ...their object ids, shifted to 0-based...
            mask_ids = current_mask[mask_indices] - 1
            # ...the corresponding palette colors...
            colors = saturated_colors_mat[mask_ids]
            # ...written into both overlay images.
            current_mask_all[mask_indices] = colors
            current_mask_zero[mask_indices] = colors

            #################### end of detector intervention

            # Draw the visualisation only when at least one object is present.
            if np.max(current_mask) > 0:

                # Blend the colored overlay into the frame (70% image / 30% overlay).
                vis_frame = cv2.addWeighted(current_image, 0.7, current_mask_zero, 0.3, 0)

                for target in range(0, num_objects):
                    # Binary mask containing only this object's pixels.
                    target_mask = (current_mask == target+1)

                    # Skip objects absent from this frame.
                    if np.max(target_mask) == 0:
                        continue

                    # Compute the object's bounding box.
                    rows = np.any(target_mask, axis=1)
                    cols = np.any(target_mask, axis=0)
                    rmin, rmax = np.where(rows)[0][[0, -1]]
                    cmin, cmax = np.where(cols)[0][[0, -1]]
                    target_bbox = [cmin, rmin, cmax, rmax]
                    # Crop the masked object patch out of current_image into target_imgs.
                    target_img_masked = current_image.copy()
                    target_img_masked[target_mask == 0] = 0
                    target_imgs[target] = target_img_masked[rmin:rmax, cmin:cmax, :]

                    # If this object was just re-synced from the detector,
                    # tag it "SAM-update" at the bbox's top-right corner.
                    if target+1 in valid_labels:
                        vis_frame = cv2.putText(vis_frame, "SAM-update", (target_bbox[2], target_bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 200), 2, cv2.LINE_AA)
                        vis_frame = cv2.putText(vis_frame, "SAM-update", (target_bbox[2], target_bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 200, 0), 1, cv2.LINE_AA)
                        # Draw the bbox.
                        vis_frame = cv2.rectangle(vis_frame, (target_bbox[0], target_bbox[1]), (target_bbox[2], target_bbox[3]), (0, 0, 200), 2)
                    else:
                        # Draw the bbox.
                        vis_frame = cv2.rectangle(vis_frame, (target_bbox[0], target_bbox[1]), (target_bbox[2], target_bbox[3]), saturated_colors[target], 2)
                    
                    # Draw the object id.
                    vis_frame = cv2.putText(vis_frame, str(target), (target_bbox[0], target_bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, saturated_colors[target], 2, cv2.LINE_AA)

            else:
                # No object: show the raw frame.
                vis_frame = current_image.copy()

            vis_frame = cv2.putText(vis_frame, "tracking result", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
            
            # 'q' quits; space toggles pause (waitKey(0) blocks until a key).
            cv2.imshow("Segmentation Augmented Tracking", vis_frame)
            _k = cv2.waitKey(display_duration_ms)
            if _k == ord('q'):
                exit(0)
            elif _k == ord(' '):
                if display_duration_ms == 0:
                    display_duration_ms = 1
                else:
                    display_duration_ms = 0

        # Persist the collected per-frame bboxes.
        # NOTE(review): this writes a pickle, not JSON — the old comment was misleading.
        with open('./workspace/bbox_list.pkl', 'wb') as file:
            pickle.dump(ground_truth_bbox, file, protocol=pickle.HIGHEST_PROTOCOL)


# Script entry point.
if __name__ == '__main__':
    main()