import os
import cv2
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
import glob
from matplotlib import cm
import imageio
import torch
from mesh_center import mesh_center_r

class ImageDataset():
    """Eagerly loads a directory of frames into a single (T, H, W, C) uint8 array."""

    def __init__(self, img_dir, dir2=None, save_memory=False):
        # Frames are read from <img_dir>/video by default, or <img_dir>/<dir2>.
        self.img_dir = img_dir
        subdir = 'video' if dir2 is None else dir2
        self.img_files = sorted(glob.glob(os.path.join(img_dir, subdir, '*')))
        self.num_imgs = len(self.img_files)
        # NOTE(review): save_memory is stored but never consulted — frames are
        # always loaded eagerly below; confirm whether lazy loading was intended.
        self.save_memory = save_memory
        self.images = self.load_imgs()  # (T, H, W, C)

    def __len__(self):
        return self.num_imgs

    def __getitem__(self, idx):
        return self.images[idx]

    def load_imgs(self):
        """Read every file in self.img_files and stack them into one uint8 array."""
        frames = [
            np.array(Image.open(path)).astype(np.uint8)
            for path in self.img_files
        ]
        return np.stack(frames)

class VideoComposite():
    """Writes image sequences out as .mp4 files under a fixed save directory."""

    def __init__(
        self,
        save_dir: str = "./video_composites",
        fps: int = 10,
        mode: str = "rainbow",  # 'cool', 'optical_flow'
        linewidth: int = 1,
        pointwidth: int = 2,
        show_first_frame: int = 10,
    ):
        self.mode = mode
        self.save_dir = save_dir
        # 'optical_flow' has no matplotlib colormap; every other mode name is
        # looked up directly in matplotlib's colormap registry.
        if mode == "rainbow":
            self.color_map = cm.get_cmap("gist_rainbow")
        elif mode != "optical_flow":
            self.color_map = cm.get_cmap(mode)
        self.show_first_frame = show_first_frame

        self.linewidth = linewidth
        self.pointwidth = pointwidth
        self.fps = fps

    def save_video(self, images, filename):
        """
        Save an image sequence to <save_dir>/<filename>_composited.mp4.

        Args:
            images (torch.Tensor): shape (1, num_frames, channels, H, W), or
            images (np.ndarray / sequence): num_frames frames of (H, W, C).
            filename (str): basename for the output file (no extension).
        """
        os.makedirs(self.save_dir, exist_ok=True)

        if isinstance(images, torch.Tensor):
            # (1, T, C, H, W) -> T x (1, C, H, W)
            wide_list = list(images.unbind(1))
            # T x (1, C, H, W) -> T x (H, W, C)
            wide_list = [wide[0].permute(1, 2, 0).cpu().numpy() for wide in wide_list]
        else:
            wide_list = images

        # BUG FIX: the original f-string had no placeholder, so `filename` was
        # ignored and every call overwrote "(unknown)_composited.mp4".
        save_path = os.path.join(self.save_dir, f"{filename}_composited.mp4")

        # Create a writer object
        video_writer = imageio.get_writer(save_path, fps=self.fps)

        # Write frames to the video file.
        # NOTE(review): the first two frames and the last one are skipped
        # (wide_list[2:-1]) — confirm this trimming is intentional.
        for frame in wide_list[2:-1]:
            video_writer.append_data(frame)

        video_writer.close()

        print(f"Video saved to {save_path}")



def data_load(images_path, queries_path, trajs_path, visib_path):
    """Load the frames, query points, trajectories and visibilities for a run."""
    image_data = ImageDataset(images_path)
    H, W = image_data[0].shape[-3:-1]
    print(f"image_data: {image_data.images.shape}, H: {H}, W: {W}")
    # Queries are stored normalized; scale to pixel coordinates (W, H order)
    # and truncate to integer pixel indices.
    scale = np.array([W, H])
    query_points = (np.load(queries_path) * scale).astype(np.uint32)
    print(f"query point sample: W X H = {query_points[0]}")
    # Both arrays are stored with a leading batch axis of size 1; drop it.
    trajs = np.load(trajs_path)[0]
    visibies = np.load(visib_path)[0]
    print(f"len of trajs: {len(trajs)}, number of points: {len(trajs[0])}, ndim of points: {trajs.shape[-1]}")
    print(f"visibility of points: {visibies.shape}")
    return image_data, query_points, trajs, visibies
def images_load(images_path):
    """Load all frames under <images_path>/video and report their shape."""
    image_data = ImageDataset(images_path)
    # Frames are (T, H, W, C); take H and W from the first frame.
    H, W = image_data[0].shape[-3:-1]
    print(f"image_data: {image_data.images.shape}, H: {H}, W: {W}")
    return image_data

def traj_video_composite(images, track_traj, filname="test_video"):
    """Overlay a tracked point as a filled red disk on each frame and save a video.

    Args:
        images (np.ndarray): (T, H, W, C) uint8 frames.
        track_traj: per-frame center coordinates of the tracked point
            (row, col order — see the column swap in the caller).
        filname (str): basename for the output video file.
    """
    vctool = VideoComposite(save_dir=os.path.join(Elbow_WORK_DIR, "composites"))

    # images: (T, H, W, C)
    H, W = images.shape[-3:-1]

    center_mask = np.zeros_like(images)
    valid_center_mask_points = []

    for i, point_coord in enumerate(track_traj):
        center_point = point_coord
        valid_center_mask_points.append(center_point)
        # All in-bounds pixel coordinates within radius r of the center.
        valid_c_msk = mesh_center_r(center_point, height=H, width=W, r=15)
        center_mask[i, valid_c_msk[:, 0], valid_c_msk[:, 1], :] = 1

    # BUG FIX: work on a copy — the original aliased `images` and mutated the
    # caller's frame array in place.
    vis = images.copy()

    vis *= (1 - center_mask)  # black out the target region
    # BUG FIX: the original did `center_mask[:, :, :] = (255, 0, 0)`, which
    # overwrote the ENTIRE mask with red (losing the disk shape) and then
    # wrapped uint8 everywhere outside the target on the add below. Scale the
    # 0/1 mask by the red color instead so only the target region is painted.
    center_mask = center_mask * np.array([255, 0, 0], dtype=vis.dtype)
    vis += center_mask  # paint the (now-black) target region red

    vctool.save_video(vis, filname)

def test_video_composite_all():
    """Composite one saved point trajectory onto the elbow video frames."""
    image_data = images_load(images_path=Elbow_WORK_DIR)

    # Other trajectory files available in the same directory:
    #   results_add_keypoints_track_724_trajectory
    #   results_only_optical_flow_track_724_trajectory
    filname = "results_add_goe_feats_track_724_trajectory.txt"

    point_traj = np.loadtxt(os.path.join(Elbow_WORK_DIR, filname))

    images = image_data.images
    # Trajectory is stored as (x, y); swap columns into (row, col) order.
    track_traj = point_traj.astype(np.int32)[:, [1, 0]]

    traj_video_composite(images, track_traj, filname=filname.split(".")[0])


def write_images_from_sam2mask():
    """Convert the stacked SAM2 mask tensor into one palette PNG per frame."""
    elbow_mask_dir = os.path.join(WORK_DIR, "elbow_thirds", "sam2_mask")
    masks = torch.load(os.path.join(elbow_mask_dir, "all_mask.pt"))
    print(masks.shape)
    # Merge all object channels into a single per-frame mask.
    # NOTE(review): sum(0) can yield values > 1 where objects overlap; the
    # red/black palette only defines indices 0 and 1 — confirm overlaps are
    # impossible or clamp before saving.
    masks = masks.sum(0)
    masks = masks.squeeze(1)

    # Save each frame's mask as "00001.png", "00002.png", ...
    mask_dir = os.path.join(WORK_DIR, "elbow_thirds", "mask")
    # BUG FIX: the output directory was assumed to exist; create it so the
    # first save does not raise FileNotFoundError.
    os.makedirs(mask_dir, exist_ok=True)
    for i, mask in enumerate(masks):
        mask_name = os.path.join(mask_dir, f"{i+1:05d}.png")
        save_binary_image_with_red_foreground(mask.numpy(), mask_name)

        
def save_binary_image_with_red_foreground(binary_array, output_path):
    """Save a 0/1 array as a palette PNG: index 0 black (background), index 1 red (foreground)."""
    # Palette entries: index 0 -> black, index 1 -> red; pad with black up to
    # the full 256-color (768-byte) PIL palette.
    palette = [0, 0, 0, 255, 0, 0]
    palette += [0] * (768 - len(palette))

    # Interpret the array values as palette indices ('P' mode).
    paletted = Image.fromarray(binary_array.astype(np.uint8), mode='P')
    paletted.putpalette(palette)
    paletted.save(output_path, format='PNG')
    


# Directory containing this script; all data paths below are relative to it.
WORK_DIR = os.path.dirname(os.path.abspath(__file__))
# Working directory for the elbow video sequence: <script dir>/videos/elbow.
Elbow_WORK_DIR = os.path.join(WORK_DIR, "videos/elbow")


if __name__ == '__main__':
    # Blank line to separate the run's output from shell noise.
    print()
    test_video_composite_all()
    # Alternative entry points kept for reference:
    # bear_WORK_DIR = os.path.join(WORK_DIR, "elbow_thirds")
    # image_data = ImageDataset(img_dir=bear_WORK_DIR, dir2="mask")
    # images = image_data.images
    # write_images_from_sam2mask()
