import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

# from utils.utils import xyxy2xywh, xywh2xyxy

# '''
# ，帮我把1280x768的图像（padding后的）裁剪为640x768的图像。
# 如果没有标签，则随机裁剪；如果有一个标签，则裁剪处距离方框右边至少有40pixel，
# 左边至少0pixel，两个条件都不符合则该组数据全为0（空白数据）；如果有两个数据，
# 则直接对半切分，任意取其中一份，以及对应的标签数据。事件图片和标签数据也要根据裁剪做出变化
# '''

# def crop_augment(img, event_img, labels):
#     """
#     对1280x768的图像（padding后）裁剪为640x768，事件图和标签同步裁剪。
#     labels: shape (N, 5), 格式[x1, y1, x2, y2, c]，像素坐标
#     """
#     H, W, _ = img.shape
#     assert W == 1280 and H == 768, "输入图像尺寸必须为1280x768"

#     # 情况1：无标签，随机裁剪
#     if labels.shape[0] == 0:
#         left = np.random.randint(0, W - 640 + 1)
#         right = left + 640
#         img_crop = img[:, left:right, :]
#         event_crop = event_img[:, left:right, :]
#         labels_crop = labels.copy()
#         return img_crop, event_crop, labels_crop

#     # 情况2：一个标签
#     elif labels.shape[0] == 1:
#         x1 = int(labels[0, 0])
#         x2 = int(labels[0, 2])

#         # 条件2：裁剪后图片右边到方框右边 > 40 pixel
#         # 即 right - x2 > 40  => left > x2 - 600
#         min_left = max(x2 - 600 + 1, 0)
#         # 条件1：裁剪后图片左边到方框左边 > 0 pixel
#         # 即 x1 - left > 0  => left < x1
#         max_left = min(x1 - 1, W - 640)

#         # 必须 min_left <= max_left 才有合法区间
#         if min_left > max_left:
#             # 不满足条件，返回全0
#             img_crop = np.zeros((H, 640, 3), dtype=img.dtype)
#             event_crop = np.zeros((H, 640, 3), dtype=event_img.dtype)
#             labels_crop = np.zeros((0, 5), dtype=labels.dtype)
#             return img_crop, event_crop, labels_crop

#         # 随机选一个合法的left
#         left = np.random.randint(min_left, max_left + 1)
#         right = left + 640
#         img_crop = img[:, left:right, :]
#         event_crop = event_img[:, left:right, :]
#         # 标签同步偏移
#         labels_crop = labels.copy()
#         labels_crop[:, 0] -= left
#         labels_crop[:, 2] -= left
#         # 剪裁后超出范围的标签设为0
#         if labels_crop[0, 0] < 0 or labels_crop[0, 2] > 640:
#             labels_crop = np.zeros((0, 5), dtype=labels.dtype)
#         return img_crop, event_crop, labels_crop

#     # 情况3：两个标签
#     elif labels.shape[0] == 2:
#         # 取中线
#         mid = W // 2
#         # 随机选左半还是右半
#         if np.random.rand() < 0.5:
#             left = 0
#             right = 640
#             mask = (labels[:, 2] <= right)
#         else:
#             left = 640
#             right = 1280
#             mask = (labels[:, 0] >= left)
#         img_crop = img[:, left:right, :]
#         event_crop = event_img[:, left:right, :]
#         # 只保留在裁剪区域内的标签
#         labels_crop = labels[mask].copy()
#         labels_crop[:, 0] -= left
#         labels_crop[:, 2] -= left
#         # 剪裁后超出范围的标签设为0
#         valid = (labels_crop[:, 0] >= 0) & (labels_crop[:, 2] <= 640)
#         labels_crop = labels_crop[valid]
#         return img_crop, event_crop, labels_crop

#     # 其他情况，直接返回空
#     else:
#         img_crop = np.zeros((H, 640, 3), dtype=img.dtype)
#         event_crop = np.zeros((H, 640, 3), dtype=event_img.dtype)
#         labels_crop = np.zeros((0, 5), dtype=labels.dtype)
#         return img_crop, event_crop, labels_crop



def event2img(event, num_frame=1, w=1280, h=720):
    """Render event-camera data into one or more BGR frames.

    event: sequence of records with fields 'x', 'y', 'p' (polarity), 't'
        (timestamp); assumed sorted by 't' ascending — TODO confirm with caller.
    num_frame: number of frames to split the event stream into (equal time bins).
    w, h: output image width/height in pixels.

    Returns a single (h, w, 3) uint8 image when num_frame == 1, otherwise a
    list of num_frame such images. Positive-polarity events (p == 1) are drawn
    as [200, 200, 200], all others as [100, 100, 100]; a later event at the
    same pixel overwrites an earlier one.
    """
    if num_frame == 1:

        image = np.zeros((h, w, 3), dtype=np.uint8)

        for e in event:
            # Newer events overwrite older ones at the same pixel.
            if e['p'] == 1:
                image[int(e['y']), int(e['x']), ...] = [200, 200, 200]
            else:
                image[int(e['y']), int(e['x']), ...] = [100, 100, 100]

        return image

    else:
        # BUGFIX: take the time range from the event sequence itself.
        # Previously this read `e[0]['t']`/`e[-1]['t']` where `e` was not yet
        # defined in this branch, raising NameError for num_frame > 1.
        t_min, t_max = int(event[0]['t']), int(event[-1]['t'])
        # Split [t_min, t_max] into num_frame equal-width time bins.
        t_bins = np.linspace(t_min, t_max, num_frame + 1)
        images = [np.zeros((h, w, 3), dtype=np.uint8) for _ in range(num_frame)]
        for e in event:
            # Map each event's timestamp onto its time bin (frame index).
            frame_idx = np.searchsorted(t_bins, e['t'], side='right') - 1
            frame_idx = min(max(frame_idx, 0), num_frame - 1)
            if e['p'] == 1:
                images[frame_idx][int(e['y']), int(e['x']), ...] = [200, 200, 200]
            else:
                images[frame_idx][int(e['y']), int(e['x']), ...] = [100, 100, 100]
        return images

import torch
import numpy as np

def event2img_gpu(event, num_frame=1, w=1280, h=720, device='cuda'):
    """
    GPU-accelerated conversion of event data into image frames.

    At a given pixel the most recent event wins (later writes overwrite
    earlier ones in the advanced-indexing assignment).
    event: (N,) structured array (or list of dicts) with 'x', 'y', 'p', 't'
    num_frame: number of frames to produce
    w, h: image width / height
    device: torch device used for the computation
    """
    # Unpack event fields into tensors on the target device.
    if isinstance(event, np.ndarray):
        xs = torch.from_numpy(event['x'].astype(np.int64)).to(device)
        ys = torch.from_numpy(event['y'].astype(np.int64)).to(device)
        ps = torch.from_numpy(event['p'].astype(np.int32)).to(device)
        ts = torch.from_numpy(event['t'].astype(np.float32)).to(device)
    else:
        # List-like input: gather each field explicitly.
        xs = torch.tensor([e['x'] for e in event], device=device, dtype=torch.long)
        ys = torch.tensor([e['y'] for e in event], device=device, dtype=torch.long)
        ps = torch.tensor([e['p'] for e in event], device=device, dtype=torch.long)
        ts = torch.tensor([e['t'] for e in event], device=device, dtype=torch.float32)

    # Keep coordinates inside the image bounds.
    xs = torch.clamp(xs, 0, w - 1)
    ys = torch.clamp(ys, 0, h - 1)

    def render(sel_x, sel_y, sel_p):
        # Paint one frame: positive polarity -> [200]*3, anything else -> [100]*3.
        canvas = torch.zeros((h, w, 3), device=device, dtype=torch.uint8)
        if sel_p.numel() > 0:
            shades = torch.zeros((sel_p.numel(), 3), device=device, dtype=torch.uint8)
            shades[sel_p == 1] = torch.tensor([200, 200, 200], device=device, dtype=torch.uint8)
            shades[sel_p < 1] = torch.tensor([100, 100, 100], device=device, dtype=torch.uint8)
            # Plain indexed assignment: later events overwrite earlier ones.
            canvas[sel_y, sel_x] = shades
        return canvas.cpu().numpy()

    if num_frame == 1:
        # Single frame: draw every event onto one canvas.
        return render(xs, ys, ps)

    if ps.numel() == 0:
        # No events: every requested frame is blank.
        return [render(xs, ys, ps) for _ in range(num_frame)]

    # Equal-width time bins over the full event time span.
    edges = torch.linspace(ts.min(), ts.max(), num_frame + 1, device=device)
    slots = torch.clamp(torch.bucketize(ts, edges, right=True) - 1, 0, num_frame - 1)

    # One rendered frame per time bin.
    return [render(xs[slots == k], ys[slots == k], ps[slots == k])
            for k in range(num_frame)]



def event2img_count_raw(event, num_frame=1, w=1280, h=720, device='cuda'):
    """Accumulate events into per-pixel count images.

    Each pixel starts at 128. A positive event (p == 1) adds 1; any other
    polarity (may be -1 or 0 depending on the sensor) subtracts 1. The
    result is clamped to [0, 255] and returned as uint8.

    event: structured numpy array or list of dicts with 'x', 'y', 'p'
        (and 't' when num_frame > 1).
    num_frame: number of frames to split the event stream into
        (equal-width time bins).
    w, h: output image width/height in pixels.
    device: torch device used for the accumulation.

    Returns a single (h, w) uint8 array when num_frame == 1, otherwise a
    list of num_frame such arrays.
    """
    # Move the event fields onto the compute device.
    if isinstance(event, list):
        x_coords = torch.tensor([e['x'] for e in event], dtype=torch.long, device=device)
        y_coords = torch.tensor([e['y'] for e in event], dtype=torch.long, device=device)
        p_vals = torch.tensor([e['p'] for e in event], dtype=torch.float32, device=device)
        if num_frame > 1:
            t_vals = torch.tensor([e['t'] for e in event], dtype=torch.float32, device=device)
    else:
        # Structured numpy array. Use int64 for BOTH coordinates so the
        # flattened index below is computed in a single integer dtype
        # (previously x was int32 while y was int64).
        x_coords = torch.from_numpy(event['x'].astype(np.int64)).to(device)
        y_coords = torch.from_numpy(event['y'].astype(np.int64)).to(device)
        p_vals = torch.from_numpy(event['p'].astype(np.float32)).to(device)
        if num_frame > 1:
            t_vals = torch.from_numpy(event['t'].astype(np.float32)).to(device)

    # Clamp coordinates into the image (consistent with event2img_gpu).
    # Without this, an out-of-range y makes bincount's output longer than
    # h*w and the reshape below raises.
    x_coords = torch.clamp(x_coords, 0, w - 1)
    y_coords = torch.clamp(y_coords, 0, h - 1)

    if num_frame == 1:
        # Single-frame accumulation, starting from the 128 background.
        image = torch.full((h, w), 128, dtype=torch.float32, device=device)

        pos_mask = (p_vals == 1)
        # Negative events may be encoded as -1 OR 0 depending on the
        # source, so test `< 1` rather than `== -1`.
        neg_mask = (p_vals < 1)

        # bincount over flattened pixel indices counts events per pixel.
        pos_counts = torch.bincount(y_coords[pos_mask] * w + x_coords[pos_mask], minlength=h * w).reshape(h, w)
        neg_counts = torch.bincount(y_coords[neg_mask] * w + x_coords[neg_mask], minlength=h * w).reshape(h, w)

        image = image + pos_counts - neg_counts

        # Clamp to the valid uint8 range before conversion.
        image = torch.clamp(image, 0, 255)
        return image.byte().cpu().numpy()

    else:
        # Multi-frame accumulation.
        if t_vals.numel() == 0:
            # No events: every frame is the flat 128 background.
            images = []
            for i in range(num_frame):
                frame_image = torch.full((h, w), 128, dtype=torch.float32, device=device)
                frame_image = torch.clamp(frame_image, 0, 255)
                images.append(frame_image.byte().cpu().numpy())
            return images
        else:
            t_min, t_max = t_vals.min(), t_vals.max()

            # Equal-width time bins over the full event time span.
            t_bins = torch.linspace(t_min, t_max, num_frame + 1, device=device)

            # Assign each event to its time bin (frame index).
            frame_indices = torch.bucketize(t_vals, t_bins, right=True) - 1
            frame_indices = torch.clamp(frame_indices, 0, num_frame - 1)

            images = []
            for i in range(num_frame):
                frame_image = torch.full((h, w), 128, dtype=torch.float32, device=device)

                # Events belonging to this frame's time bin.
                frame_mask = (frame_indices == i)
                frame_x = x_coords[frame_mask]
                frame_y = y_coords[frame_mask]
                frame_p = p_vals[frame_mask]

                pos_mask = (frame_p == 1)
                neg_mask = (frame_p < 1)

                if len(frame_x) > 0:
                    pos_counts = torch.bincount(frame_y[pos_mask] * w + frame_x[pos_mask], minlength=h * w).reshape(h, w)
                    neg_counts = torch.bincount(frame_y[neg_mask] * w + frame_x[neg_mask], minlength=h * w).reshape(h, w)

                    frame_image = frame_image + pos_counts - neg_counts

                frame_image = torch.clamp(frame_image, 0, 255)
                images.append(frame_image.byte().cpu().numpy())

            return images


def main(method=event2img_count_raw,
         original_path="/home_ssd/lhc/hand_detect_v3_test2",
         frames=2):
    """Convert every `event_numpy` .npy file under `original_path` into
    event images written to an auto-named sibling folder.

    method: event2img_gpu (plain event frames) or event2img_count_raw
        (count-style frames simulating the Alpix sensor); the latter gets
        a "_count_raw" suffix on the output folder name.
    original_path: dataset root; each child folder is expected to contain
        an `event_numpy` subfolder.
    frames: frames generated per .npy file; 1/2/5/8/16 map to
        60/120/300/500/1000 fps in the folder name.

    Output folder naming scheme:
      (1) event_img_<fps>fps            -> produced with event2img_gpu
      (2) event_img_<fps>fps_count_raw  -> produced with event2img_count_raw
    """
    # Output folder suffix depends on the conversion method.
    suffix = "" if method == event2img_gpu else "_count_raw"

    # Map frame count to the fps label used in the folder name.
    fps_by_frames = {1: "60", 2: "120", 5: "300", 8: "500", 16: "1000"}
    if frames not in fps_by_frames:
        raise ValueError("frames must be 1 2 5 8 16")
    new_folder_name = f"event_img_{fps_by_frames[frames]}fps{suffix}"

    path = original_path
    data_list = os.listdir(path)

    for local_len, data in enumerate(data_list, start=1):

        print(f"No {local_len} processing:{data} ...")

        sub_path = os.path.join(path, data)
        new_event_fold = os.path.join(sub_path, new_folder_name)
        os.makedirs(new_event_fold, 0o777, exist_ok=True)
        old_event_fold = os.path.join(sub_path, "event_numpy")

        # Skip folders that already hold `frames` images per source .npy file.
        if len(os.listdir(new_event_fold)) == len(os.listdir(old_event_fold)) * frames:
            print("Attention! {} is complete".format(data))
            continue

        for event in tqdm(os.listdir(old_event_fold)):
            old_event_path = os.path.join(old_event_fold, event)

            e = np.load(old_event_path)

            event_img = method(e, frames)

            if isinstance(event_img, list):
                # Zero-pad 3-character stems so filenames sort correctly.
                # Hoisted out of the per-image loop below, where it only
                # ever fired on the first iteration anyway.
                if len(event[:-4]) == 3:
                    event = "0" + event

                for j, img in enumerate(event_img):
                    event_name = event.replace(".npy", f"_{j:02d}.png")
                    new_event_path = os.path.join(new_event_fold, event_name)
                    cv2.imwrite(new_event_path, img)

            else:
                # Single-frame result: keep the stem, swap the extension.
                event = event.replace("npy", "png")
                cv2.imwrite(os.path.join(new_event_fold, event), event_img)


# Script entry point: run the conversion with the defaults declared on main()
# (event2img_count_raw at 2 frames per .npy, i.e. the 120 fps count-raw set).
if __name__ == "__main__":

    main()

# from concurrent.futures import ThreadPoolExecutor


# def process_one_event(old_event_path, new_event_fold):
#     e = np.load(old_event_path)
#     event_img = event2img(e)
#     event_name = os.path.basename(old_event_path).replace("npy", "png")
#     cv2.imwrite(os.path.join(new_event_fold, event_name), event_img)

# if __name__ == "__main__":
#     path = "/home_ssd/lhc/hand_dataset"
#     data_list = [
#         "/output_2cam_0807_0",
#         "/output_2cam_0807_1",
#         "/output_2cam_0808_0",
#         "/output_2cam_0808_1",
#         "/output_2cam_0808_2",
#         "/output_2cam_0808_3",
#     ]

#     for data in data_list:
#         for sub in os.listdir(path + data):
#             sub_path = os.path.join(path + data, sub)
#             new_event_fold = os.path.join(sub_path, "event_img_1")
#             os.makedirs(new_event_fold, 0o777, exist_ok=True)
#             old_event_fold = os.path.join(sub_path, "event_numpy")
#             event_files = [os.path.join(old_event_fold, f) for f in os.listdir(old_event_fold)]
#             with ThreadPoolExecutor(max_workers=8) as executor:  # 你可以根据CPU核数调整max_workers
#                 list(tqdm(
#                     executor.map(lambda f: process_one_event(f, new_event_fold), event_files),
#                     total=len(event_files),
#                     desc=f"Processing {old_event_fold}"
#                 ))
    

# if __name__ == '__main__':
#     data_path  = r'/home_ssd/lhc/hand_dataset/output_2cam_0807_0/2025-08-07_15_48_57'

#     # dataset = SmallHandDetectionDataset(data_path,"event_numpy","rgb_wrap", "rgb_wrap_window_new",)

#     # a,b,c = dataset[0]
    
#     # #打印图片a c

#     # cv2.imwrite("a.jpg",a)
#     # cv2.imwrite("c.jpg",c)

#     annotation2 = np.zeros((1, 5))
#     annotation1 = np.ones((1, 5))

#     newv = np.append(annotation1, annotation2, axis=0)
#     print (newv.shape)

#     big_dataset = BigHandDetectionDataset(r'/home_ssd/lhc/hand_dataset')

#     data = big_dataset[0]