import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm



from utils.utils import xyxy2xywh, xywh2xyxy


from torch.utils.data import ConcatDataset


# Ver3.1: datasets for training and testing on dual-stream (RGB + event) data


class SmallHandDetectionDataset(Dataset):
    """Dataset over a single recording directory holding paired RGB frames and
    event-camera frames.

    Labels are read from a merged txt file ('rgb_wrap_window_new_all.txt' or
    'event_frame_window_new_all.txt') where line n describes frame n as
    "<filename>: cls cx cy w h; cls cx cy w h; ...".
    """

    def __init__(self, root_dir,
                 event_subdir = "event_img_60fps",
                 event_needed = -1,
                 is_train=True,
                 train_step = 5,

                 img_subdir="rgb_wrap",
                 label_txt_eve='event_frame_window_new_all.txt',
                 label_txt_img='rgb_wrap_window_new_all.txt',

                 ):

        '''
        event_needed:
            -1 -> use the last event frame of each sub-sequence (matches the
                  AlpsenTek capture convention); labels come from the event txt
             0 -> no event stream, image-only training; labels come from the RGB txt
             1 -> fuse event frames with the image
                  NOTE(review): the original note said mode 1 uses the RGB
                  labels, but the code below picks the event txt for any
                  event_needed != 0 — confirm which is intended.
        '''

        self.is_train = is_train

        self.event_subdir = os.path.join(root_dir, event_subdir)
        self.img_subdir = os.path.join(root_dir, img_subdir)

        self.event_needed = event_needed

        # Pick the label file that matches the training mode; fall back to the
        # plain "<name>.txt" variant when the "_all" file does not exist.
        if self.event_needed != 0:
            self.label_subdir = os.path.join(root_dir, label_txt_eve)
            if not os.path.exists(self.label_subdir):
                self.label_subdir = os.path.join(root_dir, label_txt_eve[:-8] + ".txt")
        else:
            self.label_subdir = os.path.join(root_dir, label_txt_img)
            if not os.path.exists(self.label_subdir):
                self.label_subdir = os.path.join(root_dir, label_txt_img[:-8] + ".txt")

        self.event_files = os.listdir(self.event_subdir)
        self.img_files = os.listdir(self.img_subdir)

        # Sort so that list index == frame index.
        self.event_files.sort()
        self.img_files.sort()

        # The event frame rate is encoded in the directory name (e.g. "_60fps",
        # "120fps"); eve_bin = number of event frames per RGB frame.
        self.fps_str = event_subdir[-16:-10] if event_subdir[-10:] == "_count_raw" else event_subdir[-6:]
        if self.fps_str[:3] == "_60" or event_subdir == "event_frame":
            self.eve_bin = 1
        else:
            self.eve_bin = int(self.fps_str[:3]) // 60

        # Number of aligned (event window, RGB frame) pairs available.
        length_event = len(os.listdir(self.event_subdir)) // self.eve_bin
        length_img = len(os.listdir(self.img_subdir))
        self.count = min(length_event, length_img)

        self.train_step = train_step

    def __len__(self):
        # One sample per train_step window; the last window is dropped so the
        # idx + 1 / (idx + 1) * eve_bin lookups in __getitem__ stay in range.
        return self.count // self.train_step - 1

    def read_labels_from_merged_txt(self, txt_path, n):
        """Parse line n of a merged label file.

        Returns a float32 array of shape (N, 5) with rows [cls, cx, cy, w, h],
        or an empty (0, 5) array when line n is missing or holds no targets.
        """
        with open(txt_path, 'r') as f:
            lines = f.readlines()
        if n >= len(lines):
            return np.zeros((0, 5), dtype=np.float32)
        line = lines[n].strip()
        # Drop the leading "<filename>:" prefix.
        if ':' in line:
            _, content = line.split(':', 1)
            content = content.strip()
        else:
            content = ''
        if not content:
            return np.zeros((0, 5), dtype=np.float32)
        # Targets are ';'-separated; fields within a target are whitespace-separated.
        targets = [x.strip() for x in content.split(';') if x.strip()]
        arr = np.array([t.split() for t in targets], dtype=np.float32)

        # Keep only [cls, cx, cy, w, h]. The previous
        # np.hstack([arr[:, :5], arr[:, 5:]]) was a no-op that rebuilt the
        # full-width array, so the assert below fired on any extra columns.
        if arr.shape[1] > 5:
            arr = arr[:, :5]
        assert arr.shape[1] == 5, f'expected 5 label columns: {txt_path} line {n+1}'
        return arr

    def __getitem__(self, index):
        # Pick a random frame inside the window when training, the first otherwise.
        if self.is_train:
            random_idx = random.randint(0, self.train_step - 1)
        else:
            random_idx = 0

        idx = random_idx + index * self.train_step

        # +1: RGB frame idx+1 is temporally aligned with event window idx
        # (AlpsenTek sensor convention).
        img_path = os.path.join(self.img_subdir, self.img_files[idx + 1])
        img = cv2.imread(img_path)

        if self.eve_bin == 1:
            event_path = os.path.join(self.event_subdir, self.event_files[idx])
            event_img = cv2.imread(event_path)
        else:
            if self.event_needed < 1:
                # Only the last event frame of the window is used.
                event_path = os.path.join(self.event_subdir, self.event_files[(idx + 1) * self.eve_bin - 1])
                event_img = cv2.imread(event_path)
            else:
                # Return the whole window of event frames as a list.
                event_img = []
                for i in range(self.eve_bin):
                    event_path = os.path.join(self.event_subdir, self.event_files[idx * self.eve_bin + i])
                    eve_img = cv2.imread(event_path)
                    event_img.append(eve_img)

        # Event labels are indexed at idx, image-aligned labels at idx + 1.
        offset = 0 if self.event_needed < 0 else 1
        label = self.read_labels_from_merged_txt(self.label_subdir, idx + offset)

        return img, event_img, label, event_path

class BigHandDetectionDataset_Dual(Dataset):
    """Concatenation of one SmallHandDetectionDataset per recording directory
    under `path`; serves padded, CHW tensors for the RGB and event streams."""

    def __init__(self,
                 path =  "/home_ssd/lhc/hand_detect_v2",

                 event_subdir = "event_img_60fps",
                 event_needed = -1,
                 is_train = True,
                 train_step = 5,

):

        self.top_path = path

        mid_event_subdir = event_subdir

        # Build one sub-dataset per recording directory.
        self.dataset_list = [
            SmallHandDetectionDataset(root_dir=os.path.join(self.top_path, sub),
                                      event_subdir=mid_event_subdir,
                                      event_needed=event_needed,
                                      is_train=is_train,
                                      train_step=train_step)
            for sub in os.listdir(self.top_path)
        ]

        # Merge all recordings into a single indexable dataset.
        self.concat_dataset = ConcatDataset(self.dataset_list)
        del self.dataset_list

    def __len__(self):
        return len(self.concat_dataset)

    def padding_for_image(self, img,  color=(0,0,0),event_image=None):
        """Pad `img` (and `event_image`, if given) symmetrically so both
        spatial dims become multiples of 64; returns the padded image(s) and
        the (top, bottom, left, right) border widths."""
        h, w = img.shape[:2]

        pad_h = int(np.ceil(h / 64) * 64) - h
        pad_w = int(np.ceil(w / 64) * 64) - w

        # Split the padding as evenly as possible between opposite sides.
        top, left = pad_h // 2, pad_w // 2
        bottom, right = pad_h - top, pad_w - left

        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)

        if event_image is not None:
            if isinstance(event_image, list):
                # Pad every frame of the event window in place.
                for i, frame in enumerate(event_image):
                    event_image[i] = cv2.copyMakeBorder(frame, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
            else:
                event_image = cv2.copyMakeBorder(event_image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)

        return img, event_image, (top, bottom, left, right)

    def __getitem__(self, index):

        image, eve_image, label_origin, img_path = self.concat_dataset[index]

        h, w = image.shape[:2]

        img, new_event_image, pads = self.padding_for_image(img=image, event_image=eve_image)

        labels = []
        if label_origin.size > 0:
            # Normalized xywh -> pixel xyxy, shifted into the padded frame.
            labels = label_origin.copy()
            half_w = label_origin[:, 3] / 2
            half_h = label_origin[:, 4] / 2
            labels[:, 1] = w * (label_origin[:, 1] - half_w) + pads[2]
            labels[:, 2] = h * (label_origin[:, 2] - half_h) + pads[0]
            labels[:, 3] = w * (label_origin[:, 1] + half_w) + pads[2]
            labels[:, 4] = h * (label_origin[:, 2] + half_h) + pads[0]

        nL = len(labels)
        if nL:
            # Back to xywh, normalized to the padded image size.
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            labels[:, [2, 4]] /= img.shape[0]
            labels[:, [1, 3]] /= img.shape[1]

        # Column 0 is reserved for the in-batch sample index (see collate_fn).
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # BGR -> RGB and HWC -> CHW.
        img = np.ascontiguousarray(img[:, :, ::-1].transpose(2, 0, 1))

        if isinstance(new_event_image, list):
            # Stack the whole event window along the channel axis.
            chunks = [np.ascontiguousarray(frame.transpose(2, 0, 1)) for frame in new_event_image]
            all_eve = np.concatenate(chunks, axis=0)
            return torch.from_numpy(img), torch.from_numpy(all_eve), labels_out, img_path

        new_event_image = np.ascontiguousarray(new_event_image.transpose(2, 0, 1))
        return torch.from_numpy(img), torch.from_numpy(new_event_image), labels_out, img_path

    @staticmethod
    def collate_fn(batch):
        """Stack a batch; label column 0 receives the in-batch sample index
        (needed by build_targets())."""
        img, eve, label, path = zip(*batch)
        for i, lbl in enumerate(label):
            lbl[:, 0] = i
        return torch.stack(img, 0), torch.stack(eve, 0), torch.cat(label, 0), path
    
# Ver3.2: inherits from 3.1; each sample carries two labels (RGB and event)


class SmallHandDetectionDataset_2Label(SmallHandDetectionDataset): # 'rgb_wrap_window_new_all.txt' or 'event_frame_window_new_all.txt'
    """Variant of SmallHandDetectionDataset that returns BOTH the RGB-frame
    label and the event-frame label for every sample.

    event_needed = -1 takes the last event frame of each sub-sequence
    (AlpsenTek convention); event_subdir determines eve_bin.
    """

    def __init__(self, root_dir,
                 event_subdir = "event_img_60fps",
                 event_needed = -1,
                 is_train=True,
                 train_step = 5,

                 img_subdir="rgb_wrap",
                 label_txt_eve='event_frame_window_new_all.txt',
                 label_txt_img='rgb_wrap_window_new_all.txt',

                 ):
        # The parent builds the file lists, eve_bin, count, etc. (it also sets
        # self.label_subdir, which this class simply does not use). The
        # previous version re-implemented all of that verbatim.
        super().__init__(root_dir, event_subdir, event_needed, is_train,
                         train_step, img_subdir, label_txt_eve, label_txt_img)

        # Unlike the parent, keep BOTH label files so __getitem__ can return
        # an RGB label and an event label per sample.
        self.eve_label_subdir = self._resolve_label_path(root_dir, label_txt_eve)
        self.img_label_subdir = self._resolve_label_path(root_dir, label_txt_img)

    @staticmethod
    def _resolve_label_path(root_dir, label_txt):
        """Return the label txt path, falling back to the plain "<name>.txt"
        variant when the "_all.txt" file does not exist."""
        path = os.path.join(root_dir, label_txt)
        if not os.path.exists(path):
            # "<name>_all.txt" -> "<name>.txt"
            path = os.path.join(root_dir, label_txt[:-8] + ".txt")
        return path

    # __len__ and read_labels_from_merged_txt are inherited unchanged; the
    # previous trivial super()-delegating overrides were removed.

    def __getitem__(self, index):
        # Pick a random frame inside the window when training, the first otherwise.
        if self.is_train:
            random_idx = random.randint(0, self.train_step - 1)
        else:
            random_idx = 0

        idx = random_idx + index * self.train_step

        # +1: RGB frame idx+1 is temporally aligned with event window idx
        # (AlpsenTek sensor convention).
        img_path = os.path.join(self.img_subdir, self.img_files[idx + 1])
        img = cv2.imread(img_path)

        if self.eve_bin == 1:
            event_path = os.path.join(self.event_subdir, self.event_files[idx])
            event_img = cv2.imread(event_path)
        else:
            if self.event_needed < 1:
                # Only the last event frame of the window is used.
                event_path = os.path.join(self.event_subdir, self.event_files[(idx + 1) * self.eve_bin - 1])
                event_img = cv2.imread(event_path)
            else:
                # Return the whole window of event frames as a list.
                event_img = []
                for i in range(self.eve_bin):
                    event_path = os.path.join(self.event_subdir, self.event_files[idx * self.eve_bin + i])
                    eve_img = cv2.imread(event_path)
                    event_img.append(eve_img)

        # RGB label is offset by +1 to match the RGB frame; the event label
        # uses the window index directly.
        img_label = self.read_labels_from_merged_txt(self.img_label_subdir, idx + 1)
        eve_label = self.read_labels_from_merged_txt(self.eve_label_subdir, idx)

        return img, event_img, img_label, eve_label, img_path


class BigHandDetectionDataset_Dual_2Label(BigHandDetectionDataset_Dual):
    """Concatenation of SmallHandDetectionDataset_2Label recordings; each
    sample carries separate RGB and event labels."""

    def __init__(self,
                 path="/home_ssd/lhc/hand_detect_v3",
                 event_subdir = "event_img_60fps",
                 event_needed = -1,
                 is_train = True,
                 train_step = 5,
                 distance = "all",
                 ):
        '''
        distance selects a fixed held-out evaluation subset ("3m", "2m",
        "1p5m" camera distance) when is_train is False; otherwise every
        sub-directory of `path` is used.
        '''
        self.top_path = path

        self.dataset_list = []

        mid_event_subdir = event_subdir

        # Hard-coded held-out recordings per camera distance (evaluation only).
        if is_train == False and distance == "3m":
            subs = ['2025-11-10_11_47_12','2025-11-10_11_47_53','2025-11-10_12_02_13','2025-11-12_15_39_22','2025-11-12_15_40_41']

        elif is_train == False and distance == "2m":
            subs = ['2025-11-10_11_49_53','2025-11-10_11_51_10','2025-11-10_11_59_41','2025-11-12_15_42_59','2025-11-12_15_43_46']

        elif is_train == False and distance == "1p5m":
            subs = ['2025-11-10_11_57_27','2025-11-10_11_58_08','2025-11-12_15_45_30','2025-11-12_15_46_16']

        else:
            subs = os.listdir(self.top_path)

        for sub in subs:
            sub_path = os.path.join(self.top_path, sub)

            dataset = SmallHandDetectionDataset_2Label(root_dir=sub_path,
                                                       event_subdir=mid_event_subdir,
                                                       event_needed=event_needed,
                                                       is_train=is_train,
                                                       train_step=train_step)

            self.dataset_list.append(dataset)

        # Merge all recordings into a single indexable dataset.
        self.concat_dataset = ConcatDataset(self.dataset_list)
        del self.dataset_list

    # __len__ and padding_for_image are inherited unchanged; the previous
    # trivial super()-delegating overrides were removed.

    def process_label(self, w, h, new_w, new_h, label_origin, new_padding):
        """Convert one (N, 5) array [cls, cx, cy, bw, bh] (normalized to the
        ORIGINAL w x h frame) into an (N, 6) tensor [batch, cls, cx, cy, bw, bh]
        normalized to the padded new_w x new_h frame. Column 0 is filled in
        later by collate_fn."""
        labels = []
        if label_origin.size > 0:
            # Normalized xywh -> pixel xyxy, shifted by the padding offsets.
            labels = label_origin.copy()
            labels[:, 1] = w * (label_origin[:, 1] - label_origin[:, 3] / 2) + new_padding[2]
            labels[:, 2] = h * (label_origin[:, 2] - label_origin[:, 4] / 2) + new_padding[0]
            labels[:, 3] = w * (label_origin[:, 1] + label_origin[:, 3] / 2) + new_padding[2]
            labels[:, 4] = h * (label_origin[:, 2] + label_origin[:, 4] / 2) + new_padding[0]

        nL = len(labels)  # number of labels
        if nL:
            # Back to xywh, normalized to the padded size.
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            labels[:, [2, 4]] /= new_h  # height
            labels[:, [1, 3]] /= new_w  # width

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        return labels_out

    def __getitem__(self, index):

        image, eve_image, img_label_origin, eve_label_origin, path = self.concat_dataset[index]

        original_h, original_w = image.shape[:2]

        img, new_event_image, new_padding = self.padding_for_image(img=image, event_image=eve_image)

        new_h, new_w = img.shape[:2]

        # Project both label sets into the padded frame.
        img_labels_out = self.process_label(original_w, original_h, new_w, new_h, img_label_origin, new_padding)
        eve_labels_out = self.process_label(original_w, original_h, new_w, new_h, eve_label_origin, new_padding)

        # BGR -> RGB and HWC -> CHW.
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        if not isinstance(new_event_image, list):
            new_event_image = new_event_image.transpose(2, 0, 1)
            new_event_image = np.ascontiguousarray(new_event_image)
            return torch.from_numpy(img), torch.from_numpy(new_event_image), img_labels_out, eve_labels_out, path

        else:
            # A window of event frames is concatenated along the channel axis.
            new_eve_list = []
            for i in range(len(new_event_image)):
                new_event = new_event_image[i].transpose(2, 0, 1)
                new_eve_list.append(np.ascontiguousarray(new_event))

            all_eve = np.concatenate(new_eve_list, axis=0)

            return torch.from_numpy(img), torch.from_numpy(all_eve), img_labels_out, eve_labels_out, path

    @staticmethod
    def collate_fn(batch):
        """Stack a batch; column 0 of both label tensors receives the
        in-batch sample index (needed by build_targets())."""
        img, eve, label1, label2, path = zip(*batch)
        for i, l in enumerate(label1):
            l[:, 0] = i
        for i, l in enumerate(label2):
            l[:, 0] = i
        return torch.stack(img, 0), torch.stack(eve, 0), torch.cat(label1, 0), torch.cat(label2, 0), path






# if __name__ == '__main__':


#     big_dataset = BigHandDetectionDataset_Dual()

#     data = big_dataset[0]

#     print()