if True:  # folded preamble: make the parent package importable before other imports
    import sys
    import os
    # os.environ['CUDA_VISIBLE_DEVICES'] = "1"  # NOTE: must be set BEFORE importing torch (order matters)

    dir_path = os.path.dirname(os.path.realpath(__file__)) # directory containing this file
    parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir)) # parent directory of this file

    sys.path.append(parent_dir_path) # add the parent dir so `utils.*` imports below resolve

import random
import cv2
import numpy as np
import torch

from torch.utils.data import Dataset

from utils.utils import xyxy2xywh, xywh2xyxy
from utils.wiky_utils import event2img_count, event2img_cover

from torch.utils.data import ConcatDataset


# Ver4: experimental version


class SmallHandDetectionDataset(Dataset):
    """Paired event/RGB hand-detection dataset for ONE recording directory.

    Labels come from merged txt files ('rgb_wrap_window_new_all.txt' /
    'event_frame_window_new_all.txt'): one line per frame, formatted as
    ``filename: cls x y w h; cls x y w h; ...`` with normalized xywh boxes.
    """

    def __init__(self, root_dir,

                 eve_type = "count_c=1", # or "cover"
                 eve_bin = 2, # 60 120 300 500 1000
                 eve_bin_needed = -1,

                 is_train=True,
                 train_step = 5,
                 eve_subdir = "event_numpy",
                 img_subdir = "rgb_wrap",
                 eve_label_txt = 'event_frame_window_new_all.txt',
                 img_label_txt = 'rgb_wrap_window_new_all.txt',

                 ):
        '''
        root_dir: dataset root directory for one recording
        eve_type: "count_c=<c>" or "cover", where c is the event-count threshold
        eve_bin: number of event bins (1 for simulated 60fps, 2 for 120fps,
                 5 for 300fps, 8 for 500fps, 16 for 1000fps)
        eve_bin_needed: which bin to keep; -1 keeps only the last bin
        is_train: when True, pick a random frame inside each step window
        train_step: stride (in frames) between consecutive samples for
                    both training and testing
        '''
        self.is_train = is_train

        self.eve_subdir = os.path.join(root_dir, eve_subdir)
        self.img_subdir = os.path.join(root_dir, img_subdir)

        self.eve_label_subdir = os.path.join(root_dir, eve_label_txt)
        self.img_label_subdir = os.path.join(root_dir, img_label_txt)

        # If the "*_all.txt" file does not exist, fall back to the variant
        # without the "_all" suffix (strip the trailing 8 chars "_all.txt").
        if not os.path.exists(self.eve_label_subdir):
            self.eve_label_subdir = os.path.join(root_dir, eve_label_txt[:-8] + ".txt")

        if not os.path.exists(self.img_label_subdir):
            self.img_label_subdir = os.path.join(root_dir, img_label_txt[:-8] + ".txt")

        # Sorted so event frame i and RGB frame i correspond by filename order.
        self.event_files = sorted(os.listdir(self.eve_subdir))
        self.img_files = sorted(os.listdir(self.img_subdir))

        # Reuse the listings above instead of hitting the filesystem again;
        # only as many pairs as the shorter modality stream.
        self.count = min(len(self.event_files), len(self.img_files))

        self.step = train_step

        # First 5 characters select the rendering mode: "count" or "cover".
        self.eve_type = eve_type[:5]

        # Parse the threshold after "c=" (handles multi-digit thresholds,
        # e.g. "count_c=10"; single-digit values behave as before).
        self.eve_threshold = int(eve_type.split("c=")[-1]) if self.eve_type == "count" else None

        self.eve_bin = eve_bin

        self.eve_bin_needed = eve_bin_needed


    def __len__(self):
        # -1 keeps `idx + 1` (the RGB frame offset in __getitem__) in range
        # even for the last step window.
        return self.count // self.step - 1

    def read_labels_from_merged_txt(self, txt_path, n):
        """Return the labels of line ``n`` as a float32 array of shape (k, 5).

        Each line is ``filename: cls x y w h; ...``; an out-of-range index or
        an empty label list yields an empty (0, 5) array.
        """
        with open(txt_path, 'r') as f:
            lines = f.readlines()
        if n >= len(lines):
            return np.zeros((0, 5), dtype=np.float32)
        line = lines[n].strip()
        # Drop the leading "filename:" prefix.
        if ':' in line:
            _, content = line.split(':', 1)
            content = content.strip()
        else:
            content = ''
        if not content:
            return np.zeros((0, 5), dtype=np.float32)
        # Multiple targets on one line are separated by ';'.
        targets = [x.strip() for x in content.split(';') if x.strip()]
        arr = np.array([t.split() for t in targets], dtype=np.float32)
        # Message fixed: this fires for ANY column count != 5, not only > 5.
        assert arr.shape[1] == 5, f'expected 5 label columns: {txt_path} line {n+1}'
        return arr

    def __getitem__(self, index):

        # Pick a frame inside the step window: random when training,
        # the first frame of the window otherwise.
        if self.is_train:
            random_idx = random.randint(0, self.step - 1)
        else:
            random_idx = 0

        idx = random_idx + index * self.step

        event_path = os.path.join(self.eve_subdir, self.event_files[idx])
        # +1: RGB frame lags the event frame by one (alignment specific to
        # the AlpsenTek sensor pipeline).
        img_path = os.path.join(self.img_subdir, self.img_files[idx + 1])

        event = np.load(event_path)

        # Render the raw event array into an image, by count-threshold or
        # by coverage depending on eve_type.
        if self.eve_type == "count":
            event_img = event2img_count(event,
                                        channel=3,
                                        num_frame=self.eve_bin,
                                        needed=self.eve_bin_needed,
                                        threshold=self.eve_threshold,)
        else:
            event_img = event2img_cover(event,
                                        channel=3,
                                        num_frame=self.eve_bin,
                                        needed=self.eve_bin_needed,)

        img = cv2.imread(img_path)

        eve_label = self.read_labels_from_merged_txt(self.eve_label_subdir, idx)
        img_label = self.read_labels_from_merged_txt(self.img_label_subdir, idx + 1)  # +1, same offset as img_path

        return img, event_img, img_label, eve_label, img_path

class BigHandDetectionDataset(Dataset):
    """Concatenation of one SmallHandDetectionDataset per sub-directory of `path`.

    Pads both images to height/width multiples of 64, converts labels to the
    YOLO-style (batch_index, cls, x, y, w, h) tensor layout, and returns CHW
    tensors ready for a detection model.
    """

    def __init__(self, 
                path =  "/home_ssd/lhc/hand_detect_v2",
                eve_type =  "count_c=1",
                eve_bin = 2,
                eve_bin_needed = -1,
                train_step = 5,
                is_train=True,):
        
        '''
        path: dataset root directory (one sub-directory per recording)
        eve_type: "count_c=<c>" or "cover", where c is the event-count threshold
        eve_bin: number of event bins (1 for simulated 60fps, 2 for 120fps,
                 5 for 300fps, 8 for 500fps, 16 for 1000fps)
        eve_bin_needed: which bin to keep; -1 keeps only the last bin
        is_train: when True, pick a random frame inside each step window
        train_step: stride (in frames) between consecutive samples
        '''

        self.top_path = path

        self.dataset_list = []
      
        # Build one sub-dataset per recording directory under the root.
        for sub in os.listdir(self.top_path):
            sub_path = os.path.join(self.top_path, sub)

            # print(sub_path)
            dataset = SmallHandDetectionDataset(root_dir = sub_path,
                                   eve_type = eve_type,
                                   eve_bin = eve_bin,
                                   eve_bin_needed = eve_bin_needed,
                                   is_train = is_train,
                                   train_step = train_step,
                                   )

            self.dataset_list.append(dataset)

        # Merge all sub-datasets into a single indexable dataset.
        self.concat_dataset = ConcatDataset(self.dataset_list)
        del self.dataset_list




    def __len__(self):
        return len(self.concat_dataset)
    
    def padding_for_image(self, img,  color=(0,0,0),event_image=None):
        """Pad `img` (and optionally `event_image`) up to multiples of 64.

        `event_image` may be a single array or a list of arrays; each gets the
        same border. Returns (padded_img, padded_event_image, padding) where
        padding is the (top, bottom, left, right) border widths in pixels.
        """

        # new h and w must be multiples of 64 (comment fixed: code rounds to 64, not 32)

        shape = img.shape[:2]

        new_h = int(np.ceil(shape[0] / 64) * 64)
        new_w = int(np.ceil(shape[1] / 64) * 64)

        # Split the padding evenly between the two sides (bottom/right take
        # the extra pixel when the difference is odd).

        top, bottom = (new_h - shape[0]) // 2, (new_h - shape[0]) - (new_h - shape[0]) // 2

        left, right = (new_w - shape[1]) // 2, (new_w - shape[1]) - (new_w - shape[1]) // 2

        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border

        if event_image is not None:

            # List case: pad every event frame with the same border.
            if isinstance(event_image, list):
                eve_out= []
                for i, eve in enumerate(event_image):
                    eve = cv2.copyMakeBorder(eve, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
                    eve_out.append(eve)
                return img,eve_out, (top, bottom, left, right)
            
            else:
                event_image = cv2.copyMakeBorder(event_image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
 
            # cv2.imwrite("test.jpg", img)  # debugging aid

        return img,event_image, (top, bottom, left, right)
    
    
    def process_label(self, w, h, new_w, new_h, label_origin, new_padding):
        """Re-normalize (cls, x, y, w, h) labels from the original image size
        (w, h) to the padded size (new_w, new_h).

        new_padding is (top, bottom, left, right), so [2] is the left offset
        and [0] the top offset. Returns an (nL, 6) tensor whose first column
        is left zeroed for collate_fn to fill with the batch index.
        """
        labels = []
        if label_origin.size > 0:
            # Normalized xywh to pixel xyxy format (shifted by the padding offsets)
            labels = label_origin.copy()
            labels[:, 1] = w * (label_origin[:, 1] - label_origin[:, 3] / 2) + new_padding[2]
            labels[:, 2] = h * (label_origin[:, 2] - label_origin[:, 4] / 2) + new_padding[0]
            labels[:, 3] = w * (label_origin[:, 1] + label_origin[:, 3] / 2) + new_padding[2]
            labels[:, 4] = h * (label_origin[:, 2] + label_origin[:, 4] / 2) + new_padding[0]
       
        
        nL = len(labels)  # number of labels
        if nL:
            # convert xyxy to xywh
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])

            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= new_h  # height
            labels[:, [1, 3]] /= new_w  # width

        
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        return labels_out


    def __getitem__(self, index):
        """Return (img, event_img, img_labels, eve_labels, img_path) with
        images as CHW tensors padded to 64-multiples and labels as (nL, 6)."""

        image,eve_image, img_label_origin, eve_label_origin, img_path = self.concat_dataset[index]

        # hyp = self.hyp 

        original_h,original_w = image.shape[:2]

        img, new_event_image, new_padding = self.padding_for_image(img = image,event_image = eve_image)

        new_h , new_w = img.shape[:2]
        
        # Load labels
        
        img_labels_out = self.process_label(original_w, original_h, new_w, new_h, img_label_origin, new_padding)
        eve_labels_out = self.process_label(original_w, original_h, new_w, new_h, eve_label_origin, new_padding)


        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)

        if isinstance(eve_image, list):

            # Stack the per-bin event frames along the channel dimension.
            new_event_image = [eve.transpose(2, 0, 1) for eve in new_event_image]
            new_event_image = np.concatenate(new_event_image,0)
            
        else:
            new_event_image = new_event_image.transpose(2, 0, 1)  # HWC to CHW (single-color, so no BGR->RGB needed)
            new_event_image = np.ascontiguousarray(new_event_image)

        return torch.from_numpy(img), torch.from_numpy(new_event_image), img_labels_out, eve_labels_out, img_path
    
    
    @staticmethod
    def collate_fn(batch):
        """Stack a list of samples into batch tensors; label column 0 becomes
        the sample index within the batch (mutated in place)."""
        img,eve, label1, label2,path= zip(*batch)  # transposed
        for i, l in enumerate(label1):
            l[:, 0] = i  # add target image index for build_targets()
        for i, l in enumerate(label2):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.stack(eve, 0), torch.cat(label1, 0), torch.cat(label2, 0),path
    


# if __name__ == '__main__':


#     big_dataset = BigHandDetectionDataset()

#     data = big_dataset[0]

#     print()