import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm



from utils.utils import xyxy2xywh, xywh2xyxy


from torch.utils.data import ConcatDataset

# V2 layout: simplified data structure — only 2 directory levels; all YOLO-format labels for a session merged into a single txt file


class SmallHandDetectionDataset(Dataset):
    """Per-session dataset for the v2 hand-detection layout (RGB frames).

    Layout under ``root_dir``::

        <event_subdir>/   one event file per frame
        <img_subdir>/     one RGB image per frame
        <label_txt>       one label line per frame, e.g.
                          "0_0002.txt: 2 0.054 0.522 0.108 0.463 ; 0_0018.txt: ..."

    Frames are grouped into windows of 5; ``__getitem__`` picks one frame of
    its window at random, so ``len(dataset) == frame_count // 5``.
    """

    def __init__(self, root_dir, event_subdir="event_numpy", img_subdir='image',
                 label_txt='rgb_wrap_window_new_all.txt', is_train=True):
        self.istrain = is_train

        self.event_subdir = os.path.join(root_dir, event_subdir)
        self.img_subdir = os.path.join(root_dir, img_subdir)
        # Path to the single merged label txt (name kept for compatibility).
        self.label_subdir = os.path.join(root_dir, label_txt)

        self.event_files = sorted(os.listdir(self.event_subdir))
        self.img_files = sorted(os.listdir(self.img_subdir))

        # Reuse the listings above instead of re-listing the directories.
        self.count = min(len(self.event_files), len(self.img_files))

    def __len__(self):
        # One sample per window of 5 consecutive frames.
        return self.count // 5

    def read_labels_from_merged_txt(self, txt_path, n):
        """Parse the labels for frame *n* from the merged label txt.

        Each line is "<name>.txt: cls cx cy w h ; <name>.txt: cls cx cy w h ; ..."
        with YOLO-normalized coordinates; targets may each carry their own
        "<name>.txt:" prefix (see the sample lines in the data notes).

        Returns an (N, 5) float32 array, or an empty (0, 5) array when the
        frame has no targets or *n* is out of range.  Raises ValueError on a
        malformed line (wrong column count).
        """
        with open(txt_path, 'r') as f:
            lines = f.readlines()
        if n >= len(lines):
            return np.zeros((0, 5), dtype=np.float32)
        line = lines[n].strip()
        # Drop the leading "<filename>:" prefix of the whole line.
        if ':' in line:
            _, content = line.split(':', 1)
            content = content.strip()
        else:
            content = ''
        if not content:
            return np.zeros((0, 5), dtype=np.float32)
        # Multiple targets are ';'-separated; strip any per-target
        # "<filename>:" prefix so only the numeric fields remain.
        targets = []
        for piece in content.split(';'):
            piece = piece.strip()
            if ':' in piece:
                piece = piece.rsplit(':', 1)[1].strip()
            if piece:
                targets.append(piece)
        if not targets:
            return np.zeros((0, 5), dtype=np.float32)
        arr = np.array([t.split() for t in targets], dtype=np.float32)
        if arr.shape[1] != 5:
            # raise (not assert) so the check survives `python -O`
            raise ValueError(f'expected 5 label columns, got {arr.shape[1]}: {txt_path} line {n + 1}')
        return arr

    def __getitem__(self, index):
        """Return (image BGR HxWxC, labels (N, 5) float32, image path)."""
        # Pick one of the 5 frames of this window at random (light jitter).
        random_idx = random.randint(0, 4)
        idx = random_idx + index * 5

        event_path = os.path.join(self.event_subdir, self.event_files[idx])  # currently unused
        img_path = os.path.join(self.img_subdir, self.img_files[idx])

        image = cv2.imread(img_path)  # BGR; cv2 returns None on failure
        if image is None:
            raise FileNotFoundError(f'failed to read image: {img_path}')

        # Labels are [cls, cx, cy, w, h], all normalized to [0, 1].
        label = self.read_labels_from_merged_txt(self.label_subdir, idx)

        return image, label, img_path

class BigHandDetectionDataset(Dataset):
    """Concatenation of per-session SmallHandDetectionDataset's (RGB frames).

    Every immediate subdirectory of *path* is treated as one recording
    session.  Samples are letterboxed to a square ``img_size`` canvas and
    returned in YOLO training format: CHW RGB uint8 tensor, (n, 6) label
    tensor ``[batch_idx, cls, cx, cy, w, h]`` (batch_idx filled by
    ``collate_fn``), the image path, and shapes for mAP rescaling.
    """

    def __init__(self,
                 path="/home_ssd/lhc/hand_detect_v2",
                 is_train=True,
                 img_size=1280,
                 rect=True,
                 batch_size=16,
                 hyp=None):  # no augmentation (no flips) by design
        self.top_path = path

        dataset_list = []
        for sub in os.listdir(self.top_path):
            sub_path = os.path.join(self.top_path, sub)
            dataset_list.append(
                SmallHandDetectionDataset(sub_path, "event_numpy", "rgb_wrap",
                                          "rgb_wrap_window_new_all.txt", is_train))

        # Merge all per-session datasets into one index space.
        self.concat_dataset = ConcatDataset(dataset_list)

        self.hyp = hyp
        self.rect = rect
        self.img_size = img_size
        self.batch_size = batch_size

    def __len__(self):
        return len(self.concat_dataset)

    def load_image(self, img):
        """Resize *img* so its longer side equals ``self.img_size``.

        Returns (resized img, (h0, w0) original hw, (h, w) resized hw).
        """
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)
        if r != 1:
            # INTER_AREA shrinks more cleanly; INTER_LINEAR when upscaling.
            interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]

    def for_letter_box(self, img, new_shape=(416, 416), color=(0, 0, 0),
                       auto=False, scaleFill=False, scaleup=False):
        """Resize and pad *img* to *new_shape* keeping aspect ratio (letterbox).

        https://github.com/ultralytics/yolov3/issues/232
        Returns (padded img, (w_ratio, h_ratio), (dw, dh) half paddings).
        """
        shape = img.shape[:2]  # current [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, never up (better test mAP)
            r = min(r, 1.0)

        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle: pad only to a multiple of 64
            dw, dh = np.mod(dw, 64), np.mod(dh, 64)
        elif scaleFill:  # stretch to fill, no padding
            dw, dh = 0.0, 0.0
            # BUGFIX vs original: cv2.resize expects (w, h), and the width
            # ratio is new_w/old_w (indices were crossed).  Matches the
            # ultralytics letterbox reference; only the scaleFill=True path
            # (unused by current callers) is affected.
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]

        dw /= 2  # split padding between the two sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right,
                                 cv2.BORDER_CONSTANT, value=color)  # add border
        return img, ratio, (dw, dh)

    def __getitem__(self, index):
        """Return (CHW RGB tensor, (n, 6) labels, image path, shapes)."""
        image, label_origin, img_path = self.concat_dataset[index]

        img, (h0, w0), (h, w) = self.load_image(image)

        # Letterbox to a fixed square canvas (no auto-rect, no upscaling).
        img, ratio, pad = self.for_letter_box(img, self.img_size, auto=False, scaleup=False)

        shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

        labels = []
        if label_origin.size > 0:
            # Normalized xywh -> pixel xyxy in the letterboxed frame.
            labels = label_origin.copy()
            labels[:, 1] = ratio[0] * w * (label_origin[:, 1] - label_origin[:, 3] / 2) + pad[0]
            labels[:, 2] = ratio[1] * h * (label_origin[:, 2] - label_origin[:, 4] / 2) + pad[1]
            labels[:, 3] = ratio[0] * w * (label_origin[:, 1] + label_origin[:, 3] / 2) + pad[0]
            labels[:, 4] = ratio[1] * h * (label_origin[:, 2] + label_origin[:, 4] / 2) + pad[1]

        nL = len(labels)  # number of labels
        if nL:
            # Back to xywh, normalized 0-1 against the padded image.
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        # Column 0 is reserved for the batch index (set in collate_fn).
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # HWC BGR -> CHW RGB, contiguous so torch.from_numpy can wrap it.
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, img_path, shapes

    @staticmethod
    def collate_fn(batch):
        """Stack images and concat labels, writing each sample's batch index
        into label column 0 so build_targets() can map boxes to images."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
    



class SmallHandDetectionDataset_Event(Dataset):
    """Per-session dataset for the v2 layout serving event-camera frames.

    Same windowed sampling as SmallHandDetectionDataset (5 frames per
    window, one picked at random), but ``__getitem__`` returns the event
    frame read with cv2 instead of the RGB frame.
    """

    def __init__(self, root_dir, event_subdir="event_img_60fps", img_subdir="rgb_wrap",
                 label_txt='event_frame_window_new_all.txt', is_train=True):
        self.istrain = is_train

        self.event_subdir = os.path.join(root_dir, event_subdir)
        self.img_subdir = os.path.join(root_dir, img_subdir)
        # Path to the single merged label txt (name kept for compatibility).
        self.label_subdir = os.path.join(root_dir, label_txt)

        self.event_files = sorted(os.listdir(self.event_subdir))
        self.img_files = sorted(os.listdir(self.img_subdir))

        # Reuse the listings above instead of re-listing the directories.
        self.count = min(len(self.event_files), len(self.img_files))

    def __len__(self):
        # One sample per window of 5 consecutive frames.
        return self.count // 5

    def read_labels_from_merged_txt(self, txt_path, n):
        """Parse the labels for frame *n* from the merged label txt.

        Each line is "<name>.txt: cls cx cy w h ; <name>.txt: cls cx cy w h ; ..."
        with YOLO-normalized coordinates; targets may each carry their own
        "<name>.txt:" prefix.

        Returns an (N, 5) float32 array, or an empty (0, 5) array when the
        frame has no targets or *n* is out of range.  Raises ValueError on a
        malformed line (wrong column count).
        """
        with open(txt_path, 'r') as f:
            lines = f.readlines()
        if n >= len(lines):
            return np.zeros((0, 5), dtype=np.float32)
        line = lines[n].strip()
        # Drop the leading "<filename>:" prefix of the whole line.
        if ':' in line:
            _, content = line.split(':', 1)
            content = content.strip()
        else:
            content = ''
        if not content:
            return np.zeros((0, 5), dtype=np.float32)
        # Multiple targets are ';'-separated; strip any per-target
        # "<filename>:" prefix so only the numeric fields remain.
        targets = []
        for piece in content.split(';'):
            piece = piece.strip()
            if ':' in piece:
                piece = piece.rsplit(':', 1)[1].strip()
            if piece:
                targets.append(piece)
        if not targets:
            return np.zeros((0, 5), dtype=np.float32)
        arr = np.array([t.split() for t in targets], dtype=np.float32)
        if arr.shape[1] != 5:
            # raise (not assert) so the check survives `python -O`
            raise ValueError(f'expected 5 label columns, got {arr.shape[1]}: {txt_path} line {n + 1}')
        return arr

    def __getitem__(self, index):
        """Return (event frame BGR HxWxC, labels (N, 5) float32, event path)."""
        # Pick one of the 5 frames of this window at random (light jitter).
        random_idx = random.randint(0, 4)
        idx = random_idx + index * 5

        event_path = os.path.join(self.event_subdir, self.event_files[idx])
        img_path = os.path.join(self.img_subdir, self.img_files[idx])  # currently unused

        event_img = cv2.imread(event_path)  # BGR; cv2 returns None on failure
        if event_img is None:
            raise FileNotFoundError(f'failed to read event frame: {event_path}')

        # Labels are [cls, cx, cy, w, h], all normalized to [0, 1].
        label = self.read_labels_from_merged_txt(self.label_subdir, idx)

        return event_img, label, event_path

class BigHandDetectionDataset_Event(Dataset):
    """Concatenation of per-session SmallHandDetectionDataset_Event's.

    Every immediate subdirectory of *path* is one recording session.
    Samples are event frames letterboxed to a square ``img_size`` canvas and
    returned in YOLO training format: CHW RGB uint8 tensor, (n, 6) label
    tensor ``[batch_idx, cls, cx, cy, w, h]`` (batch_idx filled by
    ``collate_fn``), the frame path, and shapes for mAP rescaling.
    """

    def __init__(self,
                 path="/home_ssd/lhc/hand_detect_v2",
                 is_train=True,
                 img_size=1280,
                 rect=True,
                 batch_size=16,
                 hyp=None):  # no augmentation (no flips) by design
        self.top_path = path

        dataset_list = []
        for sub in os.listdir(self.top_path):
            sub_path = os.path.join(self.top_path, sub)
            dataset_list.append(SmallHandDetectionDataset_Event(root_dir=sub_path))

        # Merge all per-session datasets into one index space.
        self.concat_dataset = ConcatDataset(dataset_list)

        self.hyp = hyp
        self.rect = rect
        self.img_size = img_size
        self.batch_size = batch_size

    def __len__(self):
        return len(self.concat_dataset)

    def load_image(self, img):
        """Resize *img* so its longer side equals ``self.img_size``.

        Returns (resized img, (h0, w0) original hw, (h, w) resized hw).
        """
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)
        if r != 1:
            # INTER_AREA shrinks more cleanly; INTER_LINEAR when upscaling.
            interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]

    def for_letter_box(self, img, new_shape=(416, 416), color=(0, 0, 0),
                       auto=False, scaleFill=False, scaleup=False):
        """Resize and pad *img* to *new_shape* keeping aspect ratio (letterbox).

        https://github.com/ultralytics/yolov3/issues/232
        Returns (padded img, (w_ratio, h_ratio), (dw, dh) half paddings).
        """
        shape = img.shape[:2]  # current [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, never up (better test mAP)
            r = min(r, 1.0)

        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle: pad only to a multiple of 64
            dw, dh = np.mod(dw, 64), np.mod(dh, 64)
        elif scaleFill:  # stretch to fill, no padding
            dw, dh = 0.0, 0.0
            # BUGFIX vs original: cv2.resize expects (w, h), and the width
            # ratio is new_w/old_w (indices were crossed).  Matches the
            # ultralytics letterbox reference; only the scaleFill=True path
            # (unused by current callers) is affected.
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]

        dw /= 2  # split padding between the two sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right,
                                 cv2.BORDER_CONSTANT, value=color)  # add border
        return img, ratio, (dw, dh)

    def __getitem__(self, index):
        """Return (CHW RGB tensor, (n, 6) labels, event frame path, shapes)."""
        image, label_origin, img_path = self.concat_dataset[index]

        img, (h0, w0), (h, w) = self.load_image(image)

        # Letterbox to a fixed square canvas (no auto-rect, no upscaling).
        img, ratio, pad = self.for_letter_box(img, self.img_size, auto=False, scaleup=False)

        shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

        labels = []
        if label_origin.size > 0:
            # Normalized xywh -> pixel xyxy in the letterboxed frame.
            labels = label_origin.copy()
            labels[:, 1] = ratio[0] * w * (label_origin[:, 1] - label_origin[:, 3] / 2) + pad[0]
            labels[:, 2] = ratio[1] * h * (label_origin[:, 2] - label_origin[:, 4] / 2) + pad[1]
            labels[:, 3] = ratio[0] * w * (label_origin[:, 1] + label_origin[:, 3] / 2) + pad[0]
            labels[:, 4] = ratio[1] * h * (label_origin[:, 2] + label_origin[:, 4] / 2) + pad[1]

        nL = len(labels)  # number of labels
        if nL:
            # Back to xywh, normalized 0-1 against the padded image.
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        # Column 0 is reserved for the batch index (set in collate_fn).
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # HWC BGR -> CHW RGB, contiguous so torch.from_numpy can wrap it.
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, img_path, shapes

    @staticmethod
    def collate_fn(batch):
        """Stack images and concat labels, writing each sample's batch index
        into label column 0 so build_targets() can map boxes to images."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
    


if __name__ == '__main__':
    # Smoke test: build the merged RGB dataset and pull a single sample.
    dataset = BigHandDetectionDataset()
    sample = dataset[0]
    print()