import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.utils import xyxy2xywh, xywh2xyxy

def event2img(event, num_frame=1, w=1280, h=720):
    """Render an event stream into one or more accumulation images.

    Parameters
    ----------
    event : indexable of records with fields 'x', 'y', 'p', 't'
        Event stream (e.g. a structured numpy array). Assumed sorted by
        timestamp 't' in ascending order — TODO confirm with the recorder.
    num_frame : int
        Number of output frames; the stream is split evenly by time.
    w, h : int
        Output image width / height in pixels.

    Returns
    -------
    A single (h, w, 3) uint8 image when num_frame == 1, otherwise a list of
    num_frame such images. Positive-polarity events paint (200, 200, 200),
    all others (100, 100, 100); a later event at the same pixel overwrites
    an earlier one.
    """
    # Gray levels for positive / non-positive polarity.
    pos_color, neg_color = [200, 200, 200], [100, 100, 100]

    if num_frame == 1:
        image = np.zeros((h, w, 3), dtype=np.uint8)
        for e in event:
            image[int(e['y']), int(e['x']), ...] = pos_color if e['p'] == 1 else neg_color
        return image

    images = [np.zeros((h, w, 3), dtype=np.uint8) for _ in range(num_frame)]
    if len(event) == 0:
        # No events: return blank frames instead of failing on event[0].
        return images

    # Time range of the whole stream.
    # BUG FIX: the original read e[0]['t'] / e[-1]['t'], indexing the
    # undefined loop variable `e` (NameError); the stream itself must be
    # indexed.
    t_min, t_max = int(event[0]['t']), int(event[-1]['t'])
    # Split [t_min, t_max] into num_frame equal time bins.
    t_bins = np.linspace(t_min, t_max, num_frame + 1)
    for e in event:
        # Locate the time bin of this event; clamp so the final timestamp
        # (which falls on the right edge of the last bin) lands in the
        # last frame.
        frame_idx = np.searchsorted(t_bins, e['t'], side='right') - 1
        frame_idx = min(max(frame_idx, 0), num_frame - 1)
        images[frame_idx][int(e['y']), int(e['x']), ...] = pos_color if e['p'] == 1 else neg_color
    return images


class SmallHandDetectionDataset(Dataset):
    """One recording session of paired event frames, RGB images and YOLO labels.

    Expects three sibling sub-directories under root_dir whose sorted file
    listings correspond index-by-index; image/label are read one index ahead
    of the event frame (presumably a one-frame latency offset — TODO confirm).
    """

    def __init__(self, root_dir, event_subdir="event_numpy", img_subdir='image',
                 label_subdir='label', is_train=True, event_or_image="event"):
        self.istrain = is_train

        self.event_subdir = os.path.join(root_dir, event_subdir)
        self.img_subdir = os.path.join(root_dir, img_subdir)
        self.label_subdir = os.path.join(root_dir, label_subdir)

        self.event_files = sorted(os.listdir(self.event_subdir))
        self.img_files = sorted(os.listdir(self.img_subdir))
        self.label_files = sorted(os.listdir(self.label_subdir))

        # FIX: reuse the listings already loaded instead of hitting the
        # filesystem a second time (the extra listdir calls could disagree
        # with the sorted lists if files appear concurrently).
        self.count = min(len(self.event_files), len(self.img_files))

        # "event", "image", or anything else (= load both modalities).
        self.event_or_image = event_or_image

    def __len__(self):
        # Samples are drawn in groups of 15 consecutive frames; the last
        # group is dropped so the idx+1 lookups below never run off the end.
        return self.count // 15 - 1

    def __getitem__(self, index):
        # Pick a random frame inside the 15-frame group (cheap temporal
        # augmentation).
        idx = random.randint(0, 14) + index * 15

        event_path = os.path.join(self.event_subdir, self.event_files[idx])
        # Image and label are taken one step ahead of the event frame.
        img_path = os.path.join(self.img_subdir, self.img_files[idx + 1])
        label_path = os.path.join(self.label_subdir, self.label_files[idx + 1])

        image, event = None, None
        if self.event_or_image == "event":
            event = event2img(np.load(event_path), num_frame=1)
        elif self.event_or_image == "image":
            image = cv2.imread(img_path)
        else:
            image = cv2.imread(img_path)
            event = event2img(np.load(event_path), num_frame=1)

        # Label file is YOLO-format txt:
        # "cls x_center y_center w h" per line, all normalized to [0, 1].
        with open(label_path, 'r') as f:
            label_text = f.read()

        # FIX: f.read() returns '' (never None) for an empty file, so the
        # old `label is None` branch was dead and an empty label file
        # crashed the shape assertion below.
        if not label_text.strip():
            label = np.zeros((0, 5), dtype=np.float32)
        else:
            label = np.array([x.split() for x in label_text.splitlines()],
                             dtype=np.float32)
            assert label.shape[1] == 5, '> 5 label columns: %s' % label_path

        return image, event, label, img_path

from torch.utils.data import ConcatDataset

class BigHandDetectionDataset(Dataset):
    """Concatenation of every recording session found under `path`.

    Each item is a letterboxed (image_tensor, event_tensor, labels_out,
    img_path) tuple for YOLO-style training. Labels arrive from the
    sub-datasets as normalized xywh and leave as an (nL, 6) tensor
    [batch_idx, cls, x, y, w, h] with batch_idx filled in by collate_fn.
    """

    def __init__(self,
                 path="/home_origin/ChengZY/data/hand_detection",
                 is_train=False,
                 event_or_image="event",
                 img_size=1280,
                 rect=True,
                 batch_size=16,
                 hyp=None):  # no augmentation (flips etc.) is applied here

        self.top_path = path

        # Recording sessions to include, relative to top_path.
        self.data_list = ["/output_2cam_0807_0",
                          "/output_2cam_0807_1",
                          "/output_2cam_0808_0",
                          "/output_2cam_0808_1",
                          "/output_2cam_0808_2",
                          "/output_2cam_0808_3", ]

        self.dataset_list = []
        for data in self.data_list:
            # Skip sessions that are not present on disk.
            if os.path.isdir(self.top_path + data):
                for sub in os.listdir(self.top_path + data):
                    sub_path = os.path.join(self.top_path + data, sub)
                    dataset = SmallHandDetectionDataset(
                        sub_path, "event_numpy", "rgb_wrap",
                        "rgb_wrap_window_new", is_train, event_or_image)
                    self.dataset_list.append(dataset)

        # Merge all per-session datasets into one index space.
        self.concat_dataset = ConcatDataset(self.dataset_list)
        del self.dataset_list

        self.hyp = hyp
        self.rect = rect
        self.img_size = img_size
        self.batch_size = batch_size

    def __len__(self):
        return len(self.concat_dataset)

    def load_image(self, img):
        """Resize so the longer side equals img_size (never upscales past r=1
        logic below). Returns (img, (h0, w0), (h, w))."""
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            # INTER_AREA gives better quality when shrinking.
            interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized

    def for_letter_box(self, img, new_shape=(416, 416), color=(0, 0, 0),
                       auto=False, scaleFill=False, scaleup=False):
        """Letterbox `img` into new_shape, padding with `color`.

        Returns (img, (rw, rh) scale ratios, (dw, dh) one-side paddings).
        See https://github.com/ultralytics/yolov3/issues/232
        """
        shape = img.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old).
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up (for better test mAP)
            r = min(r, 1.0)

        # Compute padding.
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle: pad only to the next multiple of 64
            dw, dh = np.mod(dw, 64), np.mod(dh, 64)
        elif scaleFill:  # stretch to fill, no padding
            dw, dh = 0.0, 0.0
            new_unpad = new_shape
            ratio = new_shape[0] / shape[1], new_shape[1] / shape[0]  # width, height ratios

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        # +/-0.1 rounding splits an odd padding across the two sides.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right,
                                 cv2.BORDER_CONSTANT, value=color)  # add border
        return img, ratio, (dw, dh)

    def __getitem__(self, index):
        image, event, label_origin, img_path = self.concat_dataset[index]

        # FIX: in "event"-only or "image"-only mode the unused modality is
        # returned as None by the sub-dataset and the original code crashed
        # on None.shape. Use a zero frame of the same size for the missing
        # stream so single-mode loading works; "both" mode is unchanged.
        if image is None and event is not None:
            image = np.zeros_like(event)
        if event is None and image is not None:
            event = np.zeros_like(image)

        hyp = self.hyp  # currently unused; kept for parity with YOLO loaders

        img, (h0, w0), (h, w) = self.load_image(image)
        eve, (h0, w0), (h, w) = self.load_image(event)

        # Letterbox both streams onto a square img_size canvas.
        # NOTE(review): ratio/pad from the event letterbox overwrite the
        # image's — assumes both modalities share the same original size;
        # verify against the recorder.
        shape = self.img_size
        img, ratio, pad = self.for_letter_box(img, shape, auto=False, scaleup=False)
        eve, ratio, pad = self.for_letter_box(eve, shape, auto=False, scaleup=False)

        # Kept for COCO mAP rescaling; not currently returned.
        shapes = (h0, w0), ((h / h0, w / w0), pad)

        # Convert normalized xywh labels to letterboxed pixel xyxy.
        labels = []
        if label_origin.size > 0:
            labels = label_origin.copy()
            labels[:, 1] = ratio[0] * w * (label_origin[:, 1] - label_origin[:, 3] / 2) + pad[0]  # pad width
            labels[:, 2] = ratio[1] * h * (label_origin[:, 2] - label_origin[:, 4] / 2) + pad[1]  # pad height
            labels[:, 3] = ratio[0] * w * (label_origin[:, 1] + label_origin[:, 3] / 2) + pad[0]
            labels[:, 4] = ratio[1] * h * (label_origin[:, 2] + label_origin[:, 4] / 2) + pad[1]

        nL = len(labels)  # number of labels
        if nL:
            # Back to xywh, normalized to the final canvas size.
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        # Column 0 is reserved for the batch index (filled by collate_fn).
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # HWC BGR -> CHW RGB, contiguous for torch.from_numpy.
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        # Event frame is synthetic, so no channel flip — only HWC -> CHW.
        eve = eve.transpose(2, 0, 1)
        eve = np.ascontiguousarray(eve)

        return torch.from_numpy(img), torch.from_numpy(eve), labels_out, img_path

    @staticmethod
    def collate_fn(batch):
        """Stack a list of samples; writes the sample index into label col 0."""
        img, eve, label, path = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.stack(eve, 0), torch.cat(label, 0), path
    

def crop_702(img, labels):
    """Crop a 1280x720 frame to 702x702 and shift the labels with it.

    labels: (N, 5) array of [cls, x1, y1, x2, y2] in pixel coordinates.
    Returns (img_crop, labels_crop). Boxes that no longer fit inside the
    crop are dropped; when a single box cannot be enclosed at all — or
    when there are more than two boxes — a black crop with no labels is
    returned.
    """
    H, W, _ = img.shape
    assert W == 1280 and H == 720, "输入图像尺寸必须为1280x720"

    n = labels.shape[0]

    # No boxes: any random 702x702 window will do.
    if n == 0:
        x0 = np.random.randint(0, W - 702 + 1)
        y0 = np.random.randint(0, H - 702 + 1)
        return (img[y0:y0 + 702, x0:x0 + 702, :],
                np.zeros((0, 5), dtype=labels.dtype))

    # One box: choose a window that fully encloses it.
    if n == 1:
        x1, y1, x2, y2 = labels[0, 1:].astype(int)
        # Valid range for the window's top-left corner.
        lo_x, hi_x = max(0, x2 - 702), min(x1, W - 702)
        lo_y, hi_y = max(0, y2 - 702), min(y1, H - 702)
        if lo_x > hi_x or lo_y > hi_y:
            # The box is too large to enclose: black crop, no labels.
            return (np.zeros((702, 702, 3), dtype=img.dtype),
                    np.zeros((0, 5), dtype=labels.dtype))
        x0 = np.random.randint(lo_x, hi_x + 1)
        y0 = np.random.randint(lo_y, hi_y + 1)
        shifted = labels.copy()
        shifted[:, [1, 3]] -= x0
        shifted[:, [2, 4]] -= y0
        # Drop the label if (despite the window choice) it spills outside.
        if (shifted[0, 1] < 0 or shifted[0, 3] > 702 or
                shifted[0, 2] < 0 or shifted[0, 4] > 702):
            shifted = np.zeros((0, 5), dtype=labels.dtype)
        return img[y0:y0 + 702, x0:x0 + 702, :], shifted

    # Two boxes: take the left or right 702-wide half at random, then a
    # random vertical window; keep only the boxes fully inside.
    if n == 2:
        x0 = 0 if np.random.rand() < 0.5 else W - 702
        half = img[:, x0:x0 + 702, :]
        keep = (labels[:, 1] >= x0) & (labels[:, 3] <= x0 + 702)
        shifted = labels[keep].copy()
        shifted[:, [1, 3]] -= x0
        y0 = np.random.randint(0, H - 702 + 1) if H > 702 else 0
        crop = half[y0:y0 + 702, :, :]
        shifted = shifted.copy()
        shifted[:, [2, 4]] -= y0
        inside = ((shifted[:, 1] >= 0) & (shifted[:, 3] <= 702) &
                  (shifted[:, 2] >= 0) & (shifted[:, 4] <= 702))
        return crop, shifted[inside]

    # Three or more boxes are not supported: black crop, no labels.
    return (np.zeros((702, 702, 3), dtype=img.dtype),
            np.zeros((0, 5), dtype=labels.dtype))

class BigHandDetectionDataset_Ver2(Dataset):
    """Variant of BigHandDetectionDataset that random-crops frames to
    702x702 via crop_702 instead of letterboxing.

    Sub-datasets are built in (default) event-only mode, so the tensor
    returned as "image" is the rendered event frame.
    """

    def __init__(self,
                 path="/home_origin/ChengZY/data/hand_detection",
                 is_train=True,
                 img_size=704,
                 rect=True,
                 batch_size=16,
                 hyp=None):  # no augmentation (flips etc.) is applied here

        self.top_path = path

        # Recording sessions to include, relative to top_path.
        self.data_list = ["/output_2cam_0807_0",
                          "/output_2cam_0807_1",
                          "/output_2cam_0808_0",
                          "/output_2cam_0808_1",
                          "/output_2cam_0808_2",
                          "/output_2cam_0808_3", ]

        self.dataset_list = []
        for data in self.data_list:
            # FIX (consistency with BigHandDetectionDataset): skip sessions
            # missing on disk instead of crashing in os.listdir.
            if not os.path.isdir(self.top_path + data):
                continue
            for sub in os.listdir(self.top_path + data):
                sub_path = os.path.join(self.top_path + data, sub)
                dataset = SmallHandDetectionDataset(
                    sub_path, "event_numpy", "rgb_wrap",
                    "rgb_wrap_window_new", is_train)
                self.dataset_list.append(dataset)

        # Merge all per-session datasets into one index space.
        self.concat_dataset = ConcatDataset(self.dataset_list)
        del self.dataset_list

        self.hyp = hyp
        self.rect = rect
        self.img_size = img_size
        # NOTE(review): crops are 702x702 but img_size defaults to 704 and
        # must be a multiple of 64 — confirm the model pads/expects 704.
        assert img_size % 64 == 0, 'img_size % 64 != 0'
        self.batch_size = batch_size

    def __len__(self):
        return len(self.concat_dataset)

    def __getitem__(self, index):
        # FIX: SmallHandDetectionDataset returns (image, event, label, path);
        # the old 4-way unpack mislabelled the fields (the event frame was
        # treated as the labels) and then crashed on image=None because the
        # sub-datasets run in event-only mode.
        image, event, label_origin, img_path = self.concat_dataset[index]
        if image is None:
            # Event-only mode: train on the rendered event frame.
            image = event

        h, w = image.shape[:2]

        # Convert normalized (cls, xc, yc, w, h) labels to pixel xyxy for
        # crop_702 below.
        if label_origin.size > 0:
            labels = label_origin.copy()
            labels[:, 1] = w * (label_origin[:, 1] - label_origin[:, 3] / 2)
            labels[:, 2] = h * (label_origin[:, 2] - label_origin[:, 4] / 2)
            labels[:, 3] = w * (label_origin[:, 1] + label_origin[:, 3] / 2)
            labels[:, 4] = h * (label_origin[:, 2] + label_origin[:, 4] / 2)
        else:
            labels = np.zeros((0, 5), dtype=np.float32)

        # Random 702x702 crop with label bookkeeping.
        img, labels = crop_702(img=image, labels=labels)

        nL = len(labels)  # number of labels
        if nL:
            # Back to xywh, normalized to the crop size.
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        # Column 0 is reserved for the batch index (filled by collate_fn).
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # HWC BGR -> CHW RGB, contiguous for torch.from_numpy.
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, img_path

    @staticmethod
    def collate_fn(batch):
        """Stack a list of samples; writes the sample index into label col 0.

        The trailing None keeps the 4-tuple shape some training loops expect.
        """
        img, label, path = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, None
    




