from pathlib import Path
import json
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset

from .directory import DSECDirectory
from .io import extract_from_h5_by_timewindow

from typing import List, Tuple, Iterable, Dict, Callable, Union, Optional
from rich import print
from rich.progress import track
import torchvision.transforms as T
import torchvision.transforms.functional as F


def compute_img_idx_to_track_idx(t_track, t_image):
    """For every image timestamp, compute the [begin, end) range of track rows
    carrying that timestamp.

    Assumes `t_track` is grouped/ordered by timestamp, so each image's tracks
    occupy one contiguous run of the track array — TODO confirm with the
    track-file layout.

    Args:
        t_track: 1-D array of per-track timestamps (may repeat).
        t_image: 1-D array of image timestamps.

    Returns:
        uint64 array of shape (len(t_image), 2) with [begin, end) indices into
        the track array; images without matching tracks get an empty range.
    """
    unique_ts, per_ts_counts = np.unique(t_track, return_counts=True)
    # Pair each unique track timestamp with the image timestamp it equals.
    ts_pos, img_pos = (unique_ts.reshape((-1, 1)) == t_image.reshape((1, -1))).nonzero()

    tracks_per_image = np.zeros_like(t_image)
    tracks_per_image[img_pos] = per_ts_counts[ts_pos]

    # Exclusive prefix sum turns per-image counts into [begin, end) offsets.
    boundaries = np.concatenate([np.array([0]), tracks_per_image]).cumsum()
    return np.column_stack([boundaries[:-1], boundaries[1:]]).astype("uint64")


@torch.jit.script
def events_to_tensor(target: torch.Tensor, t: torch.Tensor, p: torch.Tensor, y: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Accumulate events into `target`, a (seq_len, C, H, W) count tensor.

    Timestamps are linearly binned into `seq_len` temporal slices, then each
    event increments target[bin, p, y, x] in place.

    Args:
        target: count tensor to accumulate into (modified in place).
        t: event timestamps (float tensor).
        p, y, x: event polarity / row / column as float tensors whose values
            must be valid indices for the corresponding target dimensions.

    Returns:
        `target` (the same tensor, for convenience).
    """
    seq_len, channels, height, width = target.shape
    shape = target.shape

    if t.numel() == 0:
        # No events in the window: nothing to accumulate.
        return target

    min_ts, max_ts = torch.min(t), torch.max(t)
    span = max_ts - min_ts
    if bool(span > 0):
        t = torch.clamp(torch.floor((t - min_ts) / span * seq_len), 0, seq_len - 1)
    else:
        # Degenerate window (all events share one timestamp): previously this
        # divided by zero and produced NaN indices; put everything in bin 0.
        t = torch.zeros_like(t)

    # Row-major flattening of (t, p, y, x) into indices of target.view(-1).
    indices = torch.zeros_like(t)
    for dim, tensor in enumerate((t, p, y, x)):
        indices = (indices + tensor) * (shape[dim + 1] if dim < 3 else 1)

    target.view(-1).scatter_add_(0, indices.long(), torch.ones_like(indices, dtype=target.dtype))

    return target


def label_bbox_filter(x: torch.Tensor, y: torch.Tensor, w: torch.Tensor, h: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Clip (x, y, w, h) pixel boxes to the 640x480 frame and filter tiny ones.

    Args:
        x, y: top-left corner coordinates in pixels.
        w, h: box width and height in pixels.

    Returns:
        (keep_mask, boxes): a boolean mask of boxes passing the size filters,
        and ALL boxes (not only the kept ones) as float64 (cx, cy, w, h).
    """
    clip = lambda a, b: torch.clamp(a, torch.zeros_like(a), torch.full_like(a, b))

    # Clip corners to the image: width 640, height 480. The vertical bound was
    # previously 640 as well, which let boxes below the 480-px frame survive.
    left = clip(x + 0.0, 640.0)
    right = clip(x + w, 640.0)
    up = clip(y + 0.0, 480.0)
    bottom = clip(y + h, 480.0)

    center_x = (left + right) / 2
    center_y = (up + bottom) / 2
    width = (right - left)
    height = (bottom - up)

    size = (width * height)
    diag = (width ** 2.0 + height ** 2.0) ** 0.5

    # Keep boxes with a reasonable area and at least 20 px on each side.
    area_filter = torch.logical_and(size >= 20.0, size <= (320.0 * 240.0))
    boundary_filter = torch.logical_and(diag >= 30.0, torch.logical_and(width >= 20.0, height >= 20.0))

    return torch.logical_and(area_filter, boundary_filter), torch.stack([center_x, center_y, width, height]).permute(1, 0).to(torch.float64)


def label_class_filter(labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Remap DSEC class ids onto the 2-class set {0: pedestrian, 1: car}.

    Source ids 0 -> 0 and 2/3/4 -> 1; every other id is marked invalid.

    Args:
        labels: tensor of raw class ids.

    Returns:
        (keep_mask, remapped): boolean mask of labels with a known class, and
        the remapped label tensor (entries of dropped classes stay 0 and are
        expected to be filtered out via the mask).
    """
    # source class id -> target class id; absent ids are dropped.
    class_map = {0: 0, 2: 1, 3: 1, 4: 1}
    keep = torch.zeros(labels.shape, dtype=torch.bool, device=labels.device)
    remapped = torch.zeros_like(labels)
    for src, dest in class_map.items():
        hit = labels == src
        keep = torch.logical_or(keep, hit)
        remapped = torch.where(hit, torch.full_like(remapped, dest), remapped)
    return keep, remapped


def random_horizontal_flip(image, events, labels, p=0.5):
    """Mirror image, events and boxes horizontally with probability `p`.

    `labels[:, 0]` holds the normalized box centre x, so a flip maps it to
    1 - cx (clamped to [0, 1]). `labels` is modified in place when flipped.
    """
    if torch.rand(1).item() >= p:
        return image, events, labels
    labels[:, 0] = torch.clamp(1.0 - labels[:, 0], 0, 1)
    return F.hflip(image), F.hflip(events), labels


import torch
import torchvision.transforms.functional as F
import torchvision.transforms as T

def random_crop(image, events, labels, label_classes, size, p=0.2):
    """Randomly crop `image`/`events` to `size` with probability `p`.

    `labels` are normalized (cx, cy, w, h) boxes in [0, 1]; they are converted
    to pixels, clipped to the crop window, re-normalized w.r.t. the crop size,
    and boxes left with zero width or height are dropped together with their
    rows in `label_classes`.
    """
    if torch.rand(1) < p:
        i, j, h, w = T.RandomCrop.get_params(image, output_size=size)
        # Height and width of the original (pre-crop) image.
        height, width = image.shape[-2], image.shape[-1]

        # Convert labels from normalized form to absolute position and size.
        labels_abs = labels.clone()
        labels_abs[:, 0] = labels[:, 0] * width  # centre x in pixels
        labels_abs[:, 1] = labels[:, 1] * height  # centre y in pixels
        labels_abs[:, 2] = labels[:, 2] * width  # box width in pixels
        labels_abs[:, 3] = labels[:, 3] * height  # box height in pixels

        # Box corner coordinates.
        x_min = labels_abs[:, 0] - labels_abs[:, 2] / 2
        x_max = labels_abs[:, 0] + labels_abs[:, 2] / 2
        y_min = labels_abs[:, 1] - labels_abs[:, 3] / 2
        y_max = labels_abs[:, 1] + labels_abs[:, 3] / 2

        # Shift corners into crop coordinates and clip them to the crop window.
        x_min = torch.clamp(x_min - j, 0, w)
        x_max = torch.clamp(x_max - j, 0, w)
        y_min = torch.clamp(y_min - i, 0, h)
        y_max = torch.clamp(y_max - i, 0, h)

        # Rebuild (cx, cy, w, h) from the clipped corners.
        labels_abs[:, 0] = (x_min + x_max) / 2  # updated centre x
        labels_abs[:, 1] = (y_min + y_max) / 2  # updated centre y
        labels_abs[:, 2] = x_max - x_min       # updated width
        labels_abs[:, 3] = y_max - y_min       # updated height

        # Drop boxes that lie completely outside the crop.
        keep = (x_max > x_min) & (y_max > y_min)

        # Filter boxes and their classes together.
        labels_abs = labels_abs[keep]
        label_classes = label_classes[keep]

        # Re-normalize the surviving boxes w.r.t. the crop size.
        if labels_abs.shape[0] > 0:
            labels = torch.zeros_like(labels_abs)
            labels[:, 0] = labels_abs[:, 0] / w
            labels[:, 1] = labels_abs[:, 1] / h
            labels[:, 2] = labels_abs[:, 2] / w
            labels[:, 3] = labels_abs[:, 3] / h
        else:
            labels = torch.empty((0, 4))  # no boxes survived the crop

        # Crop the image and event tensors themselves.
        image = F.crop(image, i, j, h, w)
        events = F.crop(events, i, j, h, w)

    return image, events, labels, label_classes


def random_translate(image, events, labels, label_classes, max_translate=0.2):
    """Randomly translate image/events by up to `max_translate` of each spatial
    dimension (applied unconditionally, no probability gate).

    Normalized (cx, cy, w, h) boxes are shifted by the same offset, clipped to
    the frame, and boxes shifted completely out of view are dropped together
    with their rows in `label_classes`.
    """
    tx = torch.empty(1).uniform_(-max_translate, max_translate).item()
    ty = torch.empty(1).uniform_(-max_translate, max_translate).item()
    
    # Translation in pixels along x (width) and y (height).
    tx_px, ty_px = int(tx * image.shape[-1]), int(ty * image.shape[-2])
    image = F.affine(image, angle=0, translate=(tx_px, ty_px), scale=1, shear=0)
    events = F.affine(events, angle=0, translate=(tx_px, ty_px), scale=1, shear=0)
    
    # Convert labels from normalized form to absolute position and size.
    labels_abs = labels.clone()
    height, width = image.shape[-2], image.shape[-1]
    labels_abs[:, 0] = labels[:, 0] * width  # centre x in pixels
    labels_abs[:, 1] = labels[:, 1] * height  # centre y in pixels
    labels_abs[:, 2] = labels[:, 2] * width  # box width in pixels
    labels_abs[:, 3] = labels[:, 3] * height  # box height in pixels
    
    # Box corner coordinates.
    x_min = labels_abs[:, 0] - labels_abs[:, 2] / 2
    x_max = labels_abs[:, 0] + labels_abs[:, 2] / 2
    y_min = labels_abs[:, 1] - labels_abs[:, 3] / 2
    y_max = labels_abs[:, 1] + labels_abs[:, 3] / 2

    # Apply the same pixel translation to the boxes.
    x_min += tx_px
    x_max += tx_px
    y_min += ty_px
    y_max += ty_px

    # Clip the translated corners back to the image.
    x_min = torch.clamp(x_min, 0, width)
    x_max = torch.clamp(x_max, 0, width)
    y_min = torch.clamp(y_min, 0, height)
    y_max = torch.clamp(y_max, 0, height)

    # Rebuild (cx, cy, w, h) from the clipped corners.
    labels_abs[:, 0] = (x_min + x_max) / 2  # updated centre x
    labels_abs[:, 1] = (y_min + y_max) / 2  # updated centre y
    labels_abs[:, 2] = x_max - x_min       # updated width
    labels_abs[:, 3] = y_max - y_min       # updated height

    # Drop boxes that were shifted completely outside the image.
    keep = (x_max > x_min) & (y_max > y_min)

    # Filter boxes and their classes together.
    labels_abs = labels_abs[keep]
    label_classes = label_classes[keep]

    # Re-normalize the surviving boxes.
    if labels_abs.shape[0] > 0:
        labels = torch.zeros_like(labels_abs)
        labels[:, 0] = labels_abs[:, 0] / width
        labels[:, 1] = labels_abs[:, 1] / height
        labels[:, 2] = labels_abs[:, 2] / width
        labels[:, 3] = labels_abs[:, 3] / height
    else:
        labels = torch.empty((0, 4))  # no boxes survived the translation
    
    return image, events, labels, label_classes



def random_zoom(image, events, labels, scale_range=(0.8, 1.2)):
    """Resize image and events by one random factor drawn from `scale_range`.

    The normalized boxes are returned unchanged: scaling the whole frame
    uniformly leaves (cx, cy, w, h) in [0, 1] coordinates valid.
    """
    factor = torch.empty(1).uniform_(*scale_range).item()
    in_h, in_w = image.shape[-2], image.shape[-1]
    out_size = (int(in_h * factor), int(in_w * factor))

    def _resize(tensor):
        return F.resize(tensor, out_size, interpolation=T.InterpolationMode.BILINEAR, antialias=True)

    return _resize(image), _resize(events), labels


def apply_transforms(image, events, labels, label_classes):
    """Training-time augmentation pipeline: flip -> zoom -> translate -> crop.

    The zoom uses factors >= 1 and the final crop (always applied, p=1) brings
    the sample to a fixed 240x320 resolution.
    """
    img, ev, lb = random_horizontal_flip(image, events, labels, p=0.5)
    img, ev, lb = random_zoom(img, ev, lb, scale_range=(1, 1.5))
    img, ev, lb, cls = random_translate(img, ev, lb, label_classes, max_translate=0.1)
    return random_crop(img, ev, lb, cls, size=(240, 320), p=1)


class DSECDet(Dataset):
    """DSEC object-detection dataset.

    Each sample yields (image, event_tensor, boxes, classes) for one frame:
    the RGB image, the events of the neighbouring frame interval binned into a
    (seq_len, 2, H, W) count tensor, the normalized (cx, cy, w, h) boxes and
    their remapped class ids.
    """

    dataset_type = "dsec"
    # Event tensor shape before spatial down-sampling: (seq_len, polarity, H, W).
    dataset_shape = (10, 2, 480, 640)
    split_config = {
        "train": ["thun_00_a", "interlaken_00_c", "interlaken_00_d", "interlaken_00_e", "interlaken_00_f", "interlaken_00_g", "zurich_city_00_a", "zurich_city_00_b", "zurich_city_01_a", "zurich_city_01_b", "zurich_city_01_c", "zurich_city_01_d", "zurich_city_01_e", "zurich_city_01_f", "zurich_city_02_a", "zurich_city_02_b", "zurich_city_02_c", "zurich_city_02_d", "zurich_city_02_e", "zurich_city_03_a", "zurich_city_04_a", "zurich_city_04_b", "zurich_city_04_c", "zurich_city_04_d", "zurich_city_04_e", "zurich_city_04_f", "zurich_city_05_a", "zurich_city_05_b", "zurich_city_06_a", "zurich_city_07_a", "zurich_city_08_a", "zurich_city_09_a", "zurich_city_09_b", "zurich_city_09_c", "zurich_city_09_d", "zurich_city_09_e", "zurich_city_10_a", "zurich_city_10_b", "zurich_city_11_a", "zurich_city_11_b", "zurich_city_11_c"],
        "val": ["zurich_city_16_a", "zurich_city_17_a", "zurich_city_18_a", "zurich_city_19_a", "zurich_city_20_a", "zurich_city_21_a"],
        "test": ["thun_01_a", "thun_01_b", "thun_02_a", "interlaken_00_a", "interlaken_00_b", "interlaken_01_a", "zurich_city_12_a", "zurich_city_13_a", "zurich_city_13_b", "zurich_city_14_a", "zurich_city_14_b", "zurich_city_14_c", "zurich_city_15_a"]
    }
    down_sample = 1
    # Field names of the structured numpy track array.
    label_columns = ("x", "y", "w", "h", "class_id", "class_confidence")
    classes = {
        0: "pedestrian",
        1: "car"
    }


    def __init__(self, root: Union[str, Path], split: str, transform: Optional[Callable] = None, seq_len: int = 10, ds_factor: int = 2, sync: str = "back", augment: bool = False):
        """
        root: Root of the DSEC dataset (the directory that contains 'train' and 'test').
        split: One of ['train', 'val', 'test']. Anything but 'train' is read from the 'test' directory.
        transform: Optional callable applied to both the image and the event tensor.
        seq_len: Number of temporal bins of the event tensor.
        ds_factor: Spatial down-sampling factor applied to image and events.
        augment: Apply the random augmentation pipeline in __getitem__.
        sync: Can be either 'front' (last event ts), or 'back' (first event ts). Whether the front of the window or
              the back of the window is synced with the images.

        Each sample of this dataset loads one image, events, and labels at a timestamp. The behavior is different for
        sync='front' and sync='back', and these are visualized below.

        Legend:
        . = events
        | = image
        L = label

        sync='front'
        -------> time
        .......|
               L

        sync='back'
        -------> time
        |.......
               L
        """
        super().__init__()
        if not isinstance(root, Path):
            root = Path(root)
        assert root.exists()
        # NOTE(review): every non-'train' split (including 'val') is read from
        # the 'test' directory and uses split_config[<dir>] -- confirm this
        # matches the on-disk layout.
        split = "train" if split == "train" else "test"
        assert (root / split).exists()
        assert sync in ['front', 'back']

        self.root = root
        self.split = split
        self.sync = sync
        self.transform = transform
        self.augment = augment

        self.sequence_len = seq_len
        # Shrink the spatial dimensions of the event tensor by ds_factor.
        self.dataset_shape = (self.sequence_len, 2, self.dataset_shape[-2] // ds_factor, self.dataset_shape[-1] // ds_factor)
        self.down_sample = ds_factor

        self.directories = dict()
        self.img_idx_track_idxs = dict()

        # Keep only configured sequences that actually exist on disk, ordered
        # by the timestamp of their first image.
        available_dirs = list((self.root / self.split).glob("*/"))
        self.subsequence_directories = [(self.root / self.split / s) for s in self.split_config[split] if (self.root / self.split / s) in available_dirs]

        self.subsequence_directories = sorted(self.subsequence_directories, key=self.first_time_from_subsequence)
        self.data_list = self.get_data_list()


    def get_data_list(self, force_reload: bool = False) -> List:
        """Return the indices of samples containing at least one valid box.

        The result is cached in '<split>_cache.json' under the dataset root;
        pass force_reload=True to ignore and rebuild the cache. Also populates
        self.directories and self.img_idx_track_idxs as a side effect.
        """
        for f in self.subsequence_directories:
            directory = DSECDirectory(f)
            self.directories[f.name] = directory
            self.img_idx_track_idxs[f.name] = compute_img_idx_to_track_idx(directory.tracks.tracks['t'],
                                                                           directory.images.timestamps)
        cache_filename = self.root / (self.split + "_cache.json")
        res = []
        loaded = False
        if cache_filename.exists() and not force_reload:
            try:
                with open(cache_filename, "r", encoding = "utf-8") as f:
                    res = json.load(f)
                loaded = True
            except Exception:
                # Unreadable/corrupt cache: fall through and rebuild it.
                loaded = False
        if not loaded:
            # Each sequence contributes one candidate per image transition.
            for idx in track(range(sum(len(v)-1 for v in self.img_idx_track_idxs.values())), description = "Generating data list"):
                labels = self.get_tracks(idx)
                labels_classes = torch.from_numpy(labels["class_id"].copy())
                if len(labels_classes):
                    valid_indices, _ = label_class_filter(labels_classes)
                    valid_boxes, _ = label_bbox_filter(*(torch.from_numpy(labels[k].copy()) for k in ("x", "y", "w", "h")))
                    # Keep the sample if any box passes both filters.
                    if torch.sum(torch.logical_and(valid_indices, valid_boxes)):
                        res.append(idx)
            with open(cache_filename, "w", encoding = "utf-8") as f:
                json.dump(res, f)
        return res


    def first_time_from_subsequence(self, subsequence):
        """Timestamp of the first image of a sequence directory (sort key)."""
        return np.genfromtxt(subsequence / "images/timestamps.txt", dtype="int64")[0]


    def __len__(self):
        return len(self.data_list)


    def __getitem__(self, item) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (image, event_tensor, boxes, classes) for the item-th valid sample."""
        item = self.data_list[item]

        # Filter/remap the labels and normalize boxes to [0, 1].
        tracks = self.get_tracks(item)
        valid_bboxes, label_bboxes = label_bbox_filter(*(torch.from_numpy(tracks[k].astype(np.float64)) for k in ("x", "y", "w", "h")))
        valid_classes, label_classes = label_class_filter(torch.from_numpy(tracks["class_id"].astype(np.float32)))
        label_bboxes[:, ::2] /= 640.0
        label_bboxes[:, 1::2] /= 480.0
        valid = torch.logical_and(valid_bboxes, valid_classes)
        label_bboxes = label_bboxes[valid]
        label_classes = label_classes[valid][..., None]

        # Image, spatially down-sampled to match the event tensor.
        image = self.get_image(item)
        image = torch.from_numpy(image).permute(2, 0, 1).float()
        if self.down_sample:
            image = torch.nn.functional.avg_pool2d(image[None], self.down_sample)[0]

        # Bin the events of the adjacent frame interval into a count tensor.
        events = self.get_events(item)
        ev_repr = events_to_tensor(
            torch.zeros(self.dataset_shape, dtype = torch.uint8),
            torch.from_numpy(events["t"].astype(np.float32)),
            torch.from_numpy(events["p"].astype(np.float32)),
            torch.from_numpy((events["y"] // self.down_sample).astype(np.float32)),
            torch.from_numpy((events["x"] // self.down_sample).astype(np.float32))
        )
        if self.augment:
            image, ev_repr, label_bboxes, label_classes = apply_transforms(image, ev_repr, label_bboxes, label_classes)
        if self.transform is not None:
            image = self.transform(image)
            ev_repr = self.transform(ev_repr)
        return image, ev_repr, label_bboxes, label_classes


    def get_index_window(self, index, num_idx, sync="back"):
        """Map a sample index to the (i_0, i_1) image-index pair bounding its
        event window: (index-1, index) for 'front', (index, index+1) for 'back'.
        """
        if sync == "front":
            assert 0 < index < num_idx
            i_0 = index - 1
            i_1 = index
        else:
            assert 0 <= index < num_idx - 1
            i_0 = index
            i_1 = index + 1

        return i_0, i_1


    def get_tracks(self, index, mask=None, directory_name=None):
        """Return the track rows of the label frame for `index`, with every
        box clipped to the 640x480 image."""
        index, img_idx_to_track_idx, directory = self.rel_index(index, directory_name)
        i_0, i_1 = self.get_index_window(index, len(img_idx_to_track_idx), sync=self.sync)
        idx0, idx1 = img_idx_to_track_idx[i_1]
        tracks = directory.tracks.tracks[idx0:idx1]

        if mask is not None:
            tracks = tracks[mask[idx0:idx1]]

        # Clip each box's corners to the image, then rebuild x/y/w/h in place
        # on a copy so the memory-mapped source stays untouched.
        tracks = tracks.copy()
        x1, y1 = tracks['x'], tracks['y']
        x2, y2 = x1 + tracks['w'], y1 + tracks['h']

        x1 = np.clip(x1, 0, 640-1)
        x2 = np.clip(x2, 0, 640-1)

        y1 = np.clip(y1, 0, 480-1)
        y2 = np.clip(y2, 0, 480-1)

        tracks['x'] = x1
        tracks['y'] = y1
        tracks['w'] = x2-x1
        tracks['h'] = y2-y1

        return tracks


    def get_events(self, index, directory_name=None):
        """Load the raw events between the two images bounding sample `index`."""
        index, img_idx_to_track_idx, directory = self.rel_index(index, directory_name)
        i_0, i_1 = self.get_index_window(index, len(img_idx_to_track_idx), sync=self.sync)
        t_0, t_1 = directory.images.timestamps[[i_0, i_1]]
        events = extract_from_h5_by_timewindow(directory.events.event_file, t_0, t_1)
        return events


    def get_image(self, index, directory_name=None):
        """Load the (distorted) image for sample `index` as a numpy array
        (cv2.imread returns BGR channel order)."""
        index, img_idx_to_track_idx, directory = self.rel_index(index, directory_name)
        image_files = directory.images.image_files_distorted
        image = cv2.imread(str(image_files[index]))
        return image


    def rel_index(self, index, directory_name=None):
        """Resolve a global sample index to (local_index, img->track map, directory).

        With `directory_name` given, `index` is taken as local to that
        sequence. Otherwise the sequences are scanned in order, each one
        contributing len(map) - 1 samples.
        """
        if directory_name is not None:
            img_idx_to_track_idx = self.img_idx_track_idxs[directory_name]
            directory = self.directories[directory_name]
            return index, img_idx_to_track_idx, directory

        for f in self.subsequence_directories:
            img_idx_to_track_idx = self.img_idx_track_idxs[f.name]
            if len(img_idx_to_track_idx)-1 <= index:
                index -= (len(img_idx_to_track_idx)-1)
                continue
            else:
                return index, img_idx_to_track_idx, self.directories[f.name]
        else:
            raise ValueError("sample index out of range for this dataset")


def dsec_collate(raw_data: List[Tuple[torch.Tensor, torch.Tensor]]) -> Dict:
    """Collate DSEC samples into a YOLO-style batch dictionary.

    Each sample is (image, event_tensor, boxes, classes). The image is
    replicated along the event sequence axis and concatenated with the events
    channel-wise; all boxes/classes are flattened across the batch, with
    'batch_idx' recording which sample every box came from.
    """
    batch_indices = []
    fused_frames = []
    all_boxes = []
    all_classes = []

    for sample_id, (img, ev, box, cls) in enumerate(raw_data):
        seq_len = ev.shape[0]
        replicated_img = torch.stack([img] * seq_len)
        # (seq_len, C_img + C_ev, H, W) per sample.
        fused_frames.append(torch.cat([replicated_img, ev], dim = 1))
        all_boxes.append(box)
        all_classes.append(cls)
        batch_indices.extend([sample_id] * len(box))

    # `img` still refers to the last sample; all samples share one resolution.
    spatial = (img.shape[-2], img.shape[-1])
    batch = len(raw_data)

    return {
        'im_file': None,
        'ori_shape': [spatial] * batch,
        'resized_shape': [spatial] * batch,
        # (batch, seq, C, H, W) -> (seq, batch, C, H, W)
        'img': torch.stack(fused_frames).permute(1, 0, 2, 3, 4),
        'cls': torch.cat(all_classes, dim = 0),
        'bboxes': torch.cat(all_boxes, dim = 0),
        'batch_idx': torch.tensor(batch_indices, dtype = torch.float32),
        'ratio_pad': [((1, 1), (0, 0))] * batch
    }