import os
import random
import json
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset

from typing import List, Tuple, Iterable, Dict, Callable, Union, Optional
from rich import print
from rich.progress import track
import torchvision.transforms as T
import torchvision.transforms.functional as F


@torch.jit.script
def events_to_tensor(target: torch.Tensor, t: torch.Tensor, p: torch.Tensor, y: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Accumulate raw events into ``target`` (seq_len, channels, H, W) in place.

    Timestamps ``t`` are binned into ``seq_len`` equal slices over
    [min(t), max(t)]; ``p``/``y``/``x`` index the remaining dims directly.
    Each event increments its cell by one.  Returns ``target`` for convenience.
    Assumes the event tensors are non-empty and p/y/x are in range --
    callers guard the empty case (TODO confirm).
    """
    seq_len = target.shape[0]
    channels = target.shape[1]
    height = target.shape[2]
    width = target.shape[3]

    min_ts, max_ts = torch.min(t), torch.max(t)
    span = max_ts - min_ts
    if bool(span > 0):
        # Normalize timestamps into integer bins [0, seq_len - 1].
        t = torch.clamp(torch.floor((t - min_ts) / span * seq_len), 0, seq_len - 1)
    else:
        # All events share one timestamp: the original 0/0 normalization
        # produced NaN indices; put everything in the first time bin instead.
        t = torch.zeros_like(t)

    # Row-major flat index into target.view(-1):
    # ((t * C + p) * H + y) * W + x.
    flat = ((t * channels + p) * height + y) * width + x
    target.view(-1).scatter_add_(0, flat.long(), torch.ones_like(flat, dtype = target.dtype))

    return target


def label_bbox_filter(x: torch.Tensor, y: torch.Tensor, w: torch.Tensor, h: torch.Tensor, bound: float = 640.0) -> Tuple[torch.Tensor, torch.Tensor]:
    """Clip (x, y, w, h) corner-style boxes to [0, bound] and filter tiny boxes.

    Args:
        x, y: top-left corners; w, h: box extents (same-length 1-D tensors).
        bound: clipping limit for both axes (default 640, kept for backward
            compatibility).  NOTE(review): 640 exceeds this dataset's 346x260
            frames, so the clip is effectively a no-op here -- confirm the
            intended sensor size.

    Returns:
        (keep_mask, boxes): boolean mask of boxes passing the area
        (20 <= w*h <= 320*240) and size (diag >= 30, w >= 20, h >= 20)
        filters, and an (N, 4) float64 tensor of (cx, cy, w, h) boxes.
    """
    def clip(a: torch.Tensor, hi: float) -> torch.Tensor:
        return torch.clamp(a, torch.zeros_like(a), torch.full_like(a, hi))

    left = clip(x + 0.0, bound)
    right = clip(x + w, bound)
    up = clip(y + 0.0, bound)
    bottom = clip(y + h, bound)

    center_x = (left + right) / 2
    center_y = (up + bottom) / 2
    width = (right - left)
    height = (bottom - up)

    size = (width * height)
    diag = (width ** 2.0 + height ** 2.0) ** 0.5

    area_filter = torch.logical_and(size >= 20.0, size <= (320.0 * 240.0))
    boundary_filter = torch.logical_and(diag >= 30.0, torch.logical_and(width >= 20.0, height >= 20.0))

    keep = torch.logical_and(area_filter, boundary_filter)
    boxes = torch.stack([center_x, center_y, width, height]).permute(1, 0).to(torch.float64)
    return keep, boxes


def label_class_filter(labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Remap raw class ids and flag which labels survive.

    Raw id 0 -> 1 and raw id 1 -> 0; raw id 2 is dropped (mask False).
    Returns (keep_mask, remapped): dropped entries stay 0 in ``remapped``
    but are False in ``keep_mask``, matching the original behavior.
    """
    keep = torch.zeros_like(labels, dtype = torch.bool)
    remapped = torch.zeros_like(labels)
    # Explicit (source_id, new_id) table; any id not listed is filtered out.
    for src, dest in ((0, 1), (1, 0)):
        hit = labels == src
        keep = torch.logical_or(keep, hit)
        remapped = torch.where(hit, torch.full_like(remapped, dest), remapped)
    return keep, remapped


def random_horizontal_flip(image, events, labels, p=0.5):
    """With probability ``p``, mirror image/events horizontally and flip the
    normalized box center x of ``labels`` (mutated in place when flipping)."""
    if not bool(torch.rand(1) < p):
        return image, events, labels
    mirrored_image = F.hflip(image)
    mirrored_events = F.hflip(events)
    # Mirror the normalized box center around the vertical axis.
    labels[:, 0] = torch.clamp(1.0 - labels[:, 0], 0, 1)
    return mirrored_image, mirrored_events, labels


def random_crop(image, events, labels, label_classes, size, p=0.2):
    """With probability ``p``, take a random ``size`` = (h, w) crop of image and
    events, shifting/clipping/renormalizing the normalized (cx, cy, w, h)
    ``labels`` to the crop; boxes that end up fully outside are dropped
    together with their ``label_classes`` rows."""
    if torch.rand(1) < p:
        i, j, h, w = T.RandomCrop.get_params(image, output_size=size)
        # Original image height and width.
        height, width = image.shape[-2], image.shape[-1]

        # Convert labels from normalized form to absolute position and size.
        labels_abs = labels.clone()
        labels_abs[:, 0] = labels[:, 0] * width  # center x
        labels_abs[:, 1] = labels[:, 1] * height  # center y
        labels_abs[:, 2] = labels[:, 2] * width  # width w
        labels_abs[:, 3] = labels[:, 3] * height  # height h

        # Box corner coordinates.
        x_min = labels_abs[:, 0] - labels_abs[:, 2] / 2
        x_max = labels_abs[:, 0] + labels_abs[:, 2] / 2
        y_min = labels_abs[:, 1] - labels_abs[:, 3] / 2
        y_max = labels_abs[:, 1] + labels_abs[:, 3] / 2

        # Shift into crop coordinates and clip corners to the crop bounds.
        x_min = torch.clamp(x_min - j, 0, w)
        x_max = torch.clamp(x_max - j, 0, w)
        y_min = torch.clamp(y_min - i, 0, h)
        y_max = torch.clamp(y_max - i, 0, h)

        # Rebuild center/size from the clipped corners.
        labels_abs[:, 0] = (x_min + x_max) / 2  # new center x
        labels_abs[:, 1] = (y_min + y_max) / 2  # new center y
        labels_abs[:, 2] = x_max - x_min       # new width w
        labels_abs[:, 3] = y_max - y_min       # new height h

        # Drop boxes that fall entirely outside the crop.
        keep = (x_max > x_min) & (y_max > y_min)

        # Filter out the dropped boxes.
        labels_abs = labels_abs[keep]
        label_classes = label_classes[keep]

        # Re-normalize the surviving boxes against the crop size.
        if labels_abs.shape[0] > 0:
            labels = torch.zeros_like(labels_abs)
            labels[:, 0] = labels_abs[:, 0] / w
            labels[:, 1] = labels_abs[:, 1] / h
            labels[:, 2] = labels_abs[:, 2] / w
            labels[:, 3] = labels_abs[:, 3] / h
        else:
            labels = torch.empty((0, 4))  # no boxes left after cropping

        # Crop the image and the event tensor.
        image = F.crop(image, i, j, h, w)
        events = F.crop(events, i, j, h, w)

    return image, events, labels, label_classes


def random_translate(image, events, labels, label_classes, max_translate=0.2):
    """Shift image and events by a random fraction (up to ``max_translate``)
    of their size -- always applied -- then shift/clip/renormalize the
    normalized (cx, cy, w, h) ``labels``; boxes pushed fully outside the
    frame are dropped together with their ``label_classes`` rows."""
    tx = torch.empty(1).uniform_(-max_translate, max_translate).item()
    ty = torch.empty(1).uniform_(-max_translate, max_translate).item()
    
    tx_px, ty_px = int(tx * image.shape[-1]), int(ty * image.shape[-2])
    image = F.affine(image, angle=0, translate=(tx_px, ty_px), scale=1, shear=0)
    events = F.affine(events, angle=0, translate=(tx_px, ty_px), scale=1, shear=0)
    
    # Convert labels from normalized form to absolute position and size.
    labels_abs = labels.clone()
    height, width = image.shape[-2], image.shape[-1]
    labels_abs[:, 0] = labels[:, 0] * width  # center x
    labels_abs[:, 1] = labels[:, 1] * height  # center y
    labels_abs[:, 2] = labels[:, 2] * width  # width w
    labels_abs[:, 3] = labels[:, 3] * height  # height h
    
    # Box corner coordinates.
    x_min = labels_abs[:, 0] - labels_abs[:, 2] / 2
    x_max = labels_abs[:, 0] + labels_abs[:, 2] / 2
    y_min = labels_abs[:, 1] - labels_abs[:, 3] / 2
    y_max = labels_abs[:, 1] + labels_abs[:, 3] / 2

    # Apply the same pixel translation to the box corners.
    x_min += tx_px
    x_max += tx_px
    y_min += ty_px
    y_max += ty_px

    # Clip box corners to the image bounds.
    x_min = torch.clamp(x_min, 0, width)
    x_max = torch.clamp(x_max, 0, width)
    y_min = torch.clamp(y_min, 0, height)
    y_max = torch.clamp(y_max, 0, height)

    # Rebuild center/size from the clipped corners.
    labels_abs[:, 0] = (x_min + x_max) / 2  # new center x
    labels_abs[:, 1] = (y_min + y_max) / 2  # new center y
    labels_abs[:, 2] = x_max - x_min       # new width w
    labels_abs[:, 3] = y_max - y_min       # new height h

    # Drop boxes that were pushed entirely outside the image.
    keep = (x_max > x_min) & (y_max > y_min)

    # Filter out the dropped boxes.
    labels_abs = labels_abs[keep]
    label_classes = label_classes[keep]

    # Re-normalize the surviving boxes.
    if labels_abs.shape[0] > 0:
        labels = torch.zeros_like(labels_abs)
        labels[:, 0] = labels_abs[:, 0] / width
        labels[:, 1] = labels_abs[:, 1] / height
        labels[:, 2] = labels_abs[:, 2] / width
        labels[:, 3] = labels_abs[:, 3] / height
    else:
        labels = torch.empty((0, 4))  # no boxes left after translation
    
    return image, events, labels, label_classes



def random_zoom(image, events, labels, scale_range=(0.8, 1.2)):
    """Resize image and events by one random factor drawn from ``scale_range``;
    ``labels`` are normalized coordinates and therefore pass through unchanged."""
    factor = torch.empty(1).uniform_(scale_range[0], scale_range[1]).item()
    h, w = image.shape[-2], image.shape[-1]
    target_size = (int(h * factor), int(w * factor))
    image = F.resize(image, target_size, interpolation=T.InterpolationMode.BILINEAR, antialias = True)
    events = F.resize(events, target_size, interpolation=T.InterpolationMode.BILINEAR, antialias = True)
    return image, events, labels


def apply_transforms(image, events, labels, label_classes):
    """Training-time augmentation pipeline: flip -> zoom -> translate -> crop.

    NOTE(review): the final crop runs with p=1 and a fixed (240, 320) output,
    so every sample is cropped; the zoom only enlarges (scale in [1, 1.5]),
    presumably so the crop always fits inside the resized frame -- confirm.
    Boxes (and their classes) may be dropped by translate/crop.
    """
    image, events, labels = random_horizontal_flip(image, events, labels, p=0.5)

    image, events, labels = random_zoom(image, events, labels, scale_range=(1, 1.5))
    image, events, labels, label_classes = random_translate(image, events, labels, label_classes, max_translate=0.1)
    
    image, events, labels, label_classes = random_crop(image, events, labels, label_classes, size=(240, 320), p=1)

    return image, events, labels, label_classes



class SOD(Dataset):
    """Event-camera object-detection dataset pairing APS frames with event tensors.

    Directory layout (under ``root``): ``aps_frames/<mode>/<cond>/<seq>/``,
    ``events_npys/<mode>/<cond>/<seq>/`` and ``annotations/<mode>/<cond>/<seq>/``.
    A sample index resolves to (condition, sequence, frame j) where the three
    modalities are offset on disk: image ``j-1``.png, events ``j``.npy and
    labels ``j+1``.json.
    """

    # Static dataset description; dataset_shape is (T, polarity, H, W).
    dataset_type = "genx"
    dataset_shape = (1, 2, 260, 346)
    down_sample = 1
    label_columns = ("x", "y", "w", "h", "class_id", "class_confidence")
    classes = {
        0: "pedestrian",
        1: "car",
        2: "two-wheeler"
    }


    def __init__(self, root: str, mode: str = "train", seq_len: int = 10, augment: bool = False, transform: Optional[Callable] = None) -> None:
        """Scan the dataset and build the sample index.

        Args:
            root: dataset root directory.
            mode: one of "train", "val", "test" (raises AssertionError otherwise).
            seq_len: number of time bins for the event tensor.
            augment: apply the module's augmentation pipeline in __getitem__.
            transform: optional callable applied to both image and events.
        """
        self.root = root
        assert mode in ("train", "val", "test"), "Undefined mode: %s" % (mode,)
        self.mode = mode
        self.seq_len = seq_len
        self.augment = augment
        self.transform = transform
        # NOTE(review): force_reload=True means the JSON cache is always
        # rebuilt and never read back -- confirm this is intended.
        self.data_list = self.get_data_list(True)


    def get_data_list(self, force_reload: bool = False) -> List:
        """Build (or load from a JSON cache) the flat list of valid samples.

        Returns a list of [condition, sequence, frame_index] triples for
        frames whose image/event/label files all exist and whose labels
        survive the bbox/class filters.  The scan result is cached as
        ``<mode>_cache.json`` under ``root``.
        """
        cache_filename = os.path.join(self.root, self.mode + "_cache.json")
        res = dict(
            conds = [],
            files = []
        )
        loaded = False
        if os.path.isfile(cache_filename) and not force_reload:
            try:
                with open(cache_filename, "r", encoding = "utf-8") as f:
                    res = json.load(f)
                loaded = True
            except (OSError, ValueError):
                # Unreadable or corrupt cache: fall through and rebuild it.
                loaded = False
        if not loaded:
            conds = os.listdir(self.label_folder)
            res["conds"] = conds
            res["files"] = [None] * len(conds)
            for idx, cond in enumerate(conds):
                ev_lists = os.listdir(os.path.join(self.ev_repr_folder, cond))
                img_lists = os.listdir(os.path.join(self.image_folder, cond))
                label_lists = os.listdir(os.path.join(self.label_folder, cond))
                assert len(ev_lists) == len(img_lists) and len(ev_lists) == len(label_lists)
                cond_res = []
                for folder in track(label_lists, description = "Processing condition %s on %s set:" % (cond, self.mode)):
                    sub = os.path.join(cond, folder)
                    file_lists = os.listdir(os.path.join(self.label_folder, sub))
                    valids = []
                    for j in range(1, len(file_lists)):
                        # Modalities are offset: image j-1, events j, labels j+1.
                        img_filename = os.path.join(self.image_folder, sub, "%d.png" % (j - 1,))
                        ev_filename = os.path.join(self.ev_repr_folder, sub, "%d.npy" % (j,))
                        label_filename = os.path.join(self.label_folder, sub, "%d.json" % (j + 1,))
                        if (not os.path.isfile(img_filename)) or (not os.path.isfile(ev_filename)) or (not os.path.isfile(label_filename)):
                            continue
                        with open(label_filename, "r") as f:
                            labels = json.load(f)
                            labels = labels["shapes"]
                            if not len(labels):
                                continue

                            # Collect corner-style boxes and raw class ids.
                            x, y, w, h, c = [], [], [], [], []
                            for label in labels:
                                (x1, y1), (x2, y2) = label["points"]
                                x.append(x1)
                                y.append(y1)
                                w.append(abs(x2 - x1))
                                h.append(abs(y2 - y1))
                                c.append(int(label["label"]))
                            # Keep the frame only if at least one label passes
                            # both the bbox size filter and the class filter.
                            valid_bboxes, label_bboxes = label_bbox_filter(*(torch.tensor(k, dtype = torch.float64) for k in (x, y, w, h)))
                            valid_classes, label_classes = label_class_filter(torch.tensor(c, dtype = torch.float32))
                            valid = torch.logical_and(valid_bboxes, valid_classes)
                            if torch.sum(valid):
                                valids.append(j)
                    cond_res.append([folder, valids])
                res["files"][idx] = cond_res
            with open(cache_filename, "w", encoding = "utf-8") as f:
                json.dump(res, f)
        # Flatten the per-condition/per-sequence structure into sample triples.
        data_list = []
        for idx, cond in enumerate(res["conds"]):
            for sub, valids in res["files"][idx]:
                data_list.extend([[cond, sub, j] for j in valids])
        # random.shuffle(data_list)
        return data_list


    @property
    def ev_repr_folder(self) -> str:
        """Root of the event .npy files for the current mode."""
        return os.path.join(self.root, "events_npys", self.mode)


    @property
    def image_folder(self) -> str:
        """Root of the APS frame .png files for the current mode."""
        return os.path.join(self.root, "aps_frames", self.mode)


    @property
    def label_folder(self) -> str:
        """Root of the annotation .json files for the current mode."""
        return os.path.join(self.root, "annotations", self.mode)


    def get_data(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load the (image, event_tensor) pair for sample ``idx``.

        The image is returned as a (C, H, W) uint8 tensor; events are binned
        into a (seq_len, 2, H, W) uint8 count tensor.  Event columns are
        (t, x, y, p) as passed to events_to_tensor.
        """
        cond, sub, file_idx = self.data_list[idx]
        img_filename = os.path.join(self.image_folder, cond, sub, "%d.png" % (file_idx - 1,))
        img = torch.from_numpy(cv2.imread(img_filename)).permute(2, 0, 1)
        ev_filename = os.path.join(self.ev_repr_folder, cond, sub, "%d.npy" % (file_idx,))
        # NOTE(review): the .tolist() round-trip suggests the .npy may hold a
        # structured/object array -- confirm before simplifying to astype().
        ev_repr = np.array(np.load(ev_filename).tolist(), dtype = np.int64)
        ev_tensor = torch.zeros(self.seq_len, *self.dataset_shape[1:], dtype = torch.uint8)
        if len(ev_repr.shape) and ev_repr.shape[0]:
            # Rebase timestamps to zero before binning.
            ev_repr[:, 0] -= np.min(ev_repr[:, 0])
            ev_repr = torch.from_numpy(ev_repr)
            ev_tensor = events_to_tensor(ev_tensor, ev_repr[:, 0], ev_repr[:, 3], ev_repr[:, 2], ev_repr[:, 1])
        return img, ev_tensor


    def get_labels(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load and normalize the (bboxes, classes) labels for sample ``idx``.

        Boxes are (cx, cy, w, h) normalized by the dataset width/height;
        classes are remapped/filtered by label_class_filter and returned
        as an (N, 1) float tensor.
        """
        cond, sub, file_idx = self.data_list[idx]
        label_filename = os.path.join(self.label_folder, cond, sub, "%d.json" % (file_idx + 1,))
        with open(label_filename, "r") as f:
            labels = json.load(f)
        labels = labels["shapes"]
        bboxes, cls = [], []
        for label in labels:
            (x1, y1), (x2, y2) = label["points"]
            cx, cy, w, h = (x1 + x2) / 2, (y1 + y2) / 2, abs(x2 - x1), abs(y2 - y1)
            bboxes.append([cx, cy, w, h])
            c = int(label["label"])
            cls.append(c)
        # Normalize x/w by width, y/h by height.
        bboxes = torch.tensor(bboxes, dtype = torch.double)
        bboxes[:, ::2] /= self.dataset_shape[-1]
        bboxes[:, 1::2] /= self.dataset_shape[-2]
        cls = torch.tensor(cls, dtype = torch.float32)
        valid, cls = label_class_filter(cls)
        bboxes = bboxes[valid]
        cls = cls[valid][..., None]
        return bboxes, cls


    def __len__(self):
        """Number of valid samples discovered by get_data_list."""
        return len(self.data_list)


    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (image, events, bboxes, classes) for one sample, applying
        optional augmentation and the optional transform."""
        img, ev = self.get_data(idx)
        box, cls = self.get_labels(idx)
        if self.augment:
            img, ev, box, cls = apply_transforms(img, ev, box, cls)
        if self.transform is not None:
            img = self.transform(img)
            ev = self.transform(ev)
        return img, ev, box, cls


def sod_collate(raw_data: List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]) -> Dict:
    """Collate (img, events, boxes, classes) samples into a YOLO-style batch dict.

    Per sample, the static frame is repeated along the event time axis and
    concatenated with the events on the channel dim; boxes/classes from all
    samples are concatenated with a ``batch_idx`` mapping each box back to
    its sample.  The batched 'img' is permuted to (T, B, C, H, W).
    Assumes ``raw_data`` is non-empty and all images share one shape.
    """
    batch_indices: List[int] = []
    fused = []
    all_boxes = []
    all_classes = []
    for sample_idx, (img, ev, box, cls) in enumerate(raw_data):
        # Repeat the frame T times (T = ev.shape[0]) and fuse on channels.
        fused.append(torch.cat([torch.stack([img] * ev.shape[0]), ev], dim = 1))
        batch_indices.extend([sample_idx] * len(box))
        all_boxes.append(box)
        all_classes.append(cls)

    # Shape entries taken from an actual image rather than a leaked loop var.
    shape = (raw_data[0][0].shape[-2], raw_data[0][0].shape[-1])
    return {
        'im_file': None,
        'ori_shape': [shape] * len(raw_data),
        'resized_shape': [shape] * len(raw_data),
        'img': torch.stack(fused).permute(1, 0, 2, 3, 4),
        'cls': torch.cat(all_classes, dim = 0),
        'bboxes': torch.cat(all_boxes, dim = 0),
        'batch_idx': torch.tensor(batch_indices, dtype = torch.float32),
        'ratio_pad': [((1, 1), (0, 0))] * len(raw_data)
    }


if __name__ == "__main__":
    root = "D:/Works/data/pku_davis_sod"
    data = SOD(root, "test")
    img, ev, box, cls = data[0]
    print(img.shape, ev.shape, box.shape, cls.shape)