import os
import struct
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision.transforms import Compose, RandomCrop, CenterCrop

from datasets.transforms import find_first, Repeat, toOneHot, ToTensor
from datasets.events_timeslices import chunk_evs_pol_dvs_gesture, get_tmad_slice

from datasets.thu_chl.process import *

# Map representation names to their frame-builder functions (these come from
# the `datasets.thu_chl.process` star import above).
repr_map = {'eventFrame': get_eventFrame,
            'eventAccuFrame': get_eventAccuFrame,
            'timeSurface': get_timeSurface,
            'eventCount': get_eventCount}


class THU_EACT_50_CHL(Dataset):
    """THU-EACT-50-CHL event-camera action dataset.

    Reads ``train.txt`` / ``test.txt`` under ``root`` (lines of the form
    ``"<path> <label>"``), then loads each event recording on demand in
    ``__getitem__`` and converts it into chunked event frames.
    """

    def __init__(self,
                 root,
                 train=True,
                 transform=None,
                 target_transform=None,
                 clip=10,
                 is_train_Enhanced=False,
                 dt=1000,
                 size=None,
                 ds=4,
                 is_spike=False,
                 interval_scaling=False,
                 T=16,
                 repr=None):
        """
        :param root: dataset directory containing train.txt / test.txt and the event files
        :param train: select the train split (True) or the test split (False)
        :param transform: optional callable applied to each chunked sample
        :param target_transform: optional callable applied to each label
        :param clip: number of test-time windows extracted per recording
        :param is_train_Enhanced: enable random window start during training
        :param dt: time-bin width forwarded to the chunking routine
        :param size: output frame size [channels, H, W]; defaults to [2, 32, 32]
                     (None sentinel replaces the former shared mutable default)
        :param ds: spatial downsampling factor(s)
        :param is_spike: binarize chunked frames to {0, 1}
        :param interval_scaling: divide each sample by its max value
        :param T: number of time bins per sample
        :param repr: kept for backward compatibility; currently unused
        """
        super(THU_EACT_50_CHL, self).__init__()
        list_file_name = os.path.join(root, "train.txt" if train else "test.txt")

        self.root = root
        self.train = train
        self.chunk_size = T
        self.clip = clip
        self.is_train_Enhanced = is_train_Enhanced
        self.dt = dt
        self.transform = transform
        self.target_transform = target_transform
        self.size = [2, 32, 32] if size is None else size
        self.ds = ds
        self.is_spike = is_spike
        self.interval_scaling = interval_scaling

        files, labels = self._read_file_list(list_file_name)
        if train:
            self.files_train = files
            self.files_train_label = labels
        else:
            self.files_test = files
            self.files_test_label = labels
        self.classes = np.unique(labels)
        self.n = len(files)

    def _read_file_list(self, list_file_name):
        """Parse '<path> <label>' lines; each path is reduced to its basename
        and re-rooted under ``self.root``."""
        files, labels = [], []
        with open(list_file_name, "r") as list_file:
            for line in list_file:
                file, label = line.split(" ")
                files.append(os.path.join(self.root, file.split('/')[-1]))
                labels.append(int(label))
        return files, labels

    def _chunk(self, events):
        """Convert raw (t, p, x, y) events into chunked frames, with optional
        binarization and per-sample max scaling."""
        frames = chunk_evs_pol_dvs_gesture(
            data=events,
            dt=self.dt,
            T=self.chunk_size,
            size=self.size,
            ds=self.ds
        )
        if self.is_spike:
            frames = np.int64(frames > 0)
        if self.interval_scaling:
            frames = frames / frames.max()
        return frames

    def __len__(self):
        return self.n

    def __getitem__(self, idx):
        """Load the recording at ``idx`` and return ``(data, target)``.

        Train split: a single chunked sample (plus transformed label).
        Test split: ``clip`` stacked windows and ``clip`` stacked targets.
        """
        if self.train:
            file_path = self.files_train[idx]
            target = self.files_train_label[idx]
            data, time_all = sample_train(
                file_path,
                T=self.chunk_size,
                is_train_Enhanced=self.is_train_Enhanced,
                dt=self.dt
            )
            data = self._chunk(data)

            if self.transform is not None:
                data = self.transform(data)
            if self.target_transform is not None:
                target = self.target_transform(target)
            return data, target

        file_path = self.files_test[idx]
        target = self.files_test_label[idx]
        data, time_all = sample_test(
            file_path,
            T=self.chunk_size,
            clip=self.clip,
            dt=self.dt
        )

        clips = []
        targets = []
        for i in range(self.clip):
            # Chunk every window unconditionally; the original skipped
            # chunking entirely when no transform was set, which made the
            # torch.stack below fail on an empty list.
            temp = self._chunk(data[i])
            clips.append(self.transform(temp)
                         if self.transform is not None else torch.from_numpy(temp))
            targets.append(self.target_transform(target)
                           if self.target_transform is not None else torch.tensor(target))

        return torch.stack(clips), torch.stack(targets)


def sample_train(
        hdf5_file,
        T=60,
        dt=1000,
        is_train_Enhanced=False
):
    """Load one training recording and slice a single T*dt window of events.

    Parameters
    ----------
    hdf5_file : str
        Path to a ``.npy`` file with rows ``(x, y, t, p)`` (loaded with
        ``np.load`` despite the parameter name).
    T : int
        Number of time bins.
    dt : int
        Bin width, in the same time unit as the recorded timestamps.
    is_train_Enhanced : bool
        If True, pick a random window start (data augmentation); otherwise
        the window starts at time 0.

    Returns
    -------
    (events, time_all)
        Event array with columns re-ordered to ``(t, p, x, y)`` and
        timestamps shifted to start at 0, plus the recording's duration.
    """
    data_dvs = np.load(hdf5_file).astype(np.int64)

    # Re-order columns (x, y, t, p) -> (t, x, y, p) and transpose so that
    # row 0 holds the timestamps.
    data_dvs = data_dvs.transpose()[[2, 0, 1, 3], :]

    tbegin = data_dvs[0][0]
    time_all = data_dvs[0][-1] - tbegin

    if time_all < T * dt:
        # Recording shorter than the requested window: take it from the start.
        start_time = tbegin
    elif is_train_Enhanced:
        tend = np.maximum(0, data_dvs[0][-1] - T * dt)
        try:
            start_time = np.random.randint(tbegin, high=tend, dtype=np.int64)
        except ValueError:
            # Degenerate range (tend <= tbegin). The original bare
            # `except: pass` left start_time undefined here and crashed
            # with a NameError further down.
            start_time = tbegin
    else:
        start_time = 0

    tmad = get_tmad_slice(
        data_dvs[0],
        data_dvs[1:, :].T,
        start_time,
        T * dt
    )
    try:
        # Make timestamps relative to the first retained event.
        tmad[:, 0] -= tmad[0, 0]
    except IndexError:
        # Empty slice: report the offending file (original best-effort behavior).
        print(hdf5_file)
    return tmad[:, [0, 3, 1, 2]], time_all


def sample_test(
        hdf5_file,
        T=60,
        clip=10,
        dt=1000
):
    """Load one test recording and split it into ``clip`` tiled windows.

    Parameters
    ----------
    hdf5_file : str
        Path to a ``.npy`` file with rows ``(x, y, t, p)`` (loaded with
        ``np.load`` despite the parameter name).
    T : int
        Number of time bins per window.
    clip : int
        Number of windows to extract.
    dt : int
        Bin width, in the same time unit as the recorded timestamps.

    Returns
    -------
    (windows, time_all)
        List of ``clip`` event arrays with columns ``(t, p, x, y)``,
        plus the recording's total duration.
    """
    data_dvs = np.load(hdf5_file).astype(np.int64)

    # Re-order columns (x, y, t, p) -> (t, x, y, p) and transpose so that
    # row 0 holds the timestamps.
    data_dvs = data_dvs.transpose()[[2, 0, 1, 3], :]

    tbegin = data_dvs[0][0]
    tend = np.maximum(0, data_dvs[0][-1])
    time_all = data_dvs[0][-1] - data_dvs[0][0]

    tmad = get_tmad_slice(
        data_dvs[0],
        data_dvs[1:, :].T,
        tbegin,
        tend - tbegin
    )
    try:
        # Make timestamps relative to the first retained event.
        tmad[:, 0] -= tmad[0, 0]
    except IndexError:
        # Empty slice: report the offending file (original best-effort behavior).
        print(hdf5_file)

    start_time = tmad[0, 0]
    end_time = tmad[-1, 0]
    duration = end_time - start_time

    # Choose a stride so that `clip` windows of length T*dt tile the
    # recording: windows overlap when the recording is shorter than
    # clip*T*dt and are spaced apart when it is longer.  The two branches
    # cannot be merged into one floor-division because np.floor of a
    # negative quotient differs from the original rounding.
    if clip * T * dt - duration > 0:
        overlap = int(np.floor((clip * T * dt - duration) / clip))
        stride = T * dt - overlap
    else:
        gap = int(np.floor((duration - clip * T * dt) / clip))
        stride = T * dt + gap

    start_point = []
    for j in range(clip):
        start = j * stride
        if start + T * dt > end_time:
            # Clamp so the window ends exactly at end_time.
            start = end_time - T * dt
        start_point.append(start)

    windows = []
    for start in start_point:
        idx_beg = find_first(tmad[:, 0], start)
        idx_end = find_first(tmad[:, 0][idx_beg:], start + T * dt) + idx_beg
        windows.append(tmad[idx_beg:idx_end][:, [0, 3, 1, 2]])

    return windows, time_all


def create_datasets(
        root=None,
        train=True,
        ds=4,
        dt=1000,
        transform=None,
        target_transform=None,
        n_events_attention=None,
        clip=10,
        is_train_Enhanced=False,
        is_spike=False,
        interval_scaling=False,
        T=16,
):
    """Build a THU_EACT_50_CHL dataset with default transforms filled in.

    :param root: dataset directory (forwarded to THU_EACT_50_CHL)
    :param train: select train (True) or test (False) split
    :param ds: spatial downsampling factor; an int is expanded to [ds, ds]
    :param dt: time-bin width
    :param transform: sample transform; defaults to ToTensor()
    :param target_transform: label transform; defaults to Repeat(T) followed
        by one-hot encoding over 50 classes
    :param n_events_attention: currently has no effect — both branches of the
        original code built byte-identical default transforms
    :param clip: number of test-time windows per recording
    :param is_train_Enhanced: random window start during training
    :param is_spike: binarize chunked frames
    :param interval_scaling: per-sample max scaling
    :param T: number of time bins per sample
    :return: a configured THU_EACT_50_CHL instance
    """
    if isinstance(ds, int):
        ds = [ds, ds]

    # Frame size [channels, W, H] after downsampling the 346x260 sensor grid.
    size = [2, 346 // ds[0], 260 // ds[1]]

    if transform is None:
        # NOTE(review): n_events_attention previously chose between two
        # identical pipelines, so a single default suffices.
        transform = Compose([
            ToTensor(),
        ])

    if target_transform is None:
        target_transform = Compose([
            Repeat(T), toOneHot(50)
        ])

    return THU_EACT_50_CHL(
        root,
        train=train,
        transform=transform,
        target_transform=target_transform,
        is_train_Enhanced=is_train_Enhanced,
        dt=dt,
        size=size,
        ds=ds,
        is_spike=is_spike,
        interval_scaling=interval_scaling,
        T=T,
        clip=clip,
    )
