import os
import struct
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision.transforms import Compose, RandomCrop, CenterCrop

from datasets.transforms import find_first, Repeat, toOneHot, ToTensor
from datasets.events_timeslices import chunk_evs_pol_dvs_gesture, get_tmad_slice

# Action-name -> integer-label lookup for the DailyAction dataset (12 classes).
_ACTION_NAMES = (
    'climb', 'stand', 'jump', 'fall', 'sit', 'get up',
    'walk', 'run', 'lift', 'lie', 'bend', 'pick',
)
mapping = {name: idx for idx, name in enumerate(_ACTION_NAMES)}


class DailyActionDataset(Dataset):
    """DVS daily-action dataset (12 classes, 346x260 event sensor).

    Walks ``root`` (one sub-directory per action class, named as in
    ``mapping``) and splits each class directory's files into a train part
    (the first 80% of an assumed 120 files per class) and a test part (the
    remainder). ``__getitem__`` slices the recording, bins the events into
    ``T`` frames, and applies the configured transforms.
    """

    # Assumed number of recordings per class directory; the first 80% of each
    # directory's file listing go to the train split, the rest to test.
    _FILES_PER_CLASS = 120
    _TRAIN_RATIO = 0.8

    def __init__(
            self,
            root,
            train=True,
            transform=None,
            target_transform=None,
            clip=10,
            is_train_Enhanced=False,
            dt=1000,
            size=None,
            ds=4,
            is_spike=False,
            interval_scaling=False,
            T=16,
    ):
        """
        Args:
            root: dataset root directory (one sub-directory per class).
            train: select the train split (True) or the test split (False).
            transform: optional callable applied to the binned frame tensor.
            target_transform: optional callable applied to the integer label.
            clip: number of evaluation clips (kept for interface
                compatibility; the clip-based evaluation path is disabled).
            is_train_Enhanced: enable random start/end jitter augmentation.
            dt: nominal time step in microseconds (the effective dt used for
                chunking is returned by the sampling functions).
            size: (polarity, x, y) grid the events are binned onto; defaults
                to [2, 32, 32]. (None default avoids a shared mutable list.)
            ds: spatial downsampling factor(s).
            is_spike: binarize frame counts into spikes.
            interval_scaling: rescale each sample to [0, 1] by its max.
            T: number of time windows the event stream is split into.
        """
        super().__init__()

        self.root = root
        self.train = train
        self.chunk_size = T  # number of time windows per sample
        self.clip = clip
        self.is_train_Enhanced = is_train_Enhanced
        self.dt = dt
        self.transform = transform
        self.target_transform = target_transform
        self.size = [2, 32, 32] if size is None else size  # (pol, x, y)
        self.ds = ds
        self.is_spike = is_spike
        self.interval_scaling = interval_scaling

        # Index of the first test file within each class directory (96).
        split = self._FILES_PER_CLASS * self._TRAIN_RATIO
        if train:
            self.class_dir_train = os.listdir(self.root)
            self.files_train = self._collect_files(lambda i: i < split)
            self.n = len(self.files_train)
        else:
            self.class_dir_test = os.listdir(self.root)
            self.files_test = self._collect_files(lambda i: i >= split)
            self.n = len(self.files_test)

    def _collect_files(self, keep):
        """Walk ``self.root`` and return file paths whose per-directory
        listing index satisfies ``keep`` (used to realize the 80/20 split)."""
        files = []
        for dirpath, _dirnames, filenames in os.walk(self.root):
            for i, fname in enumerate(filenames):
                if keep(i):
                    files.append(os.path.join(dirpath, fname))
        return files

    def __len__(self):
        return self.n

    def __getitem__(self, idx):
        # The recording is opened inside __getitem__ (not __init__) so the
        # dataset stays picklable, which enables DataLoader num_workers > 0.
        if self.train:
            # data: (n_events, [t, p, x, y]); target: int; dt: window length
            data, target, dt = sample_train(
                self.files_train[idx],
                T=self.chunk_size,
                is_train_Enhanced=self.is_train_Enhanced,
                random_ratio=0.1,
            )
        else:
            data, target, dt = sample_test(
                self.files_test[idx],
                T=self.chunk_size,
                clip=self.clip,
            )
        return self._postprocess(data, target, dt)

    def _postprocess(self, data, target, dt):
        """Bin the event slice into T frames and apply transforms.

        Returns (data, target): data shaped (T, 2, 346 // ds[0], 260 // ds[1]);
        target is the (possibly transformed) label.
        """
        data = chunk_evs_pol_dvs_gesture(
            data=data,
            dt=dt,
            T=self.chunk_size,
            size=self.size,
            ds=self.ds,
        )
        if self.is_spike:
            # Binarize event counts into 0/1 spikes.
            data = np.int64(data > 0)
        if self.interval_scaling:
            # NOTE(review): scales by the sample's global max; a sample with
            # no events would divide by zero — presumably recordings always
            # contain events. Verify against the data pipeline.
            data = data / data.max()

        if self.transform is not None:
            data = self.transform(data)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return data, target
def getDVSeventsDavis(file, numEvents=1e10, startTime=0):
    """Read a DAVIS .aedat file and return its DVS event stream.

    Args:
        file: path of the file to be read, including extension (str).
        numEvents: maximum number of events to read (int, default 1e10).
            (This cap is now actually enforced; the original ignored it.)
        startTime: event timestamp (microseconds) at which reading starts
            (int, default 0); earlier events are skipped.

    Returns:
        ts: list of timestamps in microseconds.
        x: list of x-coordinates in pixels.
        y: list of y-coordinates in pixels.
        pol: list of polarities (0: on -> off, 1: off -> on).
    """
    sizeX, sizeY = 346, 260              # DAVIS346 sensor resolution
    x0, y0, x1, y1 = 0, 0, sizeX, sizeY  # accepted pixel window

    # 32-bit AER address-word layout: type bit | y | x | polarity bit.
    polmask = 0x800
    xmask = 0x003FF000
    ymask = 0x7FC00000
    typemask = 0x80000000
    typedvs = 0x00
    xshift, yshift, polshift = 12, 22, 11

    ts, x, y, pol = [], [], [], []
    numeventsread = 0

    # Context manager fixes the original's leaked file handle.
    with open(file, 'rb') as aerdatafh:
        # Skip the ASCII header: every header line starts with b'#'.
        p = 0
        line = aerdatafh.readline()
        while line and line.startswith(b'#'):
            p += len(line)
            line = aerdatafh.readline()
        aerdatafh.seek(p)

        # Each record is 8 bytes: 32-bit address word + 32-bit timestamp,
        # both big-endian. Reading until a short read also processes the
        # final record, which the original's `while p < length` dropped.
        while numeventsread < numEvents:
            record = aerdatafh.read(8)
            if len(record) < 8:
                break  # EOF
            ad, tm = struct.unpack('>II', record)
            numeventsread += 1
            if tm < startTime:
                continue
            if (ad & typemask) != typedvs:
                continue  # not a DVS event (e.g. APS / trigger word)
            xo = sizeX - 1 - float((ad & xmask) >> xshift)  # x axis is mirrored
            yo = float((ad & ymask) >> yshift)
            polo = 1 - float((ad & polmask) >> polshift)
            if x0 <= xo < x1 and y0 <= yo < y1:
                x.append(xo)
                y.append(yo)
                pol.append(polo)
                ts.append(tm)

    return ts, x, y, pol


def sample_train(
        hdf5_file,
        T=60,
        is_train_Enhanced=True,
        random_ratio=0.1,
):
    """Load one recording and cut a (possibly jittered) training slice.

    Args:
        hdf5_file: path of the recording; its parent directory name is the
            action class looked up in ``mapping``.
        T: number of time windows the caller will bin the slice into.
        is_train_Enhanced: if True, randomly jitter the slice's start and end
            inside the first/last ``random_ratio`` of the recording
            (augmentation); otherwise use the whole recording.
        random_ratio: maximum fraction of the recording trimmed at each end.

    Returns:
        Tuple ``(events, label, dt)``: ``events`` has shape (n_events, 4)
        with columns [t, p, x, y] and timestamps rebased to start at 0;
        ``label`` is the integer class id; ``dt`` is the per-window length
        ``(end - start) // T`` in microseconds.
    """
    # Class label from the parent directory name. os.path handles the
    # platform separator — the original split on '\\' and raised IndexError
    # on POSIX paths produced by os.path.join.
    label = mapping[os.path.basename(os.path.dirname(hdf5_file))]
    data_dvs = np.array(getDVSeventsDavis(hdf5_file), dtype=np.int64)
    tbegin = data_dvs[0][0]
    tend = data_dvs[0][-1]

    if is_train_Enhanced:
        # Jitter both ends inside the outer random_ratio of the recording.
        range_time = (tend - tbegin) * random_ratio
        start_time = np.random.randint(tbegin, tbegin + range_time)
        end_time = np.random.randint(tend - range_time, tend)
    else:
        start_time = tbegin
        end_time = tend
    dt = (end_time - start_time) // T

    # Slice of length T * dt starting at start_time; rows are [t, x, y, p].
    tmad = get_tmad_slice(
        data_dvs[0],
        data_dvs[1:, :].T,
        start_time,
        T * dt,
    )
    try:
        tmad[:, 0] -= tmad[0, 0]  # rebase timestamps to 0
    except IndexError as e:
        # Empty slice: report the offending file and leave timestamps as-is.
        print("error in sample_train for", hdf5_file)
        print(e)
        print('tmad.shape', tmad.shape, 'tmad', tmad)
    # Reorder columns [t, x, y, p] -> [t, p, x, y].
    return tmad[:, [0, 3, 1, 2]], label, dt


def sample_test(
        hdf5_file,
        T=60,
        clip=10,
        is_train_Enhanced=False,
        random_ratio=0.1,
):
    """Load one recording and return the full event slice for evaluation.

    Args:
        hdf5_file: path of the recording; its parent directory name is the
            action class looked up in ``mapping``.
        T: number of time windows the caller will bin the slice into.
        clip: kept for interface compatibility; the clip-based multi-crop
            evaluation path is currently disabled.
        is_train_Enhanced: if True, jitter the window like ``sample_train``
            (only affects the returned ``dt``).
        random_ratio: maximum trimmed fraction for the jitter above.

    Returns:
        Tuple ``(events, label, dt)``: ``events`` has shape (n_events, 4)
        with columns [t, p, x, y] and timestamps rebased to 0; ``label`` is
        the integer class id; ``dt`` is the per-window length
        ``(end - start) // T``. (The original returned the total duration
        ``time_all`` here, which made the caller chunk test samples with a
        window T times longer than at train time — now consistent with
        ``sample_train``.)
    """
    # Portable class-label extraction (the original split on '\\' only).
    label = mapping[os.path.basename(os.path.dirname(hdf5_file))]
    data_dvs = np.array(getDVSeventsDavis(hdf5_file), dtype=np.int64)
    tbegin = data_dvs[0][0]
    tend = data_dvs[0][-1]

    if is_train_Enhanced:
        range_time = (tend - tbegin) * random_ratio
        start_time = np.random.randint(tbegin, tbegin + range_time)
        end_time = np.random.randint(tend - range_time, tend)
    else:
        start_time = tbegin
        end_time = tend
    dt = (end_time - start_time) // T

    # Take the whole recording [tbegin, tend) as one slice; rows [t, x, y, p].
    tmad = get_tmad_slice(
        data_dvs[0],
        data_dvs[1:, :].T,
        tbegin,
        tend - tbegin,
    )
    try:
        tmad[:, 0] -= tmad[0, 0]  # rebase timestamps to 0
    except IndexError:
        # Empty slice: report the offending file and leave timestamps as-is.
        print("error in sample_test for", hdf5_file)
    # Reorder columns [t, x, y, p] -> [t, p, x, y].
    return tmad[:, [0, 3, 1, 2]], label, dt


def create_datasets(
        root=None,
        train=True,
        ds=4,
        dt=1000,
        transform=None,
        target_transform=None,
        n_events_attention=None,
        clip=10,
        is_train_Enhanced=False,
        is_spike=False,
        interval_scaling=False,
        T=16,
):
    """Build a :class:`DailyActionDataset` with sensible default transforms.

    Args:
        root: dataset root directory (one sub-directory per class).
        train: select the train split (True) or the test split (False).
        ds: spatial downsampling factor; an int is expanded to [ds, ds].
        dt: nominal time step in microseconds.
        transform: frame transform; defaults to ToTensor + CenterCrop(128).
        target_transform: label transform; defaults to ``torch.tensor``.
        n_events_attention: kept for interface compatibility; it currently
            has no effect (see note below).
        clip / is_train_Enhanced / is_spike / interval_scaling / T:
            forwarded unchanged to :class:`DailyActionDataset`.

    Returns:
        A configured :class:`DailyActionDataset`.
    """
    if isinstance(ds, int):
        ds = [ds, ds]

    # Raw sensor frame is (2, 346, 260); ds downsamples the spatial grid.
    size = [2, 346 // ds[0], 260 // ds[1]]

    # NOTE(review): both branches of the original n_events_attention check
    # built the identical default transform, so the parameter is dead; the
    # duplicated branch has been collapsed without behavior change.
    if transform is None:
        transform = Compose([
            ToTensor(),
            CenterCrop(128),
        ])

    if target_transform is None:
        target_transform = torch.tensor

    return DailyActionDataset(
        root,
        train=train,
        transform=transform,
        target_transform=target_transform,
        is_train_Enhanced=is_train_Enhanced,
        dt=dt,
        size=size,
        ds=ds,
        is_spike=is_spike,
        interval_scaling=interval_scaling,
        T=T,
        clip=clip,
    )
