import os
import struct
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision.transforms import Compose, RandomCrop, CenterCrop

from datasets.transforms import find_first, Repeat, toOneHot, ToTensor
from datasets.events_timeslices import chunk_evs_pol_dvs_gesture, get_tmad_slice

# Action-name -> integer class id. The tuple order defines the ids
# (12 classes, 0-11); keep it stable, labels are derived from it.
_ACTION_NAMES = (
    'climb', 'stand', 'jump', 'fall', 'sit', 'get up',
    'walk', 'run', 'lift', 'lie', 'bend', 'pick',
)
mapping = {name: index for index, name in enumerate(_ACTION_NAMES)}


class DailyActionDataset(Dataset):
    """DVS (event-camera) daily-action dataset.

    Walks ``root`` (one sub-directory per action class, named after the keys
    of ``mapping``) and splits the files of each directory 80/20 into
    train/test by position. Each sample is an event stream sliced and binned
    into ``T`` time windows via ``chunk_evs_pol_dvs_gesture``.

    Args:
        root: dataset root directory; each child directory is one class.
        train: select the train (first 80%) or test (last 20%) file split.
        transform: optional callable applied to the binned frame tensor.
        target_transform: optional callable applied to the integer label.
        clip: number of sub-clips per recording in the test split.
        is_train_Enhanced: enable random start/end jitter augmentation.
        size: raw event frame geometry as (polarity, x, y); per the original
            comments this is (2, 346, 260) for a DAVIS346 — TODO confirm.
        is_spike: binarize the binned frames (frame > 0).
        interval_scaling: rescale each sample by its own maximum.
        T: number of time windows the event slice is split into.
        ds: spatial downsampling factor, scalar or [ds_x, ds_y] pair.
        random_ratio: jitter range as a fraction (0-1) of the stream length.
    """

    # Each class directory is assumed to hold ~120 recordings; the first
    # 80% (by directory listing order) form the train split.
    _SPLIT_INDEX = 120 * 0.8

    def __init__(
            self,
            root,
            train=True,
            transform=None,
            target_transform=None,
            clip=10,
            is_train_Enhanced=False,
            size=[2, 346, 260],
            is_spike=False,
            interval_scaling=False,
            T=16,
            ds=1,
            random_ratio=0.1,
    ):
        super(DailyActionDataset, self).__init__()

        self.root = root
        self.train = train
        self.chunk_size = T  # number of time windows per sample
        self.clip = clip
        self.is_train_Enhanced = is_train_Enhanced
        self.transform = transform
        self.target_transform = target_transform
        self.size = size  # (pol, x, y)
        # Accept either a scalar factor or an explicit [ds_x, ds_y] pair.
        # BUGFIX: previously a non-int ds left self.ds unset entirely,
        # causing an AttributeError on the first __getitem__ call.
        if isinstance(ds, int):
            self.ds = [ds, ds]
        else:
            self.ds = ds
        self.is_spike = is_spike
        self.interval_scaling = interval_scaling
        self.random_ratio = random_ratio

        if train:
            self.class_dir_train = os.listdir(self.root)
            self.files_train = self._collect_files(want_train=True)
            self.n = len(self.files_train)
        else:
            self.class_dir_test = os.listdir(self.root)
            self.files_test = self._collect_files(want_train=False)
            self.n = len(self.files_test)

    def _collect_files(self, want_train):
        """Walk root; the first 80% of each dir's files are the train split."""
        selected = []
        for dirpath, _dirnames, filenames in os.walk(self.root):
            for index, filename in enumerate(filenames):
                in_train_split = index < self._SPLIT_INDEX
                if in_train_split == want_train:
                    selected.append(os.path.join(dirpath, filename))
        return selected

    def __len__(self):
        return self.n

    def _chunk(self, events):
        """Bin an event stream (rows [t, p, x, y]) into T frames.

        Per the original comments the result has shape
        (T, 2, 346/ds[0], 260/ds[1]) — TODO confirm against
        chunk_evs_pol_dvs_gesture.
        """
        frames = chunk_evs_pol_dvs_gesture(
            data=events,
            dt=self.dt,
            T=self.chunk_size,
            size=self.size,
            ds=self.ds,
        )
        if self.is_spike:
            frames = np.int64(frames > 0)  # binarize to spike events
        if self.interval_scaling:
            frames = frames / frames.max()  # per-sample scaling to [0, 1]
        return frames

    def __getitem__(self, idx):
        # Files are opened per item (inside sample_train/sample_test) so
        # that DataLoader num_workers > 0 works.
        if self.train:
            return self._get_train_item(idx)
        return self._get_test_item(idx)

    def _get_train_item(self, idx):
        """One training sample: a single slice binned into T frames."""
        file_path = self.files_train[idx]
        # events: (n_events, [t, p, x, y]); dt: per-window duration.
        events, target, self.dt = sample_train(
            hdf5_file=file_path,
            T=self.chunk_size,
            is_train_Enhanced=self.is_train_Enhanced,
            random_ratio=self.random_ratio,  # 0 < random_ratio < 1
        )
        data = self._chunk(events)
        if self.transform is not None:
            data = self.transform(data)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return data, target

    def _get_test_item(self, idx):
        """One test sample: `clip` sub-clips, each binned into T frames."""
        file_path = self.files_test[idx]
        # clips: list of `clip` event arrays, each (n_events, [t, p, x, y]).
        # NOTE(review): sample_test returns only the LAST clip's dt, so all
        # clips are binned with that single dt — confirm this is intended.
        clips, target, self.dt = sample_test(
            hdf5_file=file_path,
            T=self.chunk_size,
            clip=self.clip,
            is_test_Enhanced=self.is_train_Enhanced,
            random_ratio=self.random_ratio,  # 0 < random_ratio < 1
        )

        data_parts = []
        target_parts = []
        for i in range(self.clip):
            frames = self._chunk(clips[i])
            # BUGFIX: the original only built data when transform was set,
            # so transform=None crashed on torch.stack([]).
            if self.transform is not None:
                data_parts.append(self.transform(frames))
            else:
                data_parts.append(torch.as_tensor(frames))
            if self.target_transform is not None:
                target_parts.append(self.target_transform(target))
            else:
                target_parts.append(torch.tensor(target))

        # data: (clip, T, ...); target: (clip,) or (clip, T, num_classes)
        # depending on target_transform.
        return torch.stack(data_parts), torch.stack(target_parts)


def getDVSeventsDavis(file, numEvents=1e10, startTime=0):
    """Read a DAVIS .aedat file into parallel event lists.

    Args:
        file: path of the file to be read, including extension (str).
        numEvents: maximum number of 8-byte records to read (int, default
            1e10). BUGFIX: this limit was documented but never enforced.
        startTime: events with a timestamp (microseconds) below this value
            are skipped (int, default 0).

    Returns:
        ts: list of timestamps in microseconds.
        x: list of x-coordinates in pixels (mirrored horizontally).
        y: list of y-coordinates in pixels.
        pol: list of polarities (0: on -> off, 1: off -> on).
    """
    sizeX = 346  # DAVIS346 sensor width
    sizeY = 260  # DAVIS346 sensor height
    x0, y0 = 0, 0          # accepted pixel window, lower bounds
    x1, y1 = sizeX, sizeY  # accepted pixel window, upper bounds (exclusive)

    # AER 32-bit address-word layout (big-endian): high bit 0 marks a DVS
    # event; x, y and polarity are packed bit fields.
    polmask = 0x800
    xmask = 0x003FF000
    ymask = 0x7FC00000
    typemask = 0x80000000
    typedvs = 0x00
    xshift = 12
    yshift = 22
    polshift = 11

    ts, x, y, pol = [], [], [], []
    numeventsread = 0

    # BUGFIX: the file handle was previously opened and never closed.
    with open(file, 'rb') as aerdatafh:
        # Skip the ASCII header: leading lines starting with '#'.
        p = 0
        line = aerdatafh.readline()
        while line and line.startswith(b'#'):
            p += len(line)
            line = aerdatafh.readline()
        aerdatafh.seek(p)

        # Each record is 8 bytes: '>II' = (address word, timestamp),
        # both unsigned big-endian (so no abs() is needed on the address).
        record = aerdatafh.read(8)
        # BUGFIX: the original compared the file offset against the file
        # size BEFORE processing, which silently dropped the last record.
        while len(record) == 8 and numeventsread < numEvents:
            ad, tm = struct.unpack('>II', record)
            if tm >= startTime and (ad & typemask) == typedvs:
                xo = sizeX - 1 - float((ad & xmask) >> xshift)  # mirror x
                yo = float((ad & ymask) >> yshift)
                polo = 1 - float((ad & polmask) >> polshift)  # invert pol bit
                if x0 <= xo < x1 and y0 <= yo < y1:
                    x.append(xo)
                    y.append(yo)
                    pol.append(polo)
                    ts.append(tm)
            record = aerdatafh.read(8)
            numeventsread += 1

    return ts, x, y, pol


def sample_train(
        hdf5_file,
        T=10,
        is_train_Enhanced=False,
        random_ratio=0.1,
):
    """Read one recording and return a (possibly augmented) event slice.

    Args:
        hdf5_file: path to one recording file; its parent directory name is
            the action label (a key of ``mapping``).
        T: number of time windows the caller will split the slice into.
        is_train_Enhanced: if True, jitter the slice start/end randomly
            within ``random_ratio`` of the stream length (augmentation).
        random_ratio: fraction (0-1) of the stream length used as the
            jitter range for the augmented start/end indices.

    Returns:
        (events, label, dt) where events is an (N, 4) int64 array with
        columns [t, p, x, y] and timestamps re-zeroed to the slice start,
        label is the integer class id, and dt = (end - start) // T.
    """
    # BUGFIX: the label was extracted with a hard-coded '\\' separator,
    # which only worked on Windows; os.path handles both separators.
    label = mapping[os.path.basename(os.path.dirname(hdf5_file))]
    data_dvs = np.array(getDVSeventsDavis(hdf5_file), dtype=np.int64)
    try:
        # Shift timestamps so the recording starts at t = 0.
        data_dvs[0, :] -= data_dvs[0, 0]
    except Exception as e:
        # Best-effort diagnostics for empty/corrupt recordings (preserved
        # from the original; the caller still receives whatever follows).
        print("error in sample_train for", hdf5_file)
        print(e)
        print('data_dvs.shape', data_dvs.shape, 'data_dvs', data_dvs)

    length = data_dvs.shape[1]
    if is_train_Enhanced:
        # Augmentation: random start in the first random_ratio of the
        # stream and random end in the last random_ratio of the stream.
        # max(..., 1) keeps randint's range valid for very short streams.
        jitter = max(int(length * random_ratio), 1)
        start_index = np.random.randint(0, jitter)
        end_index = np.random.randint(length - jitter, length)
        start_time = data_dvs[0, start_index]
        end_time = data_dvs[0, end_index]
    else:
        start_time = data_dvs[0, 0]
        end_time = data_dvs[0, -1]
    dt = (end_time - start_time) // T

    # Slice events starting at start_time for a duration of T*dt;
    # rows of the result are [t, x, y, p].
    tmad = get_tmad_slice(
        data_dvs[0],
        data_dvs[1:, :].T,
        start_time,
        T * dt
    )
    try:
        # Re-zero timestamps within the slice.
        tmad[:, 0] -= tmad[0, 0]
    except Exception as e:
        print("error in sample_train for", hdf5_file)
        print(e)
        print('tmad.shape', tmad.shape, 'tmad', tmad)
    # Reorder columns [t, x, y, p] -> [t, p, x, y] for the caller.
    return tmad[:, [0, 3, 1, 2]], label, dt


def sample_test(
        hdf5_file,
        T=10,
        clip=10,
        is_test_Enhanced=True,
        random_ratio=0.1,
):
    """Read one recording and split it into `clip` event slices.

    Args:
        hdf5_file: path to one recording file; its parent directory name is
            the action label (a key of ``mapping``).
        T: number of time windows each clip will be split into.
        clip: number of equal-length segments to cut the stream into.
        is_test_Enhanced: if True, jitter each segment's start/end within
            ``random_ratio`` of the segment length (same scheme as training).
        random_ratio: fraction (0-1) used as the jitter range.

    Returns:
        (clips, label, dt) where clips is a list of `clip` arrays, each
        (N_i, 4) with columns [t, p, x, y], label is the class id, and dt
        is the per-window duration of the LAST clip only. NOTE(review):
        under enhancement every clip has its own dt but only the last is
        returned — preserved from the original; confirm this is intended.
    """
    # BUGFIX: portable label extraction (was a hard-coded '\\' split).
    label = mapping[os.path.basename(os.path.dirname(hdf5_file))]
    data_dvs = np.array(getDVSeventsDavis(hdf5_file), dtype=np.int64)
    try:
        # Shift timestamps so the recording starts at t = 0.
        data_dvs[0, :] -= data_dvs[0, 0]
    except Exception as e:
        # BUGFIX: the message previously said "sample_train" here.
        print("error in sample_test for", hdf5_file)
        print(e)
        print('data_dvs.shape', data_dvs.shape, 'data_dvs', data_dvs)

    length = data_dvs.shape[1]
    # Split the stream into `clip` equal segments (by event index); each
    # segment is then sliced with the enhancement scheme.
    seg_len = length // clip
    clips = []
    dt = None
    for i in range(clip):
        seg_start = i * seg_len
        seg_end = (i + 1) * seg_len
        if is_test_Enhanced:
            # max(..., 1) keeps randint's range valid for tiny segments.
            jitter = max(int(seg_len * random_ratio), 1)
            start_index = np.random.randint(seg_start, seg_start + jitter)
            end_index = np.random.randint(seg_end - jitter, seg_end)
            start_time = data_dvs[0, start_index]
            end_time = data_dvs[0, end_index]
        else:
            # NOTE(review): without enhancement every clip spans the whole
            # stream rather than its own segment — preserved as-is.
            start_time = data_dvs[0, 0]
            end_time = data_dvs[0, -1]
        dt = (end_time - start_time) // T  # per-clip dt; T is shared

        # Rows of tmad are [t, x, y, p].
        tmad = get_tmad_slice(
            data_dvs[0],
            data_dvs[1:, :].T,
            start_time,
            T * dt
        )
        try:
            tmad[:, 0] -= tmad[0, 0]  # re-zero timestamps within the clip
        except Exception as e:
            print("error in sample_test for", hdf5_file)
            print(e)
            print('tmad.shape', tmad.shape, 'tmad', tmad)
        # Reorder columns [t, x, y, p] -> [t, p, x, y].
        clips.append(tmad[:, [0, 3, 1, 2]])

    # clips: list of (N_i, 4) arrays, one per segment; label: int.
    return clips, label, dt


def create_datasets(
        root=None,
        train=True,
        ds=1,
        transform=None,
        target_transform=None,
        n_events_attention=None,
        clip=10,
        is_train_Enhanced=False,
        is_spike=False,
        interval_scaling=False,
        T=16,
        random_ratio=0.1,
):
    """Build a DailyActionDataset with default transforms filled in.

    Args:
        root: dataset root directory (one sub-directory per class).
        train: build the train split (True) or the test split (False).
        ds: spatial downsampling factor, scalar or [ds_x, ds_y] pair.
        transform: frame transform; defaults to ToTensor + CenterCrop(256).
        target_transform: label transform; defaults to ``torch.tensor``.
        n_events_attention: kept for signature compatibility; the original
            defined byte-identical default transforms for both the set and
            unset cases, so it has no effect here.
        clip / is_train_Enhanced / is_spike / interval_scaling / T /
        random_ratio: forwarded unchanged to DailyActionDataset.

    Returns:
        A configured DailyActionDataset instance.
    """
    # Raw DAVIS event frame geometry: (polarity, x, y) = (2, 346, 260).
    size = [2, 346, 260]

    if transform is None:
        # Cleanup: the original built the same default transform through a
        # nested default_transform() in both n_events_attention branches.
        transform = Compose([
            ToTensor(),
            CenterCrop(256)
        ])

    if target_transform is None:
        # Plain integer label -> 0-d tensor (one-hot/Repeat left disabled,
        # as in the original).
        target_transform = torch.tensor

    return DailyActionDataset(
        root,
        train=train,
        transform=transform,
        target_transform=target_transform,
        is_train_Enhanced=is_train_Enhanced,
        size=size,
        ds=ds,
        is_spike=is_spike,
        interval_scaling=interval_scaling,
        T=T,
        clip=clip,
        random_ratio=random_ratio,
    )
