'''
This script converts the aedat4 format used in SODFormer to the h5 format, which is commonly used in recent works.
Structure of the output:
-events
    -x
    -y
    -height
    -width
    -p
    -t
-aps
    -image
    -timestamp
    -height
    -width
Author: Yuyang
Time: 2024/03/11
'''
#import dv_processing as dv
import cv2 as cv
import h5py
#import hdf5plugin
import numpy as np
import torch
import os
import glob
import argparse
from tqdm import tqdm

from metavision_core.event_io import EventsIterator

# Fix all RNG seeds for reproducibility. No randomness is used in the
# visible conversion code, but this keeps behaviour deterministic if any
# is added later.
torch.manual_seed(0)
torch.cuda.manual_seed(0)
np.random.seed(0)


# Directory layout of the dataset (presumably the PKU SOD recordings —
# TODO confirm): three capture conditions, each with train/val splits.
pku_scenes = ['normal', 'low_light', 'motion_blur']
pku_splits = ['train', 'val']


def parse_argument():
    """Build the command-line argument parser for the raw -> h5 conversion.

    Returns:
        argparse.ArgumentParser: parser exposing the scene selection, the
        input/output root directories and a visualisation flag.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ## dir params
    parser.add_argument('--scene', type=str,
                        default='normal',
                        help='Scene subset to convert, or "all" for every scene',
                        choices=['normal', 'low_light', 'motion_blur', 'all'])
    parser.add_argument('--base_file_dir', type=str,
                        default='E:/Datasets/lastdata/raw',
                        help='The name of a raw dir')
    parser.add_argument('--base_save_dir', type=str,
                        default='E:/Datasets/lastdata/H5',
                        help='Root directory for the generated h5 files')
    # store_true makes --show a real boolean flag; the previous
    # `default=False` (no type/action) would have bound any following
    # token as a truthy string.
    parser.add_argument('--show', action='store_true',
                        help='if show detection and tracking process')
    return parser


# def get_reader(file_path):
#     assert os.path.exists(file_path), 'The file /'{}/' is not exist'.format(file_path)
#     camera_reader = dv.io.MonoCameraRecording(file_path)
#
#     return camera_reader
def get_reader(file_path, time_interval_us):
    """Create an event iterator over a raw event-camera recording.

    Args:
        file_path: path to the `.raw` recording on disk.
        time_interval_us: duration in microseconds of each event slice
            yielded by the returned iterator.

    Returns:
        EventsIterator over the recording, yielding structured event
        chunks of `time_interval_us` duration.

    Raises:
        FileNotFoundError: if `file_path` does not exist.
    """
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would let a missing file surface as a cryptic
    # error inside EventsIterator instead.
    if not os.path.exists(file_path):
        raise FileNotFoundError("The file '{}' does not exist".format(file_path))
    return EventsIterator(input_path=file_path, delta_t=time_interval_us)

if __name__ == '__main__':
    ## Get params
    args, _ = parse_argument().parse_known_args(None)
    print(args)

    # Build matching lists of input (raw) and output (h5) directories,
    # one entry per (split, scene) combination.
    file_dir_list = []
    save_dir_list = []
    if args.scene == 'all':
        scenes = pku_scenes
    else:
        assert args.scene in pku_scenes
        scenes = [args.scene]
    for scene in scenes:
        for split in pku_splits:
            file_dir_list.append(os.path.join(args.base_file_dir, split, scene))
            save_dir_list.append(os.path.join(args.base_save_dir, split, scene))

    assert len(file_dir_list) == len(save_dir_list)

    for file_dir, save_dir in zip(file_dir_list, save_dir_list):
        assert os.path.exists(file_dir)
        os.makedirs(save_dir, exist_ok=True)

        file_name_list = glob.glob(os.path.join(file_dir, '*.raw'))
        for file_name in tqdm(file_name_list):
            file_base_name = os.path.basename(file_name)
            # NOTE(review): the output is named '<name>.raw.h5' because the
            # extension is kept; if plain '<name>.h5' is wanted, switch to
            # os.path.splitext — confirm against downstream consumers.

            ## Create the raw reader (20 ms slices) and query the sensor size.
            reader = get_reader(file_name, 20000)
            height, width = reader.get_size()

            # Collect all non-empty event slices; empty slices would make
            # np.min / np.concatenate raise.
            evs_list = [evs for evs in reader if len(evs) > 0]
            if not evs_list:
                print('No events found in {}, skipping'.format(file_name))
                continue
            events = np.concatenate(evs_list)
            print(events.shape)

            events_dict = {
                'x': events['x'].astype(np.uint16),
                'y': events['y'].astype(np.uint16),
                # Map polarity {0, 1} -> {-1, +1}.
                'p': (events['p']*2-1).astype(np.int8),
                't': events['t'].astype(np.int64),
                'height': np.int32(height),
                'width': np.int32(width)
            }

            # Normalise timestamps so every recording starts at t = 0.
            events_dict['t'] = events_dict['t'] - np.min(events_dict['t'])

            h5_file_path = '{}/{}.h5'.format(save_dir, file_base_name)
            # Context manager guarantees the file is closed (and flushed)
            # even if a dataset write raises.
            with h5py.File(h5_file_path, 'w') as h5_file:
                h5_event_group = h5_file.create_group('events')
                for k, v in events_dict.items():
                    # Scalar datasets (height/width have shape ()) cannot
                    # be chunked; passing chunks=() makes h5py raise, so
                    # fall back to contiguous storage for them.
                    chunks = v.shape if v.shape else None
                    h5_event_group.create_dataset(name=k, data=v, dtype=v.dtype, chunks=chunks)





















