import numpy as np
import os
import torch
from torch.utils.data import Dataset

import sys
BASE = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(BASE))

from utils import read_pickle, bbox_camera2lidar
from dataset.data_aug_hw import point_range_filter, data_augment


class BaseSampler():
    '''
    Holds the pool of ground-truth samples (one entry per point-cloud cluster)
    and hands them out in batches, cycling through the pool with optional
    reshuffling on every wrap-around.
    '''
    def __init__(self, sampled_list, shuffle=True):
        self.total_num = len(sampled_list)
        self.sampled_list = np.array(sampled_list)
        self.indices = np.arange(self.total_num)
        self.shuffle = shuffle
        self.idx = 0
        if shuffle:
            np.random.shuffle(self.indices)

    def sample(self, num):
        """Return up to `num` entries; a wrap-around yields only the tail."""
        end = self.idx + num
        if end < self.total_num:
            picked = self.sampled_list[self.indices[self.idx:end]]
            self.idx = end
        else:
            # Pool exhausted: return whatever remains (possibly fewer than
            # `num`), restart from the front and reshuffle if requested.
            picked = self.sampled_list[self.indices[self.idx:]]
            self.idx = 0
            if self.shuffle:
                np.random.shuffle(self.indices)
        return picked


class HW(Dataset):
    """Lidar + radar 3D-detection dataset.

    Frame metadata is read from ``infos_<split>.pkl`` under
    ``<data_root>/data_base``; the ground-truth database
    (``dbinfos_train.pkl``) drives GT-sampling augmentation on the
    training splits.
    """

    # Class name -> integer training label.
    CLASSES = {
        'Pedestrian': 0, 
        'Cyclist': 1,
        'Car': 2,
        'Truck': 3,
        }

    # Minimum object count per class we aim for in each frame.
    # If a frame has fewer objects of a class:
    #   if the sample database contains that class -> top up by sampling
    #   else -> leave the frame unchanged
    SAMPLE_GROUPS = {
        'Pedestrian': 10,
        'Cyclist': 10,
        'Car': 15,
        'Truck': 3,
        }

    def __init__(self, data_root, split, model="all", sets=None):
        """
        Args:
            data_root: dataset root; ``<data_root>/data_base`` must hold the
                info/dbinfo pickles.
            split: one of 'train', 'val', 'trainval', 'test'.
            model: one of 'all', 'include', 'except' (not used in this class).
            sets: optional list (not used in this class). Default changed from
                the mutable ``[]`` to ``None``; behavior is identical.
        """
        assert split in ['train', 'val', 'trainval', 'test']
        assert model in ['all', 'include', 'except']
        self.data_root = data_root
        self.data_base = data_root + "/data_base"
        self.split = split
        self.data_infos = read_pickle(os.path.join(self.data_base, 'infos_{}.pkl'.format(split)))
        self.sorted_ids = list(self.data_infos.keys())
        db_infos = read_pickle(os.path.join(self.data_base, 'dbinfos_train.pkl'))
        db_infos = self.filter_db(db_infos)

        # Build one cyclic sampler per class that actually exists in the GT
        # database; only such classes can be topped up during augmentation.
        db_sampler = {}
        sample_groups = {}
        for cat_name in self.CLASSES:
            if cat_name in db_infos:
                db_sampler[cat_name] = BaseSampler(db_infos[cat_name], shuffle=True)
                sample_groups[cat_name] = self.SAMPLE_GROUPS[cat_name]
        self.data_aug_config = dict(
            db_sampler=dict(
                db_sampler=db_sampler,
                sample_groups=sample_groups
                ),
            object_noise=dict(
                num_try=100,
                translation_std=[0.25, 0.25, 0.25],
                rot_range=[-0.15707963267, 0.15707963267]  # +/- pi/20
                ),
            random_flip_ratio=0.5,
            global_rot_scale_trans=dict(
                rot_range=[-0.78539816, 0.78539816],  # +/- pi/4
                scale_ratio_range=[0.95, 1.05],
                translation_std=[0, 0, 0]
                ), 
            # [x_min, y_min, z_min, x_max, y_max, z_max]
            point_range_filter=[-20, -30, -3, 100, 30, 3],
            object_range_filter=[-20, -30, -3, 100, 30, 3]
        )

    def remove_dont_care(self, annos_info):
        """Drop every annotation named 'DontCare' from all annotation arrays.

        Mutates and returns `annos_info`; every value must be indexable by an
        integer list (e.g. a numpy array) of the same length as `name`.
        """
        keep_ids = [i for i, name in enumerate(annos_info['name']) if name != 'DontCare']
        for k, v in annos_info.items():
            annos_info[k] = v[keep_ids]
        return annos_info

    def filter_db(self, db_infos, filter_thrs=None):
        """Drop DB samples whose GT point count is below a per-class threshold.

        Args:
            db_infos: dict mapping class name -> list of sample dicts, each
                carrying a 'num_points_in_gt' field.
            filter_thrs: per-class minimum point counts. Defaults to
                ``dict(Car=5, Pedestrian=10, Truck=8, Cyclist=10)``.

        Returns:
            The filtered `db_infos` (mutated in place).
        """
        # Fix: avoid a mutable default argument; effective defaults unchanged.
        if filter_thrs is None:
            filter_thrs = dict(Car=5, Pedestrian=10, Truck=8, Cyclist=10)
        for cat in self.CLASSES:
            if cat in db_infos:
                filter_thr = filter_thrs[cat]
                db_infos[cat] = [item for item in db_infos[cat]
                                 if item['num_points_in_gt'] >= filter_thr]
        return db_infos

    def __getitem__(self, index):
        """Load one frame (points + 3D boxes + labels); augment on train splits."""
        data_info = self.data_infos[self.sorted_ids[index]]

        # NOTE(review): stored paths seem to be relative to a prefix that is 34
        # characters shorter than data_root — confirm this magic slice is right.
        lidar_pts_path = os.path.join(self.data_root[:-34], data_info['lidar']['path'])
        assert os.path.exists(lidar_pts_path)
        # NOTE(review): fromfile without dtype reads float64 — verify that this
        # matches how the point files were written.
        lidar_pts = np.fromfile(lidar_pts_path).reshape(-1, 3)

        radar_pts_path = os.path.join(self.data_root[:-34], data_info['radar']['path'])
        assert os.path.exists(radar_pts_path)
        radar_pts = np.fromfile(radar_pts_path).reshape(-1, 3)

        annos_info = data_info['label']
        annos_name = annos_info['name'].reshape(-1,)
        # Fix: annos_name is already 1-D; the former reshape(1,-1)[0] was a
        # no-op. Unknown class names map to label -1.
        gt_labels = [self.CLASSES.get(name, -1) for name in annos_name]
        # Boxes are location (3) + dimensions (3) + rotation_y columns.
        gt_bboxes_3d = np.concatenate(
            (annos_info['location'], annos_info['dimensions'], annos_info['rotation_y']),
            axis=1)
        data_dict = {
            'lidar_pts': lidar_pts,
            'radar_pts': radar_pts,
            'gt_bboxes_3d': gt_bboxes_3d,
            'gt_labels': np.array(gt_labels),
            'gt_names': annos_name,
            'img_paths': data_info["camera"]
        }

        if self.split in ['train', 'trainval']:
            # GT-sampling / object-noise / flip / global-transform augmentation.
            data_dict = data_augment(self.CLASSES, os.path.join(self.data_root, 'data_base'),
                                     data_dict, self.data_aug_config)
        else:
            data_dict = point_range_filter(
                data_dict, point_range=self.data_aug_config['point_range_filter'])

        return data_dict

    def __len__(self):
        """Number of frames in this split."""
        return len(self.data_infos)

if __name__ == '__main__':
    # Smoke test: build the training split from a local recording and pull
    # one augmented sample.
    dataset = HW(data_root='/media/zwh/T7/ZWH/bags/dataset/my_data/__2023-07-03-14-07-11_back_filter',
                 split='train')
    sample = dataset[3]

    print("aaaa")
