import numpy as np
import os
import torch
from torch.utils.data import Dataset

import sys
BASE = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(BASE))

from utils import read_pickle, bbox_camera2lidar
from dataset.data_aug_hw import point_range_filter, data_augment
from dataflow.my_io import read_points
from wwengine.config import Config
from wwengine.utils import listsortdir
import json

class BaseSampler():
    """Cyclic batch sampler over a fixed pool of items.

    Items are drawn in (optionally shuffled) index order; once the pool is
    exhausted the final, possibly short, batch is returned, the cursor wraps
    to the start, and the index order is reshuffled if requested.
    """

    def __init__(self, sampled_list, shuffle=True):
        # Store the pool as an ndarray so fancy indexing works in sample().
        self.sampled_list = np.array(sampled_list)
        self.total_num = len(sampled_list)
        self.indices = np.arange(self.total_num)
        self.shuffle = shuffle
        if shuffle:
            np.random.shuffle(self.indices)
        self.idx = 0

    def sample(self, num):
        """Return up to ``num`` items.

        The last batch of a pass may contain fewer than ``num`` items when
        the remaining pool is smaller than ``num``.
        """
        end = self.idx + num
        if end < self.total_num:
            batch = self.sampled_list[self.indices[self.idx:end]]
            self.idx = end
        else:
            # Pool (nearly) exhausted: hand back the tail, then reset the
            # cursor and reshuffle for the next pass. The gather happens
            # before the shuffle, so this batch keeps the old order.
            batch = self.sampled_list[self.indices[self.idx:]]
            self.idx = 0
            if self.shuffle:
                np.random.shuffle(self.indices)
        return batch


class HW(Dataset):
    """Dataset merging multiple point-cloud sub-datasets under one root.

    Each sub-dataset is a directory whose name starts with ``"__"`` and
    contains a ``data_base`` folder holding ``infos_<split>.pkl`` (per-frame
    metadata) and ``dbinfos_train.pkl`` (ground-truth sample pool used for
    copy-paste style augmentation). Samples expose lidar points, optional
    radar points, optional semantic-segmentation labels, and 3D boxes.
    """

    # CLASSES = {
    #     'Pedestrian': 0,
    #     'Cyclist': 1,
    #     # 'Car': 2,
    #     'Truck': 3,
    #     'RoadBarrel': 4
    #     }

    # # Minimum number of each target class that should appear per frame.
    # # If a class has too few instances:
    # #   if the sample pool contains that class
    # #       supplement from the pool
    # #   else
    # #       do not supplement
    # SAMPLE_GROUPS = {
    #     'Pedestrian': 10,
    #     'Cyclist': 3,
    #     'Car': 25,
    #     'Truck': 3,
    #     'RoadBarrel': 1
    #     }

    def __init__(self, cfg:dict, split = 'train'):
        """Build the merged index and the augmentation configuration.

        Args:
            cfg: dataset config section; must provide ``DATAPATH``,
                ``SELECTMODE``, ``SELECTED``, ``CLASS2LABEL`` and
                ``AUG_CFGS``. NOTE(review): sub-configs expose a
                ``try_get`` accessor, so ``cfg`` appears to be a project
                ``Config`` object rather than a plain dict — confirm.
            split: one of 'train', 'val', 'trainval', 'test'.
        """
        self.data_root = cfg['DATAPATH']
        assert os.path.exists(self.data_root)
        self.split = split
        assert self.split in ['train', 'val', 'trainval', 'test']
        self.selectmode = cfg['SELECTMODE']
        assert self.selectmode in ['all', 'include', 'except']
        self.selected = cfg['SELECTED']

        # Mapping from class name to integer label.
        self.CLASS2LABEL = cfg['CLASS2LABEL']

        self.aug_cfgs = cfg['AUG_CFGS']

        self.datasets_path = []# paths of all selected sub-datasets
        datasets = listsortdir(self.data_root)
        for dataset in datasets:
            if "__" == dataset[:2]:  # sub-dataset directories start with "__"
                if self.selectmode == 'all':# 'all': take every sub-dataset
                    pass
                elif self.selectmode == 'include':# 'include': keep only listed sub-datasets
                    if dataset in self.selected:
                        pass
                    else:
                        continue
                elif self.selectmode == 'except':# 'except': drop listed sub-datasets
                    if dataset in self.selected:
                        continue
                    else:
                        pass
                else:
                    exit('数据选择模式{}错误'.format(self.selectmode))
                dataset_path = os.path.join(self.data_root, dataset)
                self.datasets_path.append(dataset_path)

        self.data_infos = dict()
        db_infos = dict()
        dataset_id=-1
        # Merge all sub-datasets into one index; keys are made unique by
        # prefixing the sub-dataset id. GT sample pools are concatenated.
        for dataset_path in self.datasets_path:
            dataset_id=dataset_id+1
            # NOTE(review): .format is applied to the joined path string,
            # which works here but reads oddly.
            data_info = read_pickle(os.path.join(dataset_path, 'data_base', 'infos_{}.pkl').format(self.split))
            for key in data_info:
                new_key = str(dataset_id)+'-'+str(key) # new key = concat(dataset id, key)
                self.data_infos[new_key] = data_info[key]
            db_info = read_pickle(os.path.join(dataset_path, 'data_base' , 'dbinfos_train.pkl'))
            for key in db_info:
                # Same-named sample categories from different sub-datasets
                # are simply concatenated.
                if key in db_infos.keys():
                    db_infos[key].extend(db_info[key])
                else:
                    db_infos[key] = db_info[key]

        self.sorted_ids = list(self.data_infos.keys())

        # Only build GT samplers when the augmentation config has a
        # SAMPLE section.
        db_sampler = {}
        sample_groups = {}
        if 'SAMPLE' in self.aug_cfgs:
            sample_minpts = self.aug_cfgs['SAMPLE'].try_get('MINPTS')
            if sample_minpts:
                db_infos = self.filter_db(db_infos, filter_thrs = sample_minpts)# drop samples with too few points
            sample_groups_cfg = self.aug_cfgs['SAMPLE'].try_get('GROUPS')
            if sample_groups_cfg:
                for cat_name in self.CLASS2LABEL:# only classes that are both recognized and present in the pool get augmented
                    if cat_name in db_infos:
                        db_sampler[cat_name] = BaseSampler(db_infos[cat_name], shuffle=True)
                        sample_groups[cat_name] = sample_groups_cfg[cat_name] # can only sample classes that exist in the pool

        # Aggregated augmentation config consumed by data_augment /
        # point_range_filter.
        self.data_aug_config=dict(
            db_sampler=dict(
                db_sampler=db_sampler,
                sample_groups=sample_groups
                ),
            object_noise=dict(
                num_try=self.aug_cfgs['OBJ_NOISE'].try_get('Num_try'),
                translation_std=self.aug_cfgs['OBJ_NOISE'].try_get('Translation_std'),
                rot_range=self.aug_cfgs['OBJ_NOISE'].try_get('Rot_range')
                ),
            random_flip_ratio=self.aug_cfgs.try_get('FLIP_RATIO'),
            global_rot_scale_trans=dict(
                rot_range=self.aug_cfgs['RST'].try_get('Rot_range'),
                scale_ratio_range=self.aug_cfgs['RST'].try_get('Scale_ratio_range'),
                translation_std=self.aug_cfgs['RST'].try_get('Translation_std'),
                ), 
            point_range_filter=self.aug_cfgs.try_get('PTS_RANGE'),
            object_range_filter=self.aug_cfgs.try_get('OBJ_RANGE')
        )

    def remove_dont_care(self, annos_info):
        """Drop all annotation entries whose name is 'DontCare'.

        Assumes every value in ``annos_info`` is an array indexable by a
        list of kept positions.
        """
        keep_ids = [i for i, name in enumerate(annos_info['name']) if name != 'DontCare']
        for k, v in annos_info.items():
            annos_info[k] = v[keep_ids]
        return annos_info

    def filter_db(self, db_infos, filter_thrs = None):
        """Filter the GT sample pool by minimum point count per class.

        Args:
            db_infos: mapping class name -> list of sample dicts (each with
                a 'num_points_in_gt' entry).
            filter_thrs: per-class minimum point thresholds, e.g.
                dict(Car=5, Pedestrian=10, Cyclist=10).
        """
        # 1. filter_by_min_points, dict(Car=5, Pedestrian=10, Cyclist=10)
        filtered_db_infos = dict()
        for cat in self.CLASS2LABEL:
            if cat in db_infos:
                filter_thr = filter_thrs[cat]
                filtered_db_infos[cat] = [item for item in db_infos[cat] if item['num_points_in_gt'] >= filter_thr]
        
        return filtered_db_infos

    def __getitem__(self, index):
        """Load one frame: points, optional radar/segmentation, and labels.

        Returns a dict with lidar/radar points, segmentation, 3D boxes
        (x, y, z, dx, dy, dz, yaw — presumably lidar frame; confirm against
        the pickle producer), integer labels, names and camera infos.
        """
        data_info = self.data_infos[self.sorted_ids[index]]

        lidar_pts_path = os.path.join(self.data_root, data_info['lidar']['path'])
        assert os.path.exists(lidar_pts_path)
        lidar_pts = read_points(lidar_pts_path, dim=3, datatype=np.float32)

        # Radar (millimeter-wave) data may be absent for some frames.
        if 'radar' in data_info.keys():
            radar_pts_path = os.path.join(self.data_root, data_info['radar']['path'])
            assert os.path.exists(radar_pts_path)
            radar_pts = read_points(radar_pts_path, dim=5, datatype=np.float32)
        else:
            radar_pts = None

        # Optional per-point semantic segmentation plus its class-id
        # mapping (JSON sidecar).
        if 'semseg' in data_info.keys():
            semseg_path = os.path.join(self.data_root, data_info['semseg']['path'])
            assert os.path.exists(semseg_path)
            gt_seg = read_points(semseg_path, dim=1, datatype=np.float32)
            semseg_cls_path = os.path.join(self.data_root, data_info['semcls']['path'])
            assert os.path.exists(semseg_cls_path)
            with open(semseg_cls_path, 'r') as f:
                semcls_info = json.load(f)
        else:
            gt_seg = None
            semcls_info = {}

        if 'data_info' in data_info.keys():
            data_type = data_info['data_info']['type']
        else:
            data_type = ''

        annos_info = data_info['label']
        annos_name = annos_info['name'].reshape(-1,)
        # Unknown class names map to -1 and are masked out below.
        gt_labels = np.array([self.CLASS2LABEL.get(name, -1) for name in annos_name.reshape(1,-1)[0]])
        gt_bboxes_3d = np.concatenate((annos_info['location'], annos_info['dimensions'], annos_info['rotation_y']), axis=1)
        # Keep only the classes of interest.
        mask = gt_labels > -1
        annos_name = annos_name[mask]
        gt_labels = gt_labels[mask]
        gt_bboxes_3d = gt_bboxes_3d[mask]
        data_dict = {
            'data_type': data_type,
            'dataset_name':os.path.basename(self.datasets_path[int(self.sorted_ids[index].split('-')[0])]),
            'data_name':self.sorted_ids[index].split('-')[1],
            'lidar_pts': lidar_pts,
            'radar_pts': radar_pts,
            'gt_seg': gt_seg,
            'segid_cls': semcls_info,
            'gt_bboxes_3d': gt_bboxes_3d,
            'gt_labels': gt_labels,
            'gt_names': annos_name,
            'img_infos':data_info['camera']
        }

        if self.split in ['train', 'trainval']:
            # Augmentation is currently disabled; train data passes through
            # unchanged. NOTE(review): re-enable the line below to restore it.
            # data_dict = data_augment(self.CLASS2LABEL, self.data_root, data_dict, self.data_aug_config)# data augmentation
            data_dict = data_dict
        else:
            data_dict = point_range_filter(data_dict, point_range=self.data_aug_config['point_range_filter'])# every split must pass the point-range filter

        return data_dict

    def __len__(self):
        # Number of frames across all merged sub-datasets.
        return len(self.data_infos)

# NOTE(review): mid-file imports — used only by the visualization script below.
import open3d as o3d
from dataflow.vis_utils import get_draw_box, get_arrows
if __name__ == '__main__':
    # Smoke-test / visual-inspection entry point: load one sample from a
    # hard-coded config and render lidar, radar, segmentation colors and
    # GT boxes with Open3D.
    cfg = Config.fromfile('/home/adt/deeplearning/point-pilllar/config/default.json')

    hw_data = HW(cfg['DATASETS'], split='train')
    data_dict = hw_data.__getitem__(6)

    vis = o3d.visualization.Visualizer()
    vis.create_window(window_name= data_dict['dataset_name']+data_dict['data_name'])
    render_option = vis.get_render_option()
    render_option.point_size = 2
    coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=5.0, origin=[0, 0, 0])
    vis.add_geometry(coordinate_frame)

    if data_dict['lidar_pts'] is not None:
        data_lidar = o3d.geometry.PointCloud()
        data_lidar.points = o3d.utility.Vector3dVector(data_dict['lidar_pts'])
        # Visualize the lidar point cloud.
        data_lidar.paint_uniform_color([0.3, 0.5, 0])
        if data_dict['gt_seg'] is not None:
            # Color points by semantic class id, one color channel per
            # 20-class band. NOTE(review): 0.05 * sem exceeds 1.0 for
            # sem >= 20 — presumably relying on Open3D clamping; confirm.
            gt_seg = data_dict['gt_seg']
            for pt_id in range(gt_seg.shape[0]):
                sem = gt_seg[pt_id][0]
                if sem < 20:
                    data_lidar.colors[pt_id] = [0.05 * sem, 0, 0]
                elif sem >= 20 and sem < 40:
                    data_lidar.colors[pt_id] = [0, 0.05 * sem, 0]
                else:
                    data_lidar.colors[pt_id] = [0, 0, 0.05 * sem]
        vis.add_geometry(data_lidar)
    # Visualize the radar point cloud (note: check whether augmentation was applied).
    if data_dict['radar_pts'] is not None:
        data_radar = o3d.geometry.PointCloud()
        data_radar.points = o3d.utility.Vector3dVector(data_dict['radar_pts'][:,:3])
        data_radar.paint_uniform_color([1, 0.706, 0.1])
        render_option.point_size = 5.0  # larger render size for radar points
        vis.add_geometry(data_radar)
    # Visualize the GT boxes.
    if data_dict['gt_bboxes_3d'] is not None:
        boxes = data_dict['gt_bboxes_3d']
        exp_draw_boxes = get_draw_box(boxes)
        arrows = get_arrows(boxes)
        box_id=-1
        for box in exp_draw_boxes:
            box_id = box_id + 1
            if 'old_bboxes_size' in data_dict.keys():
                if box_id >= data_dict['old_bboxes_size']:
                    box.paint_uniform_color([0.4, 0.2, 0.9])# recolor boxes added by augmentation
            vis.add_geometry(box)
        for arrow in arrows:
            vis.add_geometry(arrow)

    vis.run()
    vis.destroy_window()

    print("aaaa")