import os
import sys
import json
import numpy as np
from torch.utils.data import Dataset
from typing import Optional, Union, List

BASE = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(BASE))

from dataflow.my_io import read_points
from wwengine.config import Config
from wwengine.datasets import Compose
from wwengine.utils import listsortdir, read_pickle
from .builder import DATASETS


class BaseSampler():
    """Cyclic sampler over a fixed pool of items.

    Draws consecutive batches from a (optionally shuffled) permutation of the
    pool; when the pool is exhausted the remaining tail is returned as a short
    batch and the permutation is reshuffled for the next pass.
    """

    def __init__(self, sampled_list, shuffle=True):
        """
        Args:
            sampled_list: sequence of items to sample from.
            shuffle: when True, the iteration order is randomized on
                construction and after every full pass.
        """
        pool = np.array(sampled_list)
        self.total_num = len(sampled_list)
        self.sampled_list = pool
        self.indices = np.arange(self.total_num)
        self.shuffle = shuffle
        self.idx = 0
        if shuffle:
            np.random.shuffle(self.indices)

    def sample(self, num):
        """Return up to ``num`` items as a numpy array.

        A batch shorter than ``num`` signals that the current pass over the
        pool has ended (the cursor resets and, if enabled, a reshuffle occurs).
        """
        end = self.idx + num
        if end < self.total_num:
            # Plenty left: take the next window and advance the cursor.
            ret = self.sampled_list[self.indices[self.idx:end]]
            self.idx = end
        else:
            # Tail of the pass: hand back whatever remains, then rewind.
            ret = self.sampled_list[self.indices[self.idx:]]
            self.idx = 0
            if self.shuffle:
                np.random.shuffle(self.indices)
        return ret


@DATASETS.register_module()
class HW(Dataset):
    """Multi-dataset point-cloud dataset (lidar + optional radar) for
    detection ('DET') and/or segmentation ('SEG') tasks.

    Expected on-disk layout (as read by this class):
        data_root/
            __<dataset_name>/                       # only dirs prefixed "__" count
                data_base/infos_<split>.pkl         # per-frame info dicts
                data_base/dbinfos_train.pkl         # GT database for 'Dbsample' aug

    Frames from every selected sub-dataset are merged into one index space;
    each merged key is '<dataset_id>-<original_key>'.
    """

    def __init__(self, data_path, select_mode, selected, split, class2label, pipeline, tasks):
        """
        Args:
            data_path: root directory containing the "__"-prefixed sub-datasets.
            select_mode: 'all' | 'include' | 'except' — how `selected` filters
                sub-dataset directory names.
            selected: container of sub-dataset names used by 'include'/'except'.
            split: 'train' | 'val' | 'trainval' | 'test'.
            class2label: mapping class name -> integer label; unmapped names
                become -1 and are filtered out in __getitem__.
            pipeline: list of transform config dicts consumed by Compose.
            tasks: subset of ['DET', 'SEG'].

        NOTE(review): the input checks below use `assert`, which is stripped
        under `python -O`; explicit raises would be more robust.
        """
        self.data_root = data_path
        assert os.path.exists(self.data_root)
        self.split = split
        assert self.split in ['train', 'val', 'trainval', 'test']
        self.selectmode = select_mode
        assert self.selectmode in ['all', 'include', 'except']
        self.selected = selected

        self.CLASS2LABEL = class2label
        for task in tasks:
            assert task in ['DET', 'SEG']  # allowed task set: ['DET', 'SEG']
        self.tasks = tasks
        # self.aug_cfgs = cfg['AUG_CFGS']

        self.datasets_path = []  # absolute paths of all selected sub-datasets
        datasets = listsortdir(self.data_root)
        for dataset in datasets:
            if "__" == dataset[:2]:  # only entries whose name starts with "__" are datasets
                if self.selectmode == 'all':  # 'all': keep every dataset
                    pass
                elif self.selectmode == 'include':  # 'include': keep only the listed datasets
                    if dataset in self.selected:
                        pass
                    else:
                        continue
                elif self.selectmode == 'except':  # 'except': drop the listed datasets
                    if dataset in self.selected:
                        continue
                    else:
                        pass
                else:
                    # Unreachable in practice: selectmode was validated above.
                    exit('数据选择模式{}错误'.format(self.selectmode))
                dataset_path = os.path.join(self.data_root, dataset)
                self.datasets_path.append(dataset_path)

        self.data_infos = dict()
        db_infos = dict()
        dataset_id = -1
        # Merge frame infos (and, for training, the GT databases) of all
        # selected sub-datasets into single dicts.
        for dataset_path in self.datasets_path:
            dataset_id = dataset_id + 1
            # NOTE(review): `.format` is applied to the already-joined path —
            # the '{}' placeholder lives in the filename, so this works, but
            # formatting the filename before join would be clearer.
            data_info = read_pickle(os.path.join(dataset_path, 'data_base', 'infos_{}.pkl').format(self.split))
            for key in data_info:
                new_key = str(dataset_id) + '-' + str(key)  # new key = concat(dataset id, original key)
                self.data_infos[new_key] = data_info[key]
            if self.split != 'train':
                continue # GT-database samples are only collected for the train split
            db_info = read_pickle(os.path.join(dataset_path, 'data_base', 'dbinfos_train.pkl'))
            for key in db_info:
                # Same-named sample lists from different datasets are concatenated.
                if key in db_infos.keys():
                    db_infos[key].extend(db_info[key])
                else:
                    db_infos[key] = db_info[key]

        self.sorted_ids = list(self.data_infos.keys())

        transforms = pipeline
        for transform in transforms:
            if transform['type'] == 'Dbsample':  # the DB-sampling transform needs extra context
                transform.setdefault('class2label', self.CLASS2LABEL)
                transform.setdefault('data_root', self.data_root)
                transform.setdefault('db_infos', db_infos)
        self.compose = Compose(transforms, self.tasks)

    def remove_dont_care(self, annos_info: dict) -> dict:
        """Drop every annotation entry whose name is 'DontCare' (in place).

        Assumes all values in `annos_info` are numpy arrays aligned with
        `annos_info['name']` so fancy indexing with `keep_ids` applies uniformly.
        """
        keep_ids = [i for i, name in enumerate(annos_info['name']) if name != 'DontCare']
        for k, v in annos_info.items():
            annos_info[k] = v[keep_ids]
        return annos_info

    def __getitem__(self, index):
        """Load one frame (lidar, optional radar/segmentation, boxes) and, for
        train/trainval splits, run it through the transform pipeline.
        """
        data_info = self.data_infos[self.sorted_ids[index]]

        lidar_pts_path = os.path.join(self.data_root, data_info['lidar']['path'])
        assert os.path.exists(lidar_pts_path)
        lidar_pts = read_points(lidar_pts_path, dim=3, datatype=np.float32)

        # Radar may be absent for some frames.
        if 'radar' in data_info.keys():
            radar_pts_path = os.path.join(self.data_root, data_info['radar']['path'])
            assert os.path.exists(radar_pts_path)
            radar_pts = read_points(radar_pts_path, dim=5, datatype=np.float32)
        else:
            radar_pts = None

        # Segmentation labels (per-point) plus a JSON mapping of segment ids
        # to classes — presumably id -> class name; confirm against producer.
        if 'semseg' in data_info.keys():
            semseg_path = os.path.join(self.data_root, data_info['semseg']['path'])
            assert os.path.exists(semseg_path)
            gt_seg = read_points(semseg_path, dim=1, datatype=np.float32)
            semseg_cls_path = os.path.join(self.data_root, data_info['semcls']['path'])
            assert os.path.exists(semseg_cls_path)
            with open(semseg_cls_path, 'r') as f:
                semcls_info = json.load(f)
        else:
            gt_seg = None
            semcls_info = {}

        if 'data_info' in data_info.keys():
            data_type = data_info['data_info']['type']
        else:
            data_type = ''

        annos_info = data_info['label']
        annos_name = annos_info['name'].reshape(-1, )
        # NOTE(review): annos_name is already flat; the extra
        # `.reshape(1, -1)[0]` below is redundant. Unmapped names -> -1.
        gt_labels = np.array([self.CLASS2LABEL.get(name, -1) for name in annos_name.reshape(1, -1)[0]])
        # Boxes assembled as [location | dimensions | rotation_y]; assumes
        # location/dimensions are (N, 3) and rotation_y is (N, 1) — TODO confirm.
        gt_bboxes_3d = np.concatenate((annos_info['location'], annos_info['dimensions'], annos_info['rotation_y']),
                                      axis=1)
        # Keep only the classes of interest (label > -1).
        mask = gt_labels > -1
        annos_name = annos_name[mask]
        gt_labels = gt_labels[mask]
        gt_bboxes_3d = gt_bboxes_3d[mask]
        data_dict = {
            'data_type': data_type,
            'dataset_name': os.path.basename(self.datasets_path[int(self.sorted_ids[index].split('-')[0])]),
            'data_name': self.sorted_ids[index].split('-')[1],
            'lidar_pts': lidar_pts,
            'radar_pts': radar_pts,
            'gt_seg': gt_seg,
            'segid_cls': semcls_info,
            'gt_bboxes_3d': gt_bboxes_3d,
            'gt_labels': gt_labels,
            'gt_names': annos_name,
            'img_infos': data_info['camera']
        }

        # Augmentation/transform pipeline runs only for training splits.
        if self.split in ['train', 'trainval']:
            # data_dict = data_augment(self.CLASS2LABEL, self.data_root, data_dict, self.data_aug_config)  # (disabled) legacy augmentation path
            data_dict = self.compose(data_dict)
        # else:
        #     data_dict = point_range_filter(data_dict, point_range=self.data_aug_config['point_range_filter'])  # (disabled) legacy range filter for eval data
        return data_dict

    def __len__(self) -> int:
        """Total number of frames across all merged sub-datasets."""
        return len(self.data_infos)


# NOTE(review): these imports are placed here (not at the top) so that merely
# importing this module does not require open3d; conventionally they belong
# at the top of the file, guarded or made optional.
import open3d as o3d
from dataflow.vis_utils import get_draw_box

if __name__ == '__main__':
    # Ad-hoc visual smoke test: load one frame and render it with Open3D.
    cfg = Config.fromfile('/home/zwh/work_space/point-pilllar/config/default.json')

    # NOTE(review): this call does not match HW.__init__'s current signature
    # (data_path, select_mode, selected, split, class2label, pipeline, tasks)
    # and would raise TypeError as written — presumably stale; update to pass
    # the individual config fields (e.g. HW(**cfg['DATASETS'])) once the
    # config schema is confirmed.
    hw_data = HW(cfg['DATASETS'], split='train')
    data_dict = hw_data.__getitem__(11)  # NOTE(review): hw_data[11] is the idiomatic form

    vis = o3d.visualization.Visualizer()
    vis.create_window(window_name=data_dict['dataset_name'] + data_dict['data_name'])
    render_option = vis.get_render_option()
    render_option.point_size = 2
    coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
    vis.add_geometry(coordinate_frame)

    # Visualize the lidar point cloud.
    if data_dict['lidar_pts'] is not None:
        data_lidar = o3d.geometry.PointCloud()
        data_lidar.points = o3d.utility.Vector3dVector(data_dict['lidar_pts'])
        data_lidar.paint_uniform_color([0.3, 0.5, 0])
        vis.add_geometry(data_lidar)
    # Visualize the radar point cloud (first 3 dims as xyz); note that
    # augmentation may have altered it.
    if data_dict['radar_pts'] is not None:
        data_radar = o3d.geometry.PointCloud()
        data_radar.points = o3d.utility.Vector3dVector(data_dict['radar_pts'][:, :3])
        data_radar.paint_uniform_color([1, 0.706, 0.1])
        render_option.point_size = 5.0  # render radar points larger
        vis.add_geometry(data_radar)
    # Visualize the ground-truth boxes.
    if data_dict['gt_bboxes_3d'] is not None:
        boxes = data_dict['gt_bboxes_3d']
        exp_draw_boxes = get_draw_box(boxes)
        box_id = -1
        for box in exp_draw_boxes:
            box_id = box_id + 1
            if 'old_bboxes_size' in data_dict.keys():
                if box_id >= data_dict['old_bboxes_size']:
                    box.paint_uniform_color([0.4, 0.2, 0.9])  # recolor boxes added by augmentation
            vis.add_geometry(box)

    vis.run()
    vis.destroy_window()

    print("aaaa")
