'''
Description: 
Author: suyunzheng
Date: 2021-11-24 16:56:18
LastEditTime: 2021-12-03 22:10:24
LastEditors: maple
'''
import os
from random import shuffle
import sys
import glob
from time import sleep
from numpy.core.fromnumeric import trace
from numpy.core.shape_base import block
from numpy.lib.function_base import select
from plyfile import PlyData
import torch
import numpy as np
import tqdm

from torch.utils.data import Dataset, dataset

from torchsparse import SparseTensor
from torchsparse.utils.collate import sparse_collate_fn
from torchsparse.utils.quantize import sparse_quantize

from lib.common.visualize import visualize_with_label
import logging

# Raw SemanticKITTI label id -> class name (from the official semantic-kitti.yaml).
# Ids > 250 are "moving" variants of static classes; id 0/1 are unlabeled/outlier.
# NOTE: insertion order matters — SemanticKittiDataset.__init__ iterates this
# dict to assign contiguous training ids (0..18), so do not reorder entries.
label_name_mapping = {
    0: 'unlabeled',
    1: 'outlier',
    10: 'car',
    11: 'bicycle',
    13: 'bus',
    15: 'motorcycle',
    16: 'on-rails',
    18: 'truck',
    20: 'other-vehicle',
    30: 'person',
    31: 'bicyclist',
    32: 'motorcyclist',
    40: 'road',
    44: 'parking',
    48: 'sidewalk',
    49: 'other-ground',
    50: 'building',
    51: 'fence',
    52: 'other-structure',
    60: 'lane-marking',
    70: 'vegetation',
    71: 'trunk',
    72: 'terrain',
    80: 'pole',
    81: 'traffic-sign',
    99: 'other-object',
    252: 'moving-car',
    253: 'moving-bicyclist',
    254: 'moving-person',
    255: 'moving-motorcyclist',
    256: 'moving-on-rails',
    257: 'moving-bus',
    258: 'moving-truck',
    259: 'moving-other-vehicle'
}

# The 19 semantic classes retained for training; every other class is mapped
# to the ignore label (255). Only membership is tested against this list.
kept_labels = [
    'road',
    'sidewalk',
    'parking',
    'other-ground',
    'building',
    'car',
    'truck',
    'bicycle',
    'motorcycle',
    'other-vehicle',
    'vegetation',
    'trunk',
    'terrain',
    'person',
    'bicyclist',
    'motorcyclist',
    'fence',
    'pole',
    'traffic-sign',
]



class SemanticKittiDataset(Dataset):
    """SemanticKITTI LiDAR dataset yielding sparse, voxelized point clouds.

    Each sample is a dict of torchsparse ``SparseTensor``s holding the
    voxelized features and labels, the full-resolution labels, and the
    inverse map used to scatter voxel-level predictions back onto the
    original points.
    """

    num_class = 19          # number of semantic classes kept for training
    ignored_class = 255     # label id treated as "ignore" by the loss
    init_dim = 4            # per-point input features: x, y, z, intensity

    def __init__(self, root, voxel_size, num_points, split, sample_stride=1) -> None:
        """Index the scan files of the requested split and build the label map.

        Args:
            root: path to the SemanticKITTI ``sequences`` directory.
            voxel_size: voxel edge length used for coordinate quantization.
            num_points: maximum number of voxels kept per training sample.
            split: 'train', 'val' or 'test' (any other value leaves the
                file list empty).
            sample_stride: keep every ``sample_stride``-th scan (default 1).
        """
        super().__init__()
        self.root = root
        self.voxel_size = voxel_size
        self.num_points = num_points
        self.split = split
        self.sample_stride = sample_stride
        self.filelist = []
        self.train_list = []
        self.test_list = []
        self.val_list = []
        self.seqs = []

        # Standard SemanticKITTI sequence split.
        self.train_list = [
                '00', '01', '02', '03', '04', '05', '06', '07', '09', '10'
        ]
        self.test_list = [
                '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21'
        ]
        self.val_list = [
                '08'
        ]

        if split == 'train':
            self.filelist = self.train_list
        elif split == 'val':
            self.filelist = self.val_list
        elif split == 'test':
            self.filelist = self.test_list
        else:
            pass  # unknown split: leave the file list empty

        # Collect every .bin scan path of the selected sequences, in order.
        self.files = []
        for seq in self.filelist:
            seq_files = sorted(
                os.listdir(os.path.join(self.root, seq, 'velodyne'))
            )
            seq_files = [
                os.path.join(self.root, seq, 'velodyne', x) for x in seq_files
            ]
            self.files.extend(seq_files)

        if self.sample_stride > 1:
            self.files = self.files[::self.sample_stride]

        # Map raw SemanticKITTI label ids (0..259) onto contiguous training
        # ids 0..18; everything else becomes 255 (ignored). Moving classes
        # (id > 250) are folded onto their static counterpart.
        reverse_label_name_mapping = {}
        self.label_map = np.zeros(260)
        cnt = 0
        for label_id in label_name_mapping:
            if label_id > 250:
                if label_name_mapping[label_id].replace('moving-', '') in kept_labels:
                    self.label_map[label_id] = reverse_label_name_mapping[
                        label_name_mapping[label_id].replace('moving-', '')]
                else:
                    self.label_map[label_id] = 255
            elif label_id == 0:
                self.label_map[label_id] = 255
            else:
                if label_name_mapping[label_id] in kept_labels:
                    self.label_map[label_id] = cnt
                    reverse_label_name_mapping[label_name_mapping[label_id]] = cnt
                    cnt += 1
                else:
                    self.label_map[label_id] = 255

        self.reverse_label_name_mapping = reverse_label_name_mapping
        self.num_classes = cnt
        # NOTE: 'num_classed' is a historical misspelling kept so existing
        # callers keep working; prefer 'num_classes'.
        self.num_classed = cnt
        self.angle = 0.0

    def set_angle(self, angle):
        """Set the fixed z-rotation angle applied to val/test scans."""
        self.angle = angle

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        """Load, augment, and voxelize one scan.

        Returns a dict with SparseTensors: 'lidar' (features at voxel
        resolution), 'targets' (voxel labels), 'targets_mapped'
        (full-resolution labels), 'inverse_map' (point -> voxel index),
        plus 'file_name'.
        """
        with open(self.files[index], 'rb') as b:
            block_ = np.fromfile(b, dtype=np.float32).reshape((-1, 4))
        block = np.zeros_like(block_)

        if self.split == 'train':
            # Augmentation: random rotation about z plus random scaling.
            theta = np.random.uniform(0, 2 * np.pi)
            scale_factor = np.random.uniform(0.95, 1.05)
            rot_mat = np.array([[np.cos(theta), np.sin(theta), 0],
                                [-np.sin(theta),
                                 np.cos(theta), 0], [0, 0, 1]])
            block[:, :3] = np.dot(block_[:, :3], rot_mat) * scale_factor
        else:
            # Deterministic rotation by self.angle (see set_angle), used
            # e.g. for test-time rotation.
            theta = self.angle
            transform_mat = np.array([[np.cos(theta),
                                       np.sin(theta), 0],
                                      [-np.sin(theta),
                                       np.cos(theta), 0], [0, 0, 1]])
            block[...] = block_[...]
            block[:, :3] = np.dot(block[:, :3], transform_mat)

        block[:, 3] = block_[:, 3]  # keep the raw intensity channel
        # Voxelize: integer grid coordinates shifted to start at 0.
        pc_ = np.round(block[:, :3] / self.voxel_size).astype(np.int32)
        pc_ -= pc_.min(0, keepdims=1)

        # Labels live next to the scans: velodyne/xxx.bin -> labels/xxx.label.
        label_file = self.files[index].replace('velodyne', 'labels').replace(
            '.bin', '.label')
        if os.path.exists(label_file):
            with open(label_file, 'rb') as a:
                all_labels = np.fromfile(a, dtype=np.int32).reshape(-1)
        else:
            # Test split ships no labels; use zeros as placeholders.
            all_labels = np.zeros(pc_.shape[0]).astype(np.int32)

        # The lower 16 bits hold the semantic label (upper bits: instance id).
        labels_ = self.label_map[all_labels & 0xFFFF].astype(np.int64)

        # Features are the augmented coordinates plus intensity.
        feat_ = block

        # Hash-based deduplication: keep one point index per occupied voxel
        # and the inverse map from every original point to its voxel.
        _, inds, inverse_map = sparse_quantize(pc_,
                                               return_index=True,
                                               return_inverse=True)
        if 'train' in self.split:
            # Cap the number of voxels per training sample.
            if len(inds) > self.num_points:
                inds = np.random.choice(inds, self.num_points, replace=False)

        pc = pc_[inds]
        feat = feat_[inds]
        labels = labels_[inds]
        lidar = SparseTensor(feat, pc)
        labels = SparseTensor(labels, pc)
        labels_ = SparseTensor(labels_, pc_)            # full-resolution labels
        inverse_map = SparseTensor(inverse_map, pc_)

        return {
            'lidar': lidar,
            'targets': labels,
            'targets_mapped': labels_,
            'inverse_map': inverse_map,
            'file_name': self.files[index]
        }

    @staticmethod
    def collate_fn(inputs):
        """Batch a list of samples with torchsparse's sparse collate."""
        return sparse_collate_fn(inputs)


def getDataLoader(voxel_size: float, split: str, batch_size: int, num_points: int,
                  root: str = "/media/ubuntu/数据/suyunzheng_dataset/polarnet/data/sequences",
                  num_workers: int = 8):
    """Build a seeded DataLoader over the SemanticKITTI dataset.

    Args:
        voxel_size: voxel edge length passed to SemanticKittiDataset.
        split: 'train' (shuffled) or 'val'/'test' (not shuffled).
        batch_size: samples per batch.
        num_points: max voxels kept per training sample.
        root: SemanticKITTI ``sequences`` directory (default matches the
            original hard-coded path, so existing callers are unaffected).
        num_workers: DataLoader worker processes (default 8, as before).

    Returns:
        torch.utils.data.DataLoader using the dataset's sparse collate_fn.
    """
    import random

    dataset = SemanticKittiDataset(
        root=root,
        voxel_size=voxel_size,
        num_points=num_points,
        split=split,
        sample_stride=1,
    )

    # Fix all global RNGs for reproducibility.
    manual_seed = 123
    random.seed(manual_seed)
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)

    def worker_init_fn(worker_id):
        # Give each worker a distinct, deterministic seed. The previous
        # random.seed(manual_seed, worker_id) passed worker_id as the
        # 'version' argument (ignored for int seeds), so all forked workers
        # shared identical RNG state and np.random was never reseeded,
        # producing correlated augmentations across workers.
        random.seed(manual_seed + worker_id)
        np.random.seed(manual_seed + worker_id)

    # Local name avoids shadowing 'shuffle' imported from random at module level.
    do_shuffle = split == 'train'

    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=do_shuffle,
        num_workers=num_workers,
        pin_memory=True,
        worker_init_fn=worker_init_fn,
        collate_fn=dataset.collate_fn,
    )
if __name__ == "__main__":
    # Smoke test: build the train dataset/loader and time one epoch of batches.
    import random
    import time

    voxel_size = 0.05
    dataset = SemanticKittiDataset(
        root="/media/ubuntu/数据/suyunzheng_dataset/polarnet/data/sequences",
        voxel_size=voxel_size,
        num_points=80000,
        split='train',
        sample_stride=1,
    )

    # Seed every RNG so the smoke test is reproducible.
    manual_seed = 123
    random.seed(manual_seed)
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)

    def worker_init_fn(worker_id):
        # Distinct deterministic seed per worker. The previous code passed
        # worker_id as random.seed's 'version' argument, where it was
        # ignored, leaving all workers with identical RNG state.
        random.seed(manual_seed + worker_id)
        np.random.seed(manual_seed + worker_id)

    train_loader = torch.utils.data.DataLoader(
        dataset, batch_size=2, shuffle=True,
        num_workers=8, pin_memory=True,
        worker_init_fn=worker_init_fn, collate_fn=dataset.collate_fn,
    )
    print("===> len(dataset):{}".format(len(dataset)))
    # Fixed the previously unbalanced ')' in this message.
    print("===> len(train_loader):{}".format(len(train_loader)))
    end = time.time()
    for i, ele in enumerate(train_loader):
        print(ele['lidar'].F.shape)
        print("===> time: {}/{}--{}".format(i + 1, len(train_loader), time.time() - end))
        end = time.time()