'''
Description: 
Author: suyunzheng
Date: 2021-11-19 22:31:07
LastEditTime: 2021-12-03 22:01:00
LastEditors: maple
'''
import enum
import os
from random import shuffle
from re import I
from time import sleep
import numpy as np
import glob
from numpy.core.fromnumeric import shape
from numpy.lib.function_base import select
from numpy.lib.npyio import load
from plyfile import PlyData
from scipy.sparse import coo
import torch
from torch._C import FUSE_ADD_RELU

from tqdm import tqdm
from torch.utils.data import Dataset
from tqdm.std import trange
from lib.common.visualize import visualize_with_label

from lib.utils import load_state_with_same_shape

import logging

from torchsparse import SparseTensor
from torchsparse.utils.collate import sparse_collate_fn
from torchsparse.utils.quantize import sparse_quantize



# PointNet-style block sampling; adapted from https://github.com/yanx27/Pointnet_Pointnet2_pytorch
class S3DISDataset(Dataset):
    """Random-block S3DIS training dataset (PointNet-style preprocessing).

    Adapted from https://github.com/yanx27/Pointnet_Pointnet2_pytorch.
    Each sample is a ``num_point``-sized block of points cropped around a
    random center in a randomly chosen room.
    """

    def __init__(self, split='train', data_root='trainval_fullarea', num_point=4096, test_area=5, block_size=1.0, sample_rate=1.0, transform=None):
        """Load every room of the chosen split into memory.

        Args:
            split: 'train' keeps all areas except ``test_area``; any other
                value keeps only ``test_area``.
            data_root: directory of per-room ``.npy`` files with columns
                x, y, z, r, g, b, label.
            num_point: number of points returned per sample.
            test_area: S3DIS area number held out from training.
            block_size: XY side length of the sampled block (scene units).
            sample_rate: scales how many samples one epoch contains.
            transform: optional callable applied to (points, labels).
        """
        super().__init__()
        self.num_point = num_point
        self.block_size = block_size
        self.transform = transform
        rooms = sorted(os.listdir(data_root))
        rooms = [room for room in rooms if 'Area_' in room]
        if split == 'train':
            rooms_split = [room for room in rooms if not 'Area_{}'.format(test_area) in room]
        else:
            rooms_split = [room for room in rooms if 'Area_{}'.format(test_area) in room]

        self.room_points, self.room_labels = [], []
        self.room_coord_min, self.room_coord_max = [], []
        num_point_all = []
        # Accumulated per-class point counts; 13 classes are assumed.
        labelweights = np.zeros(13)

        for room_name in tqdm(rooms_split, total=len(rooms_split)):
            room_path = os.path.join(data_root, room_name)
            room_data = np.load(room_path)  # xyzrgbl, N*7
            points, labels = room_data[:, 0:6], room_data[:, 6]  # xyzrgb, N*6; l, N
            tmp, _ = np.histogram(labels, range(14))
            labelweights += tmp
            coord_min, coord_max = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
            self.room_points.append(points), self.room_labels.append(labels)
            self.room_coord_min.append(coord_min), self.room_coord_max.append(coord_max)
            num_point_all.append(labels.size)
        # Inverse-frequency class weights, cube-root smoothed.
        labelweights = labelweights.astype(np.float32)
        labelweights = labelweights / np.sum(labelweights)
        self.labelweights = np.power(np.amax(labelweights) / labelweights, 1 / 3.0)
        print(self.labelweights)
        # Draw room indices proportionally to room size so that roughly
        # ``sample_rate`` of all points are covered per epoch.
        sample_prob = num_point_all / np.sum(num_point_all)
        num_iter = int(np.sum(num_point_all) * sample_rate / num_point)
        room_idxs = []
        for index in range(len(rooms_split)):
            room_idxs.extend([index] * int(round(sample_prob[index] * num_iter)))
        self.room_idxs = np.array(room_idxs)
        print("Totally {} samples in {} set.".format(len(self.room_idxs), split))

    def __getitem__(self, idx):
        """Return one (num_point, 9) feature block and its (num_point,) labels.

        Feature columns: x/y centered on the block, absolute z, rgb scaled to
        [0, 1], and xyz normalized by the room's max coordinate.
        """
        room_idx = self.room_idxs[idx]
        points = self.room_points[room_idx]   # N * 6
        labels = self.room_labels[room_idx]   # N
        N_points = points.shape[0]

        # Re-draw random block centers until the block holds enough points.
        while (True):
            center = points[np.random.choice(N_points)][:3]
            block_min = center - [self.block_size / 2.0, self.block_size / 2.0, 0]
            block_max = center + [self.block_size / 2.0, self.block_size / 2.0, 0]
            point_idxs = np.where((points[:, 0] >= block_min[0]) & (points[:, 0] <= block_max[0]) & (points[:, 1] >= block_min[1]) & (points[:, 1] <= block_max[1]))[0]
            if point_idxs.size > 1024:
                break

        # Subsample without replacement when possible, otherwise pad the
        # block by sampling with replacement.
        if point_idxs.size >= self.num_point:
            selected_point_idxs = np.random.choice(point_idxs, self.num_point, replace=False)
        else:
            selected_point_idxs = np.random.choice(point_idxs, self.num_point, replace=True)

        # normalize
        selected_points = points[selected_point_idxs, :]  # num_point * 6
        current_points = np.zeros((self.num_point, 9))  # num_point * 9
        current_points[:, 6] = selected_points[:, 0] / self.room_coord_max[room_idx][0]
        current_points[:, 7] = selected_points[:, 1] / self.room_coord_max[room_idx][1]
        current_points[:, 8] = selected_points[:, 2] / self.room_coord_max[room_idx][2]
        selected_points[:, 0] = selected_points[:, 0] - center[0]
        selected_points[:, 1] = selected_points[:, 1] - center[1]
        selected_points[:, 3:6] /= 255.0
        current_points[:, 0:6] = selected_points
        current_labels = labels[selected_point_idxs]
        if self.transform is not None:
            current_points, current_labels = self.transform(current_points, current_labels)
        return current_points, current_labels

    def __len__(self):
        return len(self.room_idxs)

class ScannetDatasetWholeScene():
    """Sliding-window whole-scene dataset used for evaluation.

    Tiles every room of the chosen split with overlapping
    ``block_size`` x ``block_size`` XY windows (step ``stride``) so a model
    can predict on every point; ``__getitem__`` returns all windows of one
    room, each padded/duplicated to exactly ``block_points`` points.
    """

    def __init__(self, root, block_points=4096, split='test', test_area=5, stride=0.5, block_size=1.0, padding=0.001):
        """Load every room of the split from ``root`` (per-room .npy files).

        Args:
            root: directory of per-room .npy files (columns x,y,z,r,g,b,l).
            block_points: points per window after padding/duplication.
            split: 'train' excludes ``test_area``; 'test' keeps only it.
            test_area: held-out S3DIS area number.
            stride: XY step between consecutive windows.
            block_size: XY side length of each window.
            padding: tolerance added around a window when selecting points.
        """
        self.block_points = block_points
        self.block_size = block_size
        self.padding = padding
        self.root = root
        self.split = split
        self.stride = stride
        self.scene_points_num = []
        assert split in ['train', 'test']
        # BUG FIX: the original used ``is -1`` / ``is not -1``, which relies
        # on CPython small-int caching; use value equality instead.
        if self.split == 'train':
            self.file_list = [d for d in os.listdir(root) if d.find('Area_%d' % test_area) == -1]
        else:
            self.file_list = [d for d in os.listdir(root) if d.find('Area_%d' % test_area) != -1]
        self.scene_points_list = []
        self.semantic_labels_list = []
        self.room_coord_min, self.room_coord_max = [], []
        for file in self.file_list:
            # os.path.join also tolerates a ``root`` without a trailing slash
            # (the original ``root + file`` required one).
            data = np.load(os.path.join(root, file))
            points = data[:, :3]
            self.scene_points_list.append(data[:, :6])
            self.semantic_labels_list.append(data[:, 6])
            coord_min, coord_max = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
            self.room_coord_min.append(coord_min)
            self.room_coord_max.append(coord_max)
        assert len(self.scene_points_list) == len(self.semantic_labels_list)

        # Inverse-frequency class weights over 13 classes, cube-root smoothed.
        labelweights = np.zeros(13)
        for seg in self.semantic_labels_list:
            tmp, _ = np.histogram(seg, range(14))
            self.scene_points_num.append(seg.shape[0])
            labelweights += tmp
        labelweights = labelweights.astype(np.float32)
        labelweights = labelweights / np.sum(labelweights)
        self.labelweights = np.power(np.amax(labelweights) / labelweights, 1 / 3.0)

    def __getitem__(self, index):
        """Return every window of room ``index``.

        Returns:
            data_room: (n_windows, block_points, 9) features -- centered x/y,
                absolute z, rgb in [0,1], xyz normalized by the room maximum.
            label_room: (n_windows, block_points) integer labels.
            sample_weight: (n_windows, block_points) per-point class weights.
            index_room: (n_windows, block_points) original point indices.
        """
        point_set_ini = self.scene_points_list[index]
        points = point_set_ini[:, :6]
        labels = self.semantic_labels_list[index]
        coord_min, coord_max = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
        grid_x = int(np.ceil(float(coord_max[0] - coord_min[0] - self.block_size) / self.stride) + 1)
        grid_y = int(np.ceil(float(coord_max[1] - coord_min[1] - self.block_size) / self.stride) + 1)
        data_room, label_room, sample_weight, index_room = np.array([]), np.array([]), np.array([]), np.array([])
        for index_y in range(0, grid_y):
            for index_x in range(0, grid_x):
                # Window [s_x, e_x] x [s_y, e_y], clamped to the room extent.
                s_x = coord_min[0] + index_x * self.stride
                e_x = min(s_x + self.block_size, coord_max[0])
                s_x = e_x - self.block_size
                s_y = coord_min[1] + index_y * self.stride
                e_y = min(s_y + self.block_size, coord_max[1])
                s_y = e_y - self.block_size
                point_idxs = np.where(
                    (points[:, 0] >= s_x - self.padding) & (points[:, 0] <= e_x + self.padding) & (points[:, 1] >= s_y - self.padding) & (
                                points[:, 1] <= e_y + self.padding))[0]
                if point_idxs.size == 0:
                    continue
                # Duplicate points so the window holds a whole number of
                # ``block_points``-sized blocks.
                num_batch = int(np.ceil(point_idxs.size / self.block_points))
                point_size = int(num_batch * self.block_points)
                replace = False if (point_size - point_idxs.size <= point_idxs.size) else True
                point_idxs_repeat = np.random.choice(point_idxs, point_size - point_idxs.size, replace=replace)
                point_idxs = np.concatenate((point_idxs, point_idxs_repeat))
                np.random.shuffle(point_idxs)
                data_batch = points[point_idxs, :]  # fancy indexing -> copy
                normalized_xyz = np.zeros((point_size, 3))
                normalized_xyz[:, 0] = data_batch[:, 0] / coord_max[0]
                normalized_xyz[:, 1] = data_batch[:, 1] / coord_max[1]
                normalized_xyz[:, 2] = data_batch[:, 2] / coord_max[2]
                data_batch[:, 0] = data_batch[:, 0] - (s_x + self.block_size / 2.0)
                data_batch[:, 1] = data_batch[:, 1] - (s_y + self.block_size / 2.0)
                data_batch[:, 3:6] /= 255.0
                data_batch = np.concatenate((data_batch, normalized_xyz), axis=1)
                label_batch = labels[point_idxs].astype(int)
                batch_weight = self.labelweights[label_batch]

                data_room = np.vstack([data_room, data_batch]) if data_room.size else data_batch
                label_room = np.hstack([label_room, label_batch]) if label_room.size else label_batch
                # BUG FIX: the guard previously tested ``label_room.size``
                # (always non-empty at this point); test sample_weight itself.
                sample_weight = np.hstack([sample_weight, batch_weight]) if sample_weight.size else batch_weight
                index_room = np.hstack([index_room, point_idxs]) if index_room.size else point_idxs
        data_room = data_room.reshape((-1, self.block_points, data_room.shape[1]))
        label_room = label_room.reshape((-1, self.block_points))
        sample_weight = sample_weight.reshape((-1, self.block_points))
        index_room = index_room.reshape((-1, self.block_points))
        return data_room, label_room, sample_weight, index_room

    def __len__(self):
        return len(self.scene_points_list)

# Memoization decorator; adapted from https://github.com/chrischoy/SpatioTemporalSegmentation
def cache(func):
    """Per-instance memoization decorator for dataset loader methods.

    Adapted from https://github.com/chrischoy/SpatioTemporalSegmentation.
    Assumes the first positional argument is an index and that the instance
    provides ``self.cache`` (bool switch) and ``self.cache_dict`` (a dict of
    per-function result dicts keyed by ``func.__name__``).
    """
    import functools

    # BUG FIX: the original wrapper lost the wrapped function's name and
    # docstring; functools.wraps preserves them.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        index = args[0]
        if not self.cache:
            # Caching disabled on this instance: always recompute.
            return func(self, *args, **kwargs)
        if index not in self.cache_dict[func.__name__]:
            self.cache_dict[func.__name__][index] = func(self, *args, **kwargs)
        return self.cache_dict[func.__name__][index]

    return wrapper

# Combines the Minkowski-style pipeline (https://github.com/chrischoy/SpatioTemporalSegmentation) with spvnas preprocessing (https://github.com/mit-han-lab/spvnas)
class S3DIS_sparse_Dataset(Dataset):
    """Voxelized S3DIS dataset yielding torchsparse SparseTensors.

    Combines the Minkowski-style label remapping of
    https://github.com/chrischoy/SpatioTemporalSegmentation with the
    quantization pipeline of https://github.com/mit-han-lab/spvnas.
    """

    num_class = 13
    ignored_class = 255
    init_dim = 6

    def __init__(self, root, voxel_size, num_points, split) -> None:
        """Index all .ply scenes under ``root`` and build the label map.

        Args:
            root: directory containing ``Area_*/<room>.ply`` files.
            voxel_size: edge length of the quantization voxel.
            num_points: nominal point budget per scene (stored, not enforced
                here).
            split: 'train' uses every area except Area_5; anything else uses
                Area_5 only.
        """
        super().__init__()
        self.root = root
        self.voxel_size = voxel_size
        self.num_points = num_points
        self.split = split
        self.filelist = []              # .ply files of the current split
        self.train_list = []
        self.test_list = []

        # Build the label map: drop 'stairs' (id 10, following SegCloud) so
        # the remaining 13 classes are renumbered 0-12; ignored ids map to
        # the 255 ignore mask.
        self.NUM_LABELS = 14
        self.IGNORE_LABELS = (10,)
        self.ignore_mask = 255
        label_map = {}
        n_used = 0
        for l in range(self.NUM_LABELS):
            if l in self.IGNORE_LABELS:
                label_map[l] = self.ignore_mask
            else:
                label_map[l] = n_used
                n_used += 1
        label_map[self.ignore_mask] = self.ignore_mask
        self.label_map = label_map
        self.NUM_LABELS -= len(self.IGNORE_LABELS)          # 14 - 1 = 13

        # Original class order before remapping (ids 0-13, 10 == 'stairs').
        self.CLASSES = [
            'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa',
            'stairs', 'table', 'wall', 'window'
        ]

        # Collect .ply scenes; Area_5 is the conventional test split.
        all_ply_list = glob.glob(os.path.join(self.root, "*/*.ply"))
        for file_name in all_ply_list:
            if "Area_5" in file_name:
                self.test_list.append(file_name)
            else:
                self.train_list.append(file_name)
        if split == 'train':
            self.filelist = self.train_list
            print("===> train filelist: {}".format(len(self.filelist)))
        else:
            self.filelist = self.test_list
            print("===> test filelist: {}".format(len(self.filelist)))

    def __len__(self):
        return len(self.filelist)

    def __getitem__(self, index):
        """Load, augment, and voxelize one scene.

        Returns a dict of SparseTensors: 'lidar' (features at voxel
        resolution), 'targets' (labels at voxel resolution),
        'targets_mapped' (full-resolution labels), 'inverse_map' (voxel ->
        point mapping), plus the scene's 'file_name'.
        """
        coords, feats, labels, _ = self.load_ply(index)

        # Remap labels (removes 'stairs').  BUG FIX: ``np.int`` was removed
        # in NumPy 1.24; use an explicit integer dtype.
        if self.IGNORE_LABELS is not None:
            labels = np.array([self.label_map[x] for x in labels], dtype=np.int64)

        # Training-time augmentation: random rotation about z plus a small
        # uniform scale.
        if self.split == 'train':
            theta = np.random.uniform(0, 2 * np.pi)
            scale_factor = np.random.uniform(0.95, 1.05)
            rot_mat = np.array([[np.cos(theta), np.sin(theta), 0],
                                [-np.sin(theta),
                                 np.cos(theta), 0], [0, 0, 1]])
            coords[:, :3] = np.dot(coords[:, :3], rot_mat) * scale_factor

        # Features = (centered rgb, mean-centered coordinates).
        feats_new = self._augment_coords_to_feats(coords=coords[:, :3], feats=feats[:, :3] / 255 - 0.5)

        # Voxelize.  sparse_quantize deduplicates voxel coordinates via
        # hashing, so the number of kept points can shrink.
        pc_ = np.round(coords[:, :3] / self.voxel_size).astype(np.int32)
        pc_ -= pc_.min(0, keepdims=1)
        _, inds, inverse_map = sparse_quantize(pc_,
                                               return_index=True,
                                               return_inverse=True)
        pc_inds = pc_[inds]
        feat_inds = feats_new[inds]                 # (feats, norm_coords)
        label_inds = labels[inds]

        lidar_sps = SparseTensor(feat_inds, pc_inds)
        labels_sps = SparseTensor(label_inds, pc_inds)
        labels_raw = SparseTensor(labels, pc_)            # full-resolution labels
        inverse_map = SparseTensor(inverse_map, pc_)

        return {
            'lidar': lidar_sps,
            'targets': labels_sps,
            'targets_mapped': labels_raw,
            'inverse_map': inverse_map,
            'file_name': self.filelist[index]
        }

    @staticmethod
    def collate_fn(inputs):
        """Batch a list of __getitem__ dicts with torchsparse's collate."""
        return sparse_collate_fn(inputs)

    # @cache
    def load_ply(self, index):
        """Read one .ply scene.

        Returns:
            (coords N*3 float32, rgb N*3 float32, labels N int32, None).
        """
        filepath = self.filelist[index]
        plydata = PlyData.read(filepath)
        data = plydata.elements[0].data
        coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
        feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T
        labels = np.array(data['label'], dtype=np.int32)
        return coords, feats, labels, None

    def _augment_coords_to_feats(self, coords, feats, labels=None):
        """Append mean-centered coordinates to feats (colors stay first)."""
        norm_coords = coords - coords.mean(0)
        if isinstance(coords, np.ndarray):
            feats = np.concatenate((feats, norm_coords), 1)
        else:
            feats = torch.cat((feats, norm_coords), 1)
        return feats

# Sparse collate function.
# References: https://stackoverflow.com/questions/65279115/how-to-use-collate-fn-with-dataloaders#new-answer?newreg=8a0fa849cccd4383a8de25d28dc4e4a8 and https://github.com/chrischoy/SpatioTemporalSegmentation
class cfl_collate_fn_factory:
  """Generates collate function for coords, feats, labels.

    Args:
      limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch
                       size so that the number of input coordinates is below limit_numpoints.
  """

  def __init__(self, limit_numpoints):
    self.limit_numpoints = limit_numpoints

  def __call__(self, list_data):
    coords, feats, labels = list(zip(*list_data))
    coords_batch, feats_batch, labels_batch = [], [], []

    batch_num_points = 0
    for batch_id, _ in enumerate(coords):
      num_points = coords[batch_id].shape[0]
      batch_num_points += num_points
      # Stop collating once the point budget would be exceeded.
      if self.limit_numpoints and batch_num_points > self.limit_numpoints:
        num_full_points = sum(len(c) for c in coords)
        num_full_batch_size = len(coords)
        logging.warning(
            f'\t\tCannot fit {num_full_points} points into {self.limit_numpoints} points '
            f'limit. Truncating batch size at {batch_id} out of {num_full_batch_size} with {batch_num_points - num_points}.'
        )
        break
      coords_batch.append(torch.from_numpy(coords[batch_id]).int())
      feats_batch.append(torch.from_numpy(feats[batch_id]))
      labels_batch.append(torch.from_numpy(labels[batch_id]).int())
      # NOTE: the original also did ``batch_id += 1`` here; it was dead code
      # because enumerate reassigns batch_id each iteration.

    # NOTE(review): ``ME`` (MinkowskiEngine) is never imported in this file,
    # so calling this collate function raises NameError at runtime.  Import
    # MinkowskiEngine as ME (or switch to torchsparse collation) before use.
    coords_batch, feats_batch, labels_batch = ME.utils.sparse_collate(coords_batch, feats_batch, labels_batch)
    return coords_batch, feats_batch.float(), labels_batch

def getDataLoader(voxel_size: float, split: str, batch_size: int):
    """Build a seeded DataLoader over the sparse S3DIS dataset.

    Args:
        voxel_size: edge length of the quantization voxel.
        split: 'train' (shuffled) or anything else (deterministic order).
        batch_size: samples per batch.

    Returns:
        torch.utils.data.DataLoader using torchsparse collation.
    """
    dataset = S3DIS_sparse_Dataset(
                                    root="/media/ubuntu/数据/suyunzheng_dataset/semantic-dataset-output/spvnas_s3dis",
                                    voxel_size=voxel_size, num_points=60000, split=split)

    import random
    manual_seed = 123
    random.seed(manual_seed)
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)

    def worker_init_fn(worker_id):
        # BUG FIX: the original called random.seed(manual_seed, worker_id),
        # passing worker_id as the *version* argument, which raises
        # ValueError for any worker id other than 1 or 2.  Offset the seed
        # per worker instead.
        random.seed(manual_seed + worker_id)

    # Local name avoids shadowing the module-level ``from random import
    # shuffle`` import.
    do_shuffle = split == 'train'
    loader = torch.utils.data.DataLoader(
                                          dataset, batch_size=batch_size, shuffle=do_shuffle,
                                          num_workers=8, pin_memory=True,
                                          worker_init_fn=worker_init_fn, collate_fn=dataset.collate_fn
                                          )
    return loader
if __name__ == '__main__':
    # Smoke test: build the sparse S3DIS dataset/loader, iterate batches, and
    # visualize the two scenes of every batch.
    voxel_size = 0.03
    dataset = S3DIS_sparse_Dataset(
                                    root="/media/ubuntu/数据/suyunzheng_dataset/semantic-dataset-output/spvnas_s3dis",
                                    voxel_size=voxel_size, num_points=60000, split='train')

    import time
    import random
    manual_seed = 123
    random.seed(manual_seed)
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)

    def worker_init_fn(worker_id):
        # BUG FIX: the original called random.seed(manual_seed, worker_id),
        # passing worker_id as the *version* argument, which raises
        # ValueError for worker ids other than 1 and 2.
        random.seed(manual_seed + worker_id)

    train_loader = torch.utils.data.DataLoader(
                                                dataset, batch_size=2, shuffle=True,
                                                num_workers=8, pin_memory=True,
                                                worker_init_fn=worker_init_fn, collate_fn=dataset.collate_fn
                                                )
    print("===>len(dataset):{}".format(len(dataset)))
    print("===>len(train_loader):{}".format(len(train_loader)))
    end = time.time()
    for i, element in enumerate(train_loader):
        print(element["lidar"].coords)

        # Split the batch on the last coords column (presumably the batch
        # index added by sparse collation -- verify against torchsparse) and
        # visualize each scene with its labels.
        coords = element['lidar'].coords.numpy()
        labels = element['targets'].feats.numpy()
        coords_list0_idx = [j for j in range(coords.shape[0]) if coords[j, -1] == 0]
        coords_list1_idx = [j for j in range(coords.shape[0]) if coords[j, -1] == 1]
        coords_0 = coords[:, :-1][coords_list0_idx]
        coords_1 = coords[:, :-1][coords_list1_idx]
        labels_0 = labels[coords_list0_idx]
        labels_1 = labels[coords_list1_idx]
        visualize_with_label(coords_0, labels_0, window_name='{}-voxel{}'.format(element['file_name'][0], voxel_size))
        visualize_with_label(coords_1, labels_1, window_name='{}-voxel{}'.format(element['file_name'][1], voxel_size))

        print("===> time: {}/{}--{}".format(i+1, len(train_loader), time.time()-end))
        end = time.time()
