from sklearn.neighbors import KDTree
from os.path import join, exists, dirname, abspath
import numpy as np
import pandas as pd
import os, sys, glob, pickle

BASE_DIR = dirname(abspath(__file__))   # 当前文件夹绝对路径
ROOT_DIR = dirname(BASE_DIR)            # 上一级文件夹路径    
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)               # 将目录添加到系统路径之后就可以添加自己的包
from helper_ply import write_ply        
from helper_tool import DataProcessing as DP

# dataset_path = '/data/S3DIS/Stanford3dDataset_v1.2_Aligned_Version'
dataset_path = '/data/liuxuexun/dataset/S3DIS/Stanford3dDataset_v1.2_Aligned_Version'     # local dataset root
# One annotation directory per line, e.g. "Area_1/office_2/Annotations"
anno_paths = [line.rstrip() for line in open(join(BASE_DIR, 'meta/anno_paths.txt'))]
anno_paths = [join(dataset_path, p) for p in anno_paths]    # make each annotation path absolute

# Class names, one per line. The enumeration order defines the integer label ids,
# so it must match the label definition used by the s3dis dataset code.
gt_class = [x.rstrip() for x in open(join(BASE_DIR, 'meta/class_names.txt'))]
gt_class2label = {cls: i for i, cls in enumerate(gt_class)}     # class name -> integer label id

sub_grid_size = 0.04                                                                      # grid-subsampling cell size
original_pc_folder = join(dirname(dataset_path), 'original_ply')                          # full-resolution ply output dir
sub_pc_folder = join(dirname(dataset_path), 'input_{:.3f}'.format(sub_grid_size))         # subsampled data output dir
# makedirs(..., exist_ok=True) is idempotent and race-free, unlike the previous
# "os.mkdir(...) if not exists(...) else None" check-then-create expression.
os.makedirs(original_pc_folder, exist_ok=True)
os.makedirs(sub_pc_folder, exist_ok=True)
out_format = '.ply'


def convert_pc2ply(anno_path, save_path):
    """
    Convert one room's annotation txt files to ply files (each point is XYZRGBL).

    Aggregates the points of every annotated instance in the room and writes:
    the full-resolution cloud to `save_path`; the grid-subsampled cloud, its
    KDTree, and the projection indices (nearest subsampled point for every
    original point) into `sub_pc_folder`.

    :param anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
    :param save_path: path to save original point clouds (each line is XYZRGBL)
    :return: None
    """
    # Room name without the '.ply' suffix. os.path.basename is portable,
    # unlike the previous save_path.split('/') which breaks on Windows paths.
    cloud_name = os.path.basename(save_path)[:-4]
    data_list = []

    for f in glob.glob(join(anno_path, '*.txt')):
        class_name = os.path.basename(f).split('_')[0]      # e.g. 'chair_1.txt' -> 'chair'
        if class_name not in gt_class:  # note: in some room there is 'staris' class..
            class_name = 'clutter'
        # sep=r'\s+' is the supported equivalent of the deprecated delim_whitespace=True (pandas >= 2.2).
        pc = pd.read_csv(f, header=None, sep=r'\s+').values                 # N x 6 matrix (xyz + rgb)
        labels = np.ones((pc.shape[0], 1)) * gt_class2label[class_name]     # per-point label column (N x 1)
        data_list.append(np.concatenate([pc, labels], 1))  # Nx7

    pc_label = np.concatenate(data_list, 0)     # stack all instances into one room cloud
    xyz_min = np.amin(pc_label, axis=0)[0:3]    # room-wise minimum of x, y, z
    pc_label[:, 0:3] -= xyz_min                 # shift origin so every coordinate is non-negative

    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)    # 13 S3DIS classes fit in uint8
    write_ply(save_path, (xyz, colors, labels), ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    # save sub_cloud and KDTree file
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0             # normalise colors to [0, 1]
    sub_ply_file = join(sub_pc_folder, cloud_name + '.ply')
    write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = join(sub_pc_folder, cloud_name + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    # For every original point, index of its nearest subsampled point; used later
    # to project subsampled predictions back onto the full-resolution cloud.
    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
    proj_save = join(sub_pc_folder, cloud_name + '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)


if __name__ == '__main__':
    # Note: there is an extra character in the v1.2 data in Area_5/hallway_6. It's fixed manually.
    for annotation_path in anno_paths:
        print(annotation_path)
        # Portable path parsing: ".../Area_X/room_Y/Annotations" -> "Area_X_room_Y.ply".
        # (The previous split('/') broke on Windows path separators.)
        room_dir = dirname(annotation_path)                 # .../Area_X/room_Y
        room_name = os.path.basename(room_dir)              # room_Y
        area_name = os.path.basename(dirname(room_dir))     # Area_X
        out_file_name = area_name + '_' + room_name + out_format
        convert_pc2ply(annotation_path, join(original_pc_folder, out_file_name))