from sklearn.neighbors import KDTree
from os.path import join, exists, dirname, abspath
import numpy as np
import pandas as pd
import os, sys, glob, pickle

BASE_DIR = dirname(abspath(__file__))
ROOT_DIR = dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
from helper_ply import write_ply
from helper_tool import DataProcessing as DP
import getpass

# Root of the raw S3DIS dataset; assumed to live under the current user's home.
dataset_path = (
    "/home/" + getpass.getuser() + "/data/S3DIS/Stanford3dDataset_v1.2_Aligned_Version"
)

# Relative annotation directories (e.g. Area_1/office_2/Annotations) listed in
# the meta file, resolved against the dataset root.  Context managers ensure
# the meta files are closed (the original leaked the open file handles).
with open(join(BASE_DIR, "meta/anno_paths.txt")) as f:
    anno_paths = [join(dataset_path, line.rstrip()) for line in f]

# Class-name list and its name -> integer-label mapping.
with open(join(BASE_DIR, "meta/class_names.txt")) as f:
    gt_class = [x.rstrip() for x in f]
gt_class2label = {cls: i for i, cls in enumerate(gt_class)}

sub_grid_size = 0.04  # edge length (meters) of the sub-sampling voxel grid
original_pc_folder = join(dirname(dataset_path), "original_ply")
sub_pc_folder = join(dirname(dataset_path), "input_{:.3f}".format(sub_grid_size))
# makedirs(exist_ok=True) replaces the racy "mkdir if not exists" expression:
# no TOCTOU window, and no conditional expression used purely for side effects.
os.makedirs(original_pc_folder, exist_ok=True)
os.makedirs(sub_pc_folder, exist_ok=True)
out_format = ".ply"


def convert_pc2ply(anno_path, save_path):
    """
    Convert one room's original annotation files to ply (each point is XYZRGBL).

    Aggregates the points of every instance file in the room, shifts the cloud
    so its minimum XYZ corner sits at the origin, writes the full-resolution
    ply, then grid-sub-samples it and stores the sub-cloud, its KDTree, and
    the projection indices mapping the full cloud onto the sub-cloud.

    :param anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
    :param save_path: path to save original point clouds (each line is XYZRGBL)
    :return: None
    """
    data_list = []

    for f in glob.glob(join(anno_path, "*.txt")):
        # Instance files are named "<class>_<id>.txt"; the class is the prefix.
        class_name = os.path.basename(f).split("_")[0]
        if class_name not in gt_class:  # note: in some room there is 'staris' class..
            class_name = "clutter"
        # sep=r"\s+" is the non-deprecated replacement for delim_whitespace=True
        # (deprecated in pandas 2.1, removed in pandas 3).
        pc = pd.read_csv(f, header=None, sep=r"\s+").values
        labels = np.ones((pc.shape[0], 1)) * gt_class2label[class_name]
        data_list.append(np.concatenate([pc, labels], 1))  # Nx7

    pc_label = np.concatenate(data_list, 0)
    # Translate the room so its minimum XYZ corner is at the origin.
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min

    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)

    # Output 1: full-resolution point cloud.
    write_ply(
        save_path,
        (xyz, colors, labels),
        ["x", "y", "z", "red", "green", "blue", "class"],
    )

    # os.path.basename is portable; save_path.split("/") breaks on Windows.
    # Computed once instead of three times as in the original.
    cloud_name = os.path.basename(save_path)[:-4]

    # Output 2: grid sub-sampling — points inside each sub_grid_size cube are
    # averaged, and the most frequent class in the cube becomes its label.
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size
    )
    sub_colors = sub_colors / 255.0  # normalize RGB from 0-255 to [0, 1]
    sub_ply_file = join(sub_pc_folder, cloud_name + ".ply")
    write_ply(
        sub_ply_file,
        [sub_xyz, sub_colors, sub_labels],
        ["x", "y", "z", "red", "green", "blue", "class"],
    )

    # Output 3: KDTree over the sub-sampled points, pickled for later reuse.
    search_tree = KDTree(sub_xyz)
    kd_tree_file = join(sub_pc_folder, cloud_name + "_KDTree.pkl")
    with open(kd_tree_file, "wb") as f:
        pickle.dump(search_tree, f)

    # Output 4: for every original point, the index of its nearest sub-sampled
    # point, so sub-cloud predictions can be projected back at evaluation time.
    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
    proj_save = join(sub_pc_folder, cloud_name + "_proj.pkl")
    with open(proj_save, "wb") as f:
        pickle.dump([proj_idx, labels], f)


if __name__ == "__main__":
    # Note: there is an extra character in the v1.2 data in Area_5/hallway_6. It's fixed manually.
    """
    Grid-sample the raw data (points + labels) and generate ply files, a
    KDTree file, and projection info for every room.
    1 input: read the point cloud and labels from
      Stanford3dDataset_v1.2_Aligned_Version/Area_1/office_16/Annotations/*.txt
      [x y z r g b], loaded via pandas.read_csv -> numpy
    2 output1:
      original_ply/Area_1_office_16.ply [x y z r g b label]  # unused after sampling
    3 sub-sample output1 into output2/output3/output4 (point count roughly
      0.04x the original, rgb rescaled from 0-255 to 0-1):
        3.1 output2: input_0.040/Area_1_office_16.ply [x y z r g b label]
            (training needs only the rgb and label columns)
        3.2 output3: input_0.040/Area_1_office_16_KDTree.pkl [x y z]
            (training loads the tree via pickle.load)
        3.3 output4: input_0.040/Area_1_office_16_proj.pkl [proj_idx, label]
            (validation loads the projection via pickle.load)
    """

    # 1 input: read point clouds and labels, one room at a time.
    for annotation_path in anno_paths:
        print(annotation_path)
        # normpath + os.sep splits correctly on every platform (the original
        # split("/") breaks on Windows); parts[-3:-1] are e.g.
        # ('Area_1', 'office_16') since the path ends in .../Annotations.
        parts = os.path.normpath(annotation_path).split(os.sep)
        out_file_name = parts[-3] + "_" + parts[-2] + out_format
        convert_pc2ply(annotation_path, join(original_pc_folder, out_file_name))
