import os
import numpy as np
from tqdm import tqdm


def pre_train_data():
    """Copy every training scene into the train folder and, for large scenes,
    additionally save two randomly-shuffled halves.

    For each scene id listed in ``txt_path``:
      * the full scene array is saved unmodified as ``<scene_id>.npy``;
      * if the scene has more than 98000 points, its rows are shuffled and
        split into two halves saved as ``<scene_id>_0.npy`` / ``<scene_id>_1.npy``.
    """
    txt_path = '/home/czy/HuaWei/Pointnet2.ScanNet/data/scannetv2_train_old.txt'
    ori_path = '/home/czy/HuaWei/Pointnet2.ScanNet/preprocessing/scannet_scenes/'
    out_path = '/home/czy/HuaWei/Pointnet2.ScanNet/preprocessing/scannet_scenes_train/'

    # One scene id per line.
    with open(txt_path) as f:
        scene_list = [line.strip() for line in f]

    # Make sure the output directory exists before the first np.save.
    os.makedirs(out_path, exist_ok=True)

    for scene_id in tqdm(scene_list):
        scene_data = np.load(os.path.join(ori_path, "{}.npy".format(scene_id)))

        # Always save the full, unshuffled scene.
        np.save(os.path.join(out_path, "{}.npy".format(scene_id)), scene_data)

        n = scene_data.shape[0]
        if n <= 98000:
            continue

        # Shuffle the rows so each half is a random subsample of the scene.
        scene_data = scene_data[np.random.permutation(n), :]
        for i, part in enumerate(np.array_split(scene_data, 2)):
            np.save(os.path.join(out_path, "{}_{}.npy".format(scene_id, i)), part)
    print('end')

def read_train_name():
    """Print the filename (extension stripped) of every file in the
    preprocessed training-scenes directory."""
    # NOTE: `os` is already imported at module level; the old function-local
    # re-import was redundant and has been removed.
    path = '/home/czy/HuaWei/Pointnet2.ScanNet/preprocessing/scannet_scenes_train/'
    for file_name in os.listdir(path):
        print(os.path.splitext(file_name)[0])

def pre_val_data():
    """Tile each validation scene into 1.5 m x 1.5 m vertical columns and save
    each non-empty column as ``<scene_id>_<i>_<j>.npy``.

    Each saved array has 8192 rows (points resampled with replacement) and 11
    columns: xyz (3), rgb normalized to [0, 1] (3), normals (3), semantic
    label (1), and a per-point sample weight (1). Per-class weights are the
    inverse log frequency (1 / log(1.2 + freq)) of the 20 semantic labels
    over the whole validation set.
    """
    txt_path = '/home/czy/HuaWei/Pointnet2.ScanNet/data/scannetv2_val_old.txt'
    ori_path = '/home/czy/HuaWei/Pointnet2.ScanNet/preprocessing/scannet_scenes/'
    out_path = '/home/czy/HuaWei/Pointnet2.ScanNet/preprocessing/scannet_scenes_val/'

    # One scene id per line.
    with open(txt_path) as f:
        scene_list = [line.strip() for line in f]

    # Make sure the output directory exists before the first np.save.
    os.makedirs(out_path, exist_ok=True)

    # First pass: histogram the semantic labels (column 10) across all scenes
    # and turn the class frequencies into inverse-log-frequency weights.
    labelweights = np.zeros(20)
    for scene_id in tqdm(scene_list):
        scene_data = np.load(os.path.join(ori_path, "{}.npy".format(scene_id)))
        label = scene_data[:, 10].astype(np.int32)
        tmp, _ = np.histogram(label, range(21))
        labelweights += tmp
    labelweights = labelweights.astype(np.float32)
    labelweights = labelweights / np.sum(labelweights)
    labelweights = 1 / np.log(1.2 + labelweights)

    xlength = 1.5
    ylength = 1.5

    # Second pass: cut each scene into columns and save resampled chunks.
    for scene_id in tqdm(scene_list):
        scene_data = np.load(os.path.join(ori_path, "{}.npy".format(scene_id)))

        semantic_seg_ini = scene_data[:, 10].astype(np.int32)
        # Feature matrix (N, 9): xyz, rgb normalized to [0, 1], normals.
        point_set_ini = np.concatenate(
            [scene_data[:, :3], scene_data[:, 3:6] / 255., scene_data[:, 6:9]],
            axis=1)

        coordmin = point_set_ini[:, :3].min(axis=0)
        coordmax = point_set_ini[:, :3].max(axis=0)
        nsubvolume_x = np.ceil((coordmax[0] - coordmin[0]) / xlength).astype(np.int32)
        nsubvolume_y = np.ceil((coordmax[1] - coordmin[1]) / ylength).astype(np.int32)

        for i in range(nsubvolume_x):
            for j in range(nsubvolume_y):
                curmin = coordmin + [i * xlength, j * ylength, 0]
                curmax = coordmin + [(i + 1) * xlength, (j + 1) * ylength,
                                     coordmax[2] - coordmin[2]]
                # Points inside the column, with a 1 cm tolerance on all axes.
                in_box = np.sum((point_set_ini[:, :3] >= (curmin - 0.01)) *
                                (point_set_ini[:, :3] <= (curmax + 0.01)),
                                axis=1) == 3
                cur_point_set = point_set_ini[in_box, :]
                cur_semantic_seg = semantic_seg_ini[in_box]
                if len(cur_semantic_seg) == 0:
                    continue

                # Resample to a fixed 8192 points (with replacement).
                choice = np.random.choice(len(cur_semantic_seg), 8192, replace=True)
                point_set = cur_point_set[choice, :]       # (8192, 9)
                semantic_seg = cur_semantic_seg[choice]    # (8192,)

                # BUG FIX: the old code did `mask = mask[choice]`, indexing the
                # full-scene boolean mask with indices that are only valid
                # inside `cur_point_set`, which zeroed/kept weights for
                # arbitrary points. Recompute the mask on the sampled points
                # themselves instead.
                sample_mask = np.sum((point_set[:, :3] >= (curmin - 0.01)) *
                                     (point_set[:, :3] <= (curmax + 0.01)),
                                     axis=1) == 3
                sample_weight = labelweights[semantic_seg] * sample_mask

                data_out = np.concatenate(
                    [point_set,
                     np.expand_dims(semantic_seg, 1),
                     np.expand_dims(sample_weight, 1)], axis=1)
                out_file = os.path.join(
                    out_path, "{}_{}_{}.npy".format(scene_id, i, j))
                np.save(out_file, data_out)

def read_val_name():
    """Print the filename (extension stripped) of every file in the
    preprocessed validation-scenes directory."""
    # NOTE: `os` is already imported at module level; the old function-local
    # re-import was redundant and has been removed.
    path = '/home/czy/HuaWei/Pointnet2.ScanNet/preprocessing/scannet_scenes_val/'
    for file_name in os.listdir(path):
        print(os.path.splitext(file_name)[0])


if __name__ == '__main__':
    # Preprocess the training scenes, then list the resulting files.
    pre_train_data()
    read_train_name()

    divider = ('---------------------------------------------------------------'
               '===============================================================')
    print(divider)

    # Preprocess the validation scenes, then list the resulting files.
    pre_val_data()
    read_val_name()