from collections import Counter
from utils import path_util, get_input
from sklearn.cluster import DBSCAN
from tqdm import tqdm
import json
import numpy as np
import os
import pcl

# Clustering


def dbscan_outlier_removal(points, pre_center):
    """Cluster *points* with DBSCAN and return the cluster most likely to be the person.

    Candidate clusters are examined from largest to smallest.  On the first
    frame (``pre_center is None``) a cluster is accepted when its vertical
    extent looks like a standing person; on later frames a cluster is
    accepted when its centroid stays close to the previous frame's centroid
    (the person is assumed to move continuously).  When no cluster
    qualifies, a single point is returned so callers never see an empty
    array.

    Args:
        points: (N, 3) array of candidate foreground points.
        pre_center: (3,) centroid of the person in the previous frame, or
            ``None`` on the first frame.

    Returns:
        (M, 3) array of the selected cluster, or a (1, 3) fallback.
    """
    labels = DBSCAN(eps=0.3, min_samples=5).fit_predict(points)
    # Largest clusters first; label -1 is DBSCAN noise and never a candidate.
    sorted_labels = [label for label, _ in Counter(labels).most_common()]
    if -1 in sorted_labels:
        sorted_labels.remove(-1)
    for label in sorted_labels:
        cluster = points[labels == label]
        if pre_center is None:
            # Initial clustering: the person stands upright, so reject
            # clusters whose height span is too small to be a person.
            if cluster[:, 2].max() - cluster[:, 2].min() < 1.4:
                continue
            return cluster
        # The person moves continuously: adjacent frames' centroids must not
        # differ by more than 0.5 m.
        if np.linalg.norm(np.mean(cluster, axis=0) - pre_center) > 0.5:
            continue
        return cluster
    # Nothing qualified: fall back to one point so the result is non-empty.
    return points[0].reshape(1, -1)


def erase_background(points, bg_kdtree, pre_center):
    """Strip background points from a frame and keep the person's cluster.

    A point counts as background when its nearest neighbour in *bg_kdtree*
    lies within EPSILON metres; the comparison uses squared distances to
    avoid a square root.

    Args:
        points: (N, 3) float32 array of a raw (already cropped) frame.
        bg_kdtree: FLANN kd-tree built over the cropped background cloud.
        pre_center: previous-frame person centroid or ``None``; forwarded to
            ``dbscan_outlier_removal``.

    Returns:
        (M, 3) array of the selected foreground cluster; never empty.
    """
    EPSILON = 0.12
    EPSILON2 = EPSILON ** 2
    # nearest_k_search_for_cloud returns (indices, squared distances).
    squared_distance = bg_kdtree.nearest_k_search_for_cloud(
        pcl.PointCloud(points), 1)[1].flatten()
    erased_points = points[squared_distance > EPSILON2]
    if erased_points.shape[0] == 0:
        # Guarantee a non-empty result so downstream code never sees (0, 3).
        erased_points = points[0].reshape(1, -1)
    return dbscan_outlier_removal(erased_points, pre_center)


def get_kdtree(points):
    """Build a FLANN kd-tree over *points* (cast to float32, as PCL requires)."""
    cloud = pcl.PointCloud(points.astype(np.float32))
    return cloud.make_kdtree_flann()


def crop_points(points, crop_box):
    """Keep only the points strictly inside the XY rectangle of *crop_box*.

    Args:
        points: (N, D) array with x in column 0 and y in column 1 (D >= 2).
        crop_box: mapping with ``'min' -> (x_min, y_min)`` and
            ``'max' -> (x_max, y_max)``; both bounds are exclusive.

    Returns:
        New (M, D) array containing the points inside the box.
    """
    x_min, y_min = crop_box['min']
    x_max, y_max = crop_box['max']
    inside = ((points[:, 0] > x_min) & (points[:, 0] < x_max)
              & (points[:, 1] > y_min) & (points[:, 1] < y_max))
    # Boolean-mask indexing already yields a fresh array; the original
    # trailing .copy() duplicated the data a second time for no benefit.
    return points[inside]


def read_point_cloud(filename):
    """Load a point-cloud file via PCL and return it as a numpy array."""
    cloud = pcl.load(filename)
    return np.asarray(cloud)


def generate_segment(pc_dir: str,
                     pc_indexes: np.ndarray,
                     bg_points: np.ndarray,
                     crop_box: np.ndarray):
    """Decide, frame by frame, whether each point cloud keeps enough person points.

    Every frame in *pc_dir* whose index appears in *pc_indexes* is cropped,
    background-subtracted and clustered; a frame is "reserved" when at least
    20 person points survive.

    Args:
        pc_dir: directory of per-frame .pcd files.
        pc_indexes: frame indexes to process (others are skipped).
        bg_points: (N, 3+) background point cloud.
        crop_box: dict-like with 'min'/'max' XY bounds (see crop_points).

    Returns:
        List of bools, one per processed frame, in filename order.
    """
    pc_filenames = path_util.get_sorted_filenames_by_index(pc_dir)
    # The background kd-tree depends only on bg_points and crop_box, so it
    # is hoisted out of the per-frame loop (it was rebuilt every iteration).
    bg_kdtree = get_kdtree(crop_points(bg_points, crop_box))
    # Set gives O(1) membership tests instead of scanning the sequence.
    wanted = set(pc_indexes)
    pre_center = None
    reserved = []
    print(pc_dir)
    for pc_filename in tqdm(pc_filenames):
        if path_util.get_index(pc_filename) not in wanted:
            continue
        lidar_points = read_point_cloud(pc_filename)[:, :3]
        lidar_points = crop_points(lidar_points, crop_box)
        lidar_points = erase_background(lidar_points, bg_kdtree, pre_center)
        cur_reserved = lidar_points.shape[0] >= 20
        reserved.append(cur_reserved)
        if cur_reserved:
            # Only trust the centroid of frames with enough person points.
            pre_center = np.mean(lidar_points, axis=0)
    return reserved


if __name__ == '__main__':

    # Per-sequence processing metadata (crop boxes etc.) keyed by index.
    with open('/home/ljl/hdd/raw/process_info.json') as f:
        process_info = json.load(f)

    indexes = list(range(33, 43))
    for index in indexes:
        cur_process_info = process_info[str(index)]
        # Frame indexes are derived from the label filenames of this sequence.
        segment_dir = '/home/ljl/hdd/lidarcap/labels/3d/segment/{}'.format(index)
        pc_indexes = []
        for filename in path_util.get_sorted_filenames_by_index(segment_dir):
            stem = os.path.splitext(os.path.basename(filename))[0]
            pc_indexes.append(int(stem))
        pc_dir = '/home/ljl/hdd/lidarcap/pointclouds/{}'.format(index)
        bg_points_path = '/home/ljl/hdd/raw/{}/bg.pcd'.format(index)
        bg_points = np.asarray(pcl.load(bg_points_path))

        generate_segment(
            pc_dir, pc_indexes, bg_points, cur_process_info['box'])
