import copy
import os
import random
from datetime import timedelta
from multiprocessing import Pool, Value
from time import time

# import torch
import numpy as np

# from PIL import Image
# import MinkowskiEngine as ME
# from pyquaternion import Quaternion
# from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from scipy.spatial import KDTree
from sklearn.decomposition import PCA
from tqdm import tqdm

# Module-level dataset handle; inherited by the Pool workers (fork) so every
# worker can resolve tokens without re-loading the database per task.
nusc = NuScenes(version="v1.0-trainval", dataroot="datasets/nuscenes", verbose=False)

# Flat list of keyframe `data` dicts across all scenes; filled below and
# consumed by the worker function via Pool.map.
list_keyframes = []


def create_list_of_scans(scene):
    """Collect the ``data`` record of every keyframe in *scene*.

    Walks the sample chain from the scene's first sample token to the end
    of the chain and appends each sample's ``data`` dict to the
    module-level ``list_keyframes``.
    """
    token = scene["first_sample_token"]
    # An empty "next" token marks the end of the sample chain.
    while token != "":
        sample = nusc.get("sample", token)
        list_keyframes.append(sample["data"])
        token = sample["next"]


# Build the global keyframe list from every scene in the split.
# Iterate the scene records directly instead of range(len(...)).
for scene in nusc.scene:
    create_list_of_scans(scene)

len_s = len(list_keyframes)
# Cross-process progress counter; incremented by each worker after it
# finishes a scan (must be updated under its lock — see func()).
Counter = Value("i", 0)


def func(data):
    """Compute a per-point tangent frame and ellipse radii for one lidar scan.

    Parameters
    ----------
    data : dict
        A nuScenes sample ``data`` record; only ``data["LIDAR_TOP"]`` is used.

    Side effects
    ------------
    Writes ``tmp/<lidar_token>.npz`` containing ``tbn_table`` (n, 3, 3) and
    ``radii_table`` (n, 2), updates the shared progress counter, and prints
    a progress line.
    """
    pointsensor = nusc.get("sample_data", data["LIDAR_TOP"])
    pcl_path = os.path.join(nusc.dataroot, pointsensor["filename"])
    pc_original = LidarPointCloud.from_file(pcl_path)
    pc_ref = pc_original.points  # ((x,y,z,intensity:0-255),n)

    token = pointsensor["token"]

    pc_crds = pc_ref[:3, :].T  # (n, 3) xyz coordinates

    tree = KDTree(pc_crds)

    n_pts = pc_crds.shape[0]
    ok_flag = np.zeros(n_pts, dtype=bool)

    tbn_table = np.empty((n_pts, 3, 3))  # tangent,bitangent,normal
    radii_table = np.empty((n_pts, 2))

    pca = PCA(n_components=3)

    # Grow the neighborhood radius (0.25 m, doubling up to 2 m) until every
    # point has produced a well-conditioned PCA, or the cap is reached.
    cur_r = 0.25
    while cur_r <= 2 and not np.all(ok_flag):
        ix_to_proc = np.where(~ok_flag)[0]
        nbr_list = tree.query_ball_point(pc_crds[ix_to_proc], cur_r)
        nbr_num = np.vectorize(len)(nbr_list)
        # Cap neighborhoods at 32 points to bound PCA cost. Sample WITHOUT
        # replacement (random.sample): random.choices would duplicate points
        # and bias the estimated variances.
        big = nbr_num > 32
        nbr_list[big] = np.vectorize(
            lambda lst: random.sample(lst, k=32), otypes=[object]
        )(nbr_list[big])

        # PCA with 3 components needs at least 3 samples.
        for i in np.where(nbr_num >= 3)[0]:
            nbrs = pc_crds[nbr_list[i]]  # renamed: do not shadow the `data` param
            pca.fit(nbrs)
            components = pca.components_
            std = np.sqrt(pca.explained_variance_)
            # Accept only neighborhoods that are not degenerate in-plane
            # relative to the current radius.
            if std[1] >= 0.1 * cur_r:
                tbn_table[ix_to_proc[i]] = components
                radii_table[ix_to_proc[i]] = [
                    0.25 * cur_r,
                    0.25 * cur_r * (std[1] / std[0]),
                ]
                ok_flag[ix_to_proc[i]] = True

        cur_r *= 2

    # Fallback for points that never yielded a valid frame: identity basis,
    # zero radii.
    tbn_table[~ok_flag] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    radii_table[~ok_flag] = [0, 0]

    os.makedirs("tmp", exist_ok=True)  # ensure the output directory exists
    np.savez_compressed(
        "tmp/" + token + ".npz", tbn_table=tbn_table, radii_table=radii_table
    )

    # `Value.value += 1` is NOT atomic across processes — read-modify-write
    # must happen under the Value's lock.
    with Counter.get_lock():
        Counter.value += 1
        done = Counter.value
    finished_portion = done / len_s
    time_used = time() - start_time
    # Remaining time is (estimated total) - (elapsed), i.e.
    # time_used * (1 - p) / p; the original printed the estimated TOTAL.
    time_left = time_used * (1 - finished_portion) / finished_portion
    print(
        f"\rtime used: {timedelta(seconds=time_used)}, finished: {finished_portion:.3f}, time left: {timedelta(seconds=time_left)}",
        end="",
        flush=True,
    )


start_time = time()

# Guard the pool launch: under the "spawn" start method, worker processes
# re-import this module, and an unguarded Pool would recursively spawn pools.
if __name__ == "__main__":
    with Pool(20) as p:
        p.map(func, list_keyframes)
