# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import sys

# BUG FIX: sys.path entries must be strings; the original code appended a
# *list* (["../.."]), which the import machinery silently ignores, so the
# intended project root was never actually on the path.
sys.path.append("../..")
import logging
import os
import os.path as osp
import pickle

import numpy as np
import tqdm
from sklearn.model_selection import train_test_split

# Directory layout: statistics/ holds per-sequence metadata text files
# (camera, performer, label, ...), denoised_data/ holds the denoised joints
# produced by the previous pipeline stage.
root_path = "./"
stat_path = osp.join(root_path, "statistics")
setup_file = osp.join(stat_path, "setup.txt")
camera_file = osp.join(stat_path, "camera.txt")
performer_file = osp.join(stat_path, "performer.txt")
replication_file = osp.join(stat_path, "replication.txt")
label_file = osp.join(stat_path, "label.txt")
skes_name_file = osp.join(stat_path, "skes_available_name.txt")

denoised_path = osp.join(root_path, "denoised_data")
raw_skes_joints_pkl = osp.join(denoised_path, "raw_denoised_joints.pkl")
frames_file = osp.join(denoised_path, "frames_cnt.txt")

save_path = "./"

# makedirs(exist_ok=True) replaces the check-then-create pattern: no TOCTOU
# race, and it keeps working if save_path is later changed to a nested dir.
os.makedirs(save_path, exist_ok=True)


def remove_nan_frames(ske_name, ske_joints, nan_logger):
    """Drop every frame of a sequence that contains at least one NaN joint.

    Frames with NaNs are reported to ``nan_logger`` (1-based frame index and
    the flat indices of the NaN entries); the remaining frames are returned
    in their original order.
    """
    kept_frames = []
    for frame_idx, frame in enumerate(ske_joints):
        nan_mask = np.isnan(frame)
        if nan_mask.any():
            nan_logger.info(
                "{}\t{:^5}\t{}".format(ske_name, frame_idx + 1, np.where(nan_mask)[0])
            )
        else:
            kept_frames.append(frame_idx)

    return ske_joints[kept_frames]


def seq_translation(skes_joints):
    """Translate each sequence so that joint-2 (middle of the spine) of
    actor 1, taken from the first frame where actor 1 is present, becomes
    the origin.

    Args:
        skes_joints: list of arrays of shape (num_frames, 75) for one actor
            or (num_frames, 150) for two actors (25 joints x 3 coords each).

    Returns:
        The same list, with each array translated in place. Frames where an
        actor was entirely absent (all zeros) are reset to zeros afterwards
        so the translation does not fabricate data for missing actors.
    """
    for idx, ske_joints in enumerate(skes_joints):
        num_frames = ske_joints.shape[0]
        num_bodies = 1 if ske_joints.shape[1] == 75 else 2
        if num_bodies == 2:
            # Remember which frames had a fully-absent actor so we can
            # re-zero them after the global translation below.
            missing_frames_1 = np.where(ske_joints[:, :75].sum(axis=1) == 0)[0]
            missing_frames_2 = np.where(ske_joints[:, 75:].sum(axis=1) == 0)[0]

        # Find the first "real" frame of actor 1.
        i = 0
        while i < num_frames and not np.any(ske_joints[i, :75] != 0):
            i += 1
        if i == num_frames:
            # BUG FIX: actor 1 never appears; the original indexed
            # ske_joints[num_frames, 3:6] and raised IndexError. There is no
            # meaningful origin, so leave this sequence untouched.
            continue

        origin = np.copy(ske_joints[i, 3:6])  # new origin: joint-2

        # One broadcast subtraction replaces the original per-frame loop:
        # the origin is constant across frames, so the result is identical.
        ske_joints -= np.tile(origin, 25 * num_bodies)

        if num_bodies == 2:
            # Restore zeros for frames where an actor was absent.
            if len(missing_frames_1) > 0:
                ske_joints[missing_frames_1, :75] = 0
            if len(missing_frames_2) > 0:
                ske_joints[missing_frames_2, 75:] = 0

        skes_joints[idx] = ske_joints  # update the list entry

    return skes_joints


def frame_translation(skes_joints, skes_name, frames_cnt):
    """Per-frame normalization: center each frame on joint-2 and scale by
    the spine length, then drop NaN frames.

    Args:
        skes_joints: list of (num_frames, 75 or 150) arrays, modified in place.
        skes_name: sequence names, parallel to ``skes_joints`` (for logging).
        frames_cnt: per-sequence frame counts, updated in place.

    Returns:
        (skes_joints, frames_cnt) with NaN frames removed and counts updated.
    """
    nan_logger = logging.getLogger("nan_skes")
    nan_logger.setLevel(logging.INFO)
    nan_logger.addHandler(logging.FileHandler("./nan_frames.log"))
    nan_logger.info("{}\t{}\t{}".format("Skeleton", "Frame", "Joints"))

    for idx, ske_joints in enumerate(skes_joints):
        num_frames = ske_joints.shape[0]
        # Per-frame scale: distance between spine base (joint-1) and
        # spine (joint-21), computed before any frame is modified.
        j1 = ske_joints[:, 0:3]
        j21 = ske_joints[:, 60:63]
        dist = np.sqrt(((j1 - j21) ** 2).sum(axis=1))
        # NOTE(review): dist can be 0 for degenerate/all-zero frames, making
        # the divisions below produce inf/NaN — confirm upstream denoising
        # rules this out, since only NaN frames are removed afterwards.

        for f in range(num_frames):
            origin = ske_joints[f, 3:6]  # new origin: middle of the spine (joint-2)
            if (ske_joints[f, 75:] == 0).all():
                # Only actor 1 present: scale its joints about the origin.
                ske_joints[f, :75] = (ske_joints[f, :75] - np.tile(origin, 25)) / dist[
                    f
                ] + np.tile(origin, 25)
            else:
                # Two actors: scale all 50 joints about the same origin.
                ske_joints[f] = (ske_joints[f] - np.tile(origin, 50)) / dist[
                    f
                ] + np.tile(origin, 50)

        ske_name = skes_name[idx]
        ske_joints = remove_nan_frames(ske_name, ske_joints, nan_logger)
        # BUG FIX: store the count AFTER NaN removal. The original stored the
        # pre-removal ``num_frames``, so frames_cnt could overstate the valid
        # length despite the comment claiming otherwise.
        frames_cnt[idx] = ske_joints.shape[0]
        skes_joints[idx] = ske_joints

    return skes_joints, frames_cnt


def align_frames(skes_joints, frames_cnt):
    """Zero-pad every sequence to the maximum frame count and to two bodies.

    Returns a single float32 array of shape (num_sequences, max_frames, 150);
    single-actor sequences get zeros for the second actor, and frames beyond
    a sequence's own length stay zero.
    """
    max_frames = frames_cnt.max()  # 300 for NTU-60
    padded = np.zeros((len(skes_joints), max_frames, 150), dtype=np.float32)

    for seq_idx, joints in enumerate(skes_joints):
        length = joints.shape[0]
        if joints.shape[1] == 75:
            # One actor: fill the first 75 dims, leave actor 2 zeroed.
            padded[seq_idx, :length, :75] = joints
        else:
            padded[seq_idx, :length] = joints

    return padded


def one_hot_vector(labels, num_classes=60):
    """Convert integer class labels to one-hot rows.

    Args:
        labels: sequence of int class ids in [0, num_classes).
        num_classes: width of each one-hot vector. Generalized from the
            hard-coded 60 (NTU-60 default kept for backward compatibility).

    Returns:
        Float array of shape (len(labels), num_classes) with one 1 per row.
    """
    num_skes = len(labels)
    labels_vector = np.zeros((num_skes, num_classes))
    # Vectorized fancy-index assignment instead of a Python loop.
    labels_vector[np.arange(num_skes), labels] = 1

    return labels_vector


def split_train_val(train_indices, method="sklearn", ratio=0.05):
    """Split a validation subset off the training indices.

    Args:
        train_indices: 1-D array of training sample indices.
        method: "sklearn" uses ``train_test_split``; any other value uses a
            seeded numpy shuffle-and-slice. Both use fixed seeds, so results
            are reproducible.
        ratio: fraction of samples assigned to the validation set.

    Returns:
        (train_indices, val_indices)
    """
    if method == "sklearn":
        return train_test_split(train_indices, test_size=ratio, random_state=10000)
    # BUG FIX: the numpy branch previously hard-coded 0.05 and ignored
    # ``ratio``. Also shuffle a copy so the caller's array is not mutated.
    shuffled = np.array(train_indices)
    np.random.seed(10000)
    np.random.shuffle(shuffled)
    val_num_skes = int(np.ceil(ratio * len(shuffled)))
    return shuffled[val_num_skes:], shuffled[:val_num_skes]


def split_dataset(skes_joints, label, performer, camera, evaluation, save_path):
    """Split the data into train/test sets for one evaluation protocol and
    save them as ``NTU60_<evaluation>.npz`` under ``save_path``.

    Args:
        skes_joints: (num_sequences, max_frames, 150) array of all sequences.
        label: per-sequence 0-based action labels.
        performer: per-sequence subject ids (used for the "CS" protocol).
        camera: per-sequence camera ids (used for the "CV" protocol).
        evaluation: "CS" (cross-subject) or "CV" (cross-view).
        save_path: output directory for the .npz file.
    """
    train_indices, test_indices = get_indices(performer, camera, evaluation)

    train_labels = label[train_indices]
    test_labels = label[test_indices]

    train_x = skes_joints[train_indices]
    train_y = one_hot_vector(train_labels)
    test_x = skes_joints[test_indices]
    test_y = one_hot_vector(test_labels)

    # BUG FIX: honor the save_path parameter; the original wrote to the
    # current working directory and ignored save_path entirely.
    save_name = osp.join(save_path, "NTU60_%s.npz" % evaluation)
    np.savez(save_name, x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y)


def get_indices(performer, camera, evaluation="CS"):
    """Return (train_indices, test_indices) for one evaluation protocol.

    "CS" (cross-subject) splits by performer id using the standard NTU-60
    subject lists; anything else is treated as cross-view and splits by
    camera id (cameras 2 and 3 train, camera 1 tests). Indices are 0-based
    and grouped by id in list order, as int32.
    """
    if evaluation == "CS":  # Cross Subject (subject ids)
        train_ids = [1, 2, 4, 5, 8, 9, 13, 14, 15, 16,
                     17, 18, 19, 25, 27, 28, 31, 34, 35, 38]
        test_ids = [3, 6, 7, 10, 11, 12, 20, 21, 22, 23,
                    24, 26, 29, 30, 32, 33, 36, 37, 39, 40]
        key = performer
    else:  # Cross View (camera ids)
        train_ids = [2, 3]
        test_ids = [1]
        key = camera

    # Concatenate per-id matches so the original id-grouped ordering
    # of indices is preserved exactly.
    train_indices = np.concatenate(
        [np.where(key == i)[0] for i in train_ids]
    ).astype(np.int32)
    test_indices = np.concatenate(
        [np.where(key == i)[0] for i in test_ids]
    ).astype(np.int32)

    return train_indices, test_indices


def align_skeleton(data):
    """Rotate each actor's skeleton into a canonical body-centered basis.

    For every sample and actor, an orthonormal basis is built from the FIRST
    frame only: v1 = joint 1 - joint 0 (spine direction), v2 = joint 12 -
    joint 16 (hip direction) with its component along v1 projected out, and
    v3 = v2 x v1. All frames of that actor are then rotated by R^-1 where
    R = [v2 | v3 | v1].

    Args:
        data: array of shape (N, C, T, V, M) — samples, coordinates,
            frames, joints, actors. C is assumed to be 3 (x, y, z) given
            the (3, 1) reshapes below — TODO confirm with caller.

    Returns:
        Array of the same shape with rotated coordinates. Actors whose
        first-frame spine vector is zero are skipped and left all-zero
        in the output.
    """
    N, C, T, V, M = data.shape
    trans_data = np.zeros_like(data)
    for i in tqdm.tqdm(range(N)):
        for p in range(M):
            sample = data[i][..., p]  # (C, T, V) for actor p
            # if np.all((sample[:,0,:] == 0)):
            # continue
            # Spine direction in the first frame.
            v1 = sample[:, 0, 1] - sample[:, 0, 0]
            if np.linalg.norm(v1) <= 0.0:
                continue  # absent actor: leave its output slot zeroed
            v1 = v1 / np.linalg.norm(v1)
            # Hip direction; remove its projection onto v1 (Gram-Schmidt).
            v2_ = sample[:, 0, 12] - sample[:, 0, 16]
            proj_v2_v1 = np.dot(v1.T, v2_) * v1 / np.linalg.norm(v1)
            v2 = v2_ - np.squeeze(proj_v2_v1)
            # NOTE(review): if the hip vector is (near-)parallel to v1, this
            # normalization divides by ~0 — confirm upstream data rules that out.
            v2 = v2 / (np.linalg.norm(v2))
            v3 = np.cross(v2, v1) / (np.linalg.norm(np.cross(v2, v1)))
            v1 = np.reshape(v1, (3, 1))
            v2 = np.reshape(v2, (3, 1))
            v3 = np.reshape(v3, (3, 1))

            # Columns of R are the new axes; apply the inverse rotation
            # to every frame of this actor.
            R = np.hstack([v2, v3, v1])
            for t in range(T):
                trans_sample = (np.linalg.inv(R)) @ (sample[:, t, :])  # -d
                trans_data[i, :, t, :, p] = trans_sample
    return trans_data


def create_aligned_dataset(
    file_list=("data/ntu/NTU60_CS.npz", "data/ntu/NTU60_CV.npz"),
):
    """Create rotation-aligned copies of the given NTU .npz datasets.

    For each input file, aligns x_train and x_test with ``align_skeleton``
    and writes ``<name>_aligned.npz`` next to the original, copying the
    labels through unchanged.

    Args:
        file_list: paths to .npz files containing x_train/y_train/x_test/
            y_test. Default changed from a mutable list to a tuple (same
            contents) to avoid the shared-mutable-default pitfall.
    """
    for file in file_list:
        # BUG FIX: np.load returns an NpzFile holding an open file handle;
        # use it as a context manager so the handle is closed.
        with np.load(file) as org_data:
            aligned_set = {}
            for split in ("x_train", "x_test"):
                data = org_data[split]
                N, T, _ = data.shape
                # (N, T, 150) -> (N, C=3, T, V=25, M=2) for align_skeleton.
                data = data.reshape((N, T, 2, 25, 3)).transpose(0, 4, 1, 3, 2)
                aligned_data = align_skeleton(data)
                # Back to the flat (N, T, 150) layout.
                aligned_set[split] = aligned_data.transpose(0, 2, 4, 3, 1).reshape(
                    N, T, -1
                )
            y_train = org_data["y_train"]
            y_test = org_data["y_test"]

        np.savez(
            file.replace(".npz", "_aligned.npz"),
            x_train=aligned_set["x_train"],
            y_train=y_train,
            x_test=aligned_set["x_test"],
            y_test=y_test,
        )


if __name__ == "__main__":
    # Per-sequence metadata, one entry per available skeleton file.
    camera = np.loadtxt(camera_file, dtype=np.int32)  # camera id: 1, 2, 3
    performer = np.loadtxt(performer_file, dtype=np.int32)  # subject id: 1~40
    label = np.loadtxt(label_file, dtype=np.int32) - 1  # action label: 0~59

    frames_cnt = np.loadtxt(frames_file, dtype=np.int32)  # frames_cnt
    skes_name = np.loadtxt(skes_name_file, dtype=np.bytes_)

    # NOTE(review): pickle.load executes arbitrary code from the file —
    # only run this on data produced by the trusted previous pipeline stage.
    with open(raw_skes_joints_pkl, "rb") as fr:
        skes_joints = pickle.load(fr)  # a list

    # Center each sequence on actor 1's spine joint.
    skes_joints = seq_translation(skes_joints)

    skes_joints = align_frames(
        skes_joints, frames_cnt
    )  # aligned to the same frame length

    # Write NTU60_CS.npz / NTU60_CV.npz (cross-subject / cross-view splits).
    evaluations = ["CS", "CV"]
    for evaluation in evaluations:
        split_dataset(skes_joints, label, performer, camera, evaluation, save_path)

    # Produce rotation-aligned variants of both splits in the working dir.
    create_aligned_dataset(file_list=["NTU60_CS.npz", "NTU60_CV.npz"])
