import multiprocessing
import os
import subprocess
import time

import numpy as np
import sklearn.cluster as cls

from Neural_Overlapping_Quantization.util import dir_io, vecs_io

'''
{
    "type": "kmeans_multiple_overlap_boundary"
}
'''
max_iter = 40
overlap_proportion = 0.5


def dataset_partition(base, base_base_gnd, config):
    """Partition the base dataset once per classifier using a joint k-means codebook.

    Trains a single KMeans model with n_cluster * n_classifier centroids, splits
    the centroids into per-classifier codebooks via random projection, then
    labels the base set (with overlap, via an external binary) for each
    classifier.

    Returns:
        (label_l, res_intermediate): stacked per-classifier overlap labels and a
        dict of timing/config information.
    """
    # NOTE(review): base_base_gnd is not used here — TODO confirm it is needed
    # by other partition types sharing this interface.
    # Read required configuration up front (raises KeyError if any is missing).
    _partition_cfg = config['dataset_partition']  # unused below; kept as a key check
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    _kahip_dir = config['kahip_dir']  # unused below; kept as a key check
    program_train_para_dir = config['program_train_para_dir']
    _data_fname = config['data_fname']  # unused below; kept as a key check
    project_dir = config['project_dir']

    res_intermediate = {'overlap_proportion': overlap_proportion, 'max_iter': max_iter}

    # One joint codebook whose centroids are later shared out among classifiers.
    joint_model = cls.KMeans(n_clusters=n_cluster * n_classifier, init='k-means++', max_iter=max_iter)

    codebook_start = time.time()
    print("start generate joint codebook")
    joint_model.fit(base)
    print("end generate joint codebook")
    res_intermediate['build_codebook_time'] = time.time() - codebook_start

    centroid_l_l, rp_time = assign_centroid(joint_model.cluster_centers_, n_classifier, n_cluster)
    res_intermediate['random_projection_time'] = rp_time

    intermediate_l = []
    label_l = []
    for classifier_i in range(n_classifier):
        timing = {}
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, classifier_i)
        centroid_l = centroid_l_l[classifier_i]

        single_start = time.time()
        labels, nn_distance_l, distance_idx_l = parallel_single_label(base, centroid_l)
        save_label_distance(labels, nn_distance_l, distance_idx_l, n_cluster, save_dir)
        timing['single_label_time'] = time.time() - single_start

        overlap_start = time.time()
        label = get_overlap_label(save_dir, project_dir, n_cluster, len(base))
        timing['overlap_label_time'] = time.time() - overlap_start

        label_l.append(label)
        intermediate_l.append(timing)

    res_intermediate['dataset_partition'] = intermediate_l
    return np.array(label_l), res_intermediate


def get_overlap_label(save_dir, project_dir, n_cluster, n_item):
    command = '%s/Neural_Overlapping_Quantization/CPP_Project/kmeans_multiple_overlap_boundary/build/overlap %s %.3f' % (
        project_dir, save_dir, overlap_proportion)
    os.system(command)
    label = np.loadtxt("%s/partition.txt" % save_dir).astype(np.int64)
    return label


def save_label_distance(res_labels, res_nearest_neighbor_distance_l, res_distance_idx_l, n_cluster, save_dir):
    """Persist per-item labels and distance orderings for the external overlap binary.

    Writes three files into save_dir:
      - single_partition.txt: nearest-centroid label of every item.
      - distance2nearest_centroid_idx.ivecs: per-item argsort of centroid distances.
      - distance_idx.txt: header "<n_cluster> <n_item>", one line of per-cluster
        sizes, then one index per item.
    """
    np.savetxt('%s/single_partition.txt' % save_dir, res_labels, fmt='%d')

    vecs_io.ivecs_write('%s/distance2nearest_centroid_idx.ivecs' % save_dir, res_distance_idx_l)

    cluster_len_l = []
    distance_sort_idx_l = np.zeros(shape=len(res_labels), dtype=np.int64)
    for cluster_i in range(n_cluster):
        cluster_idx_l = np.where(res_labels == cluster_i)[0]
        cluster_len_l.append(len(cluster_idx_l))
        # NOTE(review): this stores, at the position of the i-th member of the
        # cluster, the argsort output (the within-cluster index of the i-th
        # closest member) rather than the member's own rank — confirm this is
        # what the C++ reader expects.
        distance_idx_l = np.argsort(res_nearest_neighbor_distance_l[cluster_idx_l])
        # Vectorized fancy-index assignment, equivalent to the element loop.
        distance_sort_idx_l[cluster_idx_l] = distance_idx_l

    with open('%s/distance_idx.txt' % save_dir, 'w') as f:
        f.write("%d %d \n" % (n_cluster, len(res_labels)))
        # Build each section with join instead of quadratic string +=.
        f.write(" ".join(str(length) for length in cluster_len_l) + " \n")
        f.write("".join("%d\n" % idx for idx in distance_sort_idx_l))
        # No explicit close: the with-statement closes the file.


def parallel_single_label(data, centroid_l):
    """Label every item in data with its nearest centroid, in parallel.

    Fans the work out round-robin over one worker per CPU core (worker w handles
    items w, w + n_worker, ...) and stitches the strided results back together.

    Returns:
        (labels, nearest_distances, distance_argsorts): int64 (n,), float64 (n,),
        and int64 (n, len(centroid_l)) arrays.
    """
    n_worker = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(n_worker)
    async_res_l = [
        pool.apply_async(count_centroid, args=(data, centroid_l, worker_i, n_worker))
        for worker_i in range(n_worker)
    ]
    pool.close()
    pool.join()

    n_item = data.shape[0]
    res_labels = np.zeros(n_item).astype(np.int64)
    res_nearest_neighbor_distance_l = np.zeros(n_item).astype(np.float64)
    res_distance_idx_l = np.zeros(shape=(n_item, len(centroid_l)), dtype=np.int64)
    for worker_i, async_res in enumerate(async_res_l):
        tmp_labels, tmp_distance_l, tmp_distance_idx_l = async_res.get()
        if len(tmp_labels) == 0:
            continue  # this worker received no items (more workers than items)
        # Worker worker_i handled items worker_i, worker_i + n_worker, ...:
        # write them back with a strided slice.
        res_labels[worker_i::n_worker] = tmp_labels
        res_nearest_neighbor_distance_l[worker_i::n_worker] = tmp_distance_l
        res_distance_idx_l[worker_i::n_worker] = tmp_distance_idx_l
    return res_labels, res_nearest_neighbor_distance_l, res_distance_idx_l


def count_centroid(base, centroid_l, idx, pool_size):
    """Nearest-centroid assignment for the strided slice base[idx::pool_size].

    Args:
        base: (n, d) array of items.
        centroid_l: (k, d) array-like of centroids.
        idx: this worker's offset into base.
        pool_size: stride, i.e. the total number of workers.

    Returns:
        labels: (m,) int64 — index of the nearest centroid for each handled item.
        nearest_neighbor_distance_l: (m,) float64 — distance to that centroid.
        distance_idx_l: (m, k) int64 — argsort of centroid distances per item.
    """
    centroid_arr = np.asarray(centroid_l)
    labels = []
    nearest_neighbor_distance_l = []
    distance_idx_l = []
    len_base = len(base)
    for i in range(idx, len_base, pool_size):
        # One vectorized norm over all centroids instead of a per-centroid
        # Python loop — same values, far less interpreter overhead.
        dis = np.linalg.norm(base[i] - centroid_arr, axis=1)
        order = np.argsort(dis)
        nearest = order[0]
        labels.append(nearest)
        nearest_neighbor_distance_l.append(dis[nearest])
        distance_idx_l.append(order)
    return np.array(labels, dtype=np.int64), np.array(nearest_neighbor_distance_l, dtype=np.float64) \
        , np.array(distance_idx_l, dtype=np.int64)


def assign_centroid(centroids, n_classifier, n_cluster):
    """Split the joint centroids into one codebook of n_cluster centroids per classifier.

    The flat index array is recursively partitioned into n_cluster groups of
    n_classifier members each (random-projection divide and conquer).  Every
    classifier then takes one member from each group, selected by a per-group
    random permutation, so each classifier gets n_cluster distinct centroids and
    the classifiers jointly cover all centroids.

    Args:
        centroids: (n_cluster * n_classifier, d) array of joint centroids.
        n_classifier: number of classifiers (codebooks) to produce.
        n_cluster: centroids per codebook; the recursion stops when
            2 ** depth == n_cluster, so it must be a power of two.

    Returns:
        (centroid_l_l, rp_time): an (n_classifier, n_cluster, d) array and the
        wall-clock seconds spent in the random projection.
    """

    def random_projection(centroid_l, n_classifier, n_cluster):
        # res_idx is maintained as a permutation of all centroid indices.
        res_idx = np.arange(n_classifier * n_cluster)
        divide_and_conquer(0, n_cluster, centroid_l, 0, len(centroid_l), res_idx)
        return res_idx

    def divide_and_conquer(depth, k, centroid_l, start, end, res_idx):
        # Stop once the index range has been split into k groups.
        if 2 ** depth == k:
            return
        # Project this range's centroids onto a random Gaussian direction and
        # order the range by the projection value.
        random_vector = np.random.normal(size=centroid_l.shape[1], scale=100)
        random_l = np.array([np.dot(random_vector, centroid_l[res_idx[i]])
                             for i in range(start, end)])
        # BUGFIX: permute the existing index values.  The previous code wrote
        # the raw argsort positions (+ start) into the slice, which at depth >= 1
        # dropped some indices and duplicated others, so the "groups" did not
        # partition the centroid set.
        res_idx[start:end] = res_idx[start:end][np.argsort(random_l)]
        # BUGFIX: split the range evenly (was int((start+end-1)/2), which made
        # the halves — and thus the final groups — unequal in size).
        mid = (start + end) // 2
        divide_and_conquer(depth + 1, k, centroid_l, start, mid, res_idx)
        divide_and_conquer(depth + 1, k, centroid_l, mid, end, res_idx)

    def get_total_permutation(n_classifier, n_cluster):
        # Row j is a random permutation of the classifiers: it decides which
        # member of group j each classifier receives.  Built in one shot instead
        # of the previous O(n^2) repeated np.append.
        return np.array([np.random.permutation(n_classifier) for _ in range(n_cluster)])

    rp_start_time = time.time()
    centroid_sort_idx = random_projection(centroids, n_classifier, n_cluster)
    rp_end_time = time.time()
    # Shape the flat permutation into n_cluster groups of n_classifier indices.
    centroid_sort_idx = centroid_sort_idx.reshape(n_cluster, -1)

    total_permutation = get_total_permutation(n_classifier, n_cluster)

    # Classifier i takes from each group j the member selected by that group's
    # permutation, giving every classifier n_cluster distinct centroids.
    centroid_l_l = []
    for i in range(n_classifier):
        model_centroid_l = [centroids[centroid_sort_idx[j][total_permutation[j][i]]]
                            for j in range(n_cluster)]
        centroid_l_l.append(model_centroid_l)
    centroid_l_l = np.array(centroid_l_l)
    return centroid_l_l, rp_end_time - rp_start_time
