import numpy as np
import time
from Neural_Quantization.util import dir_io
import sklearn.cluster as cls
import os
import multiprocessing

'''
{
    "type": "kmeans_multiple"
}
'''
# Cap on k-means iterations when fitting the joint codebook in
# dataset_partition(); kept small since the codebook only seeds a partition.
max_iter = 40


def dataset_partition(base, base_base_gnd, config):
    """Partition the base dataset once per classifier using a joint codebook.

    A single k-means run produces n_classifier * n_cluster centroids; those
    are split into n_classifier centroid sets by assign_centroid(), and each
    set is used to label every base vector via parallel_get_label().

    :param base: 2-D array of base vectors to partition
    :param base_base_gnd: unused here (kept for interface compatibility)
    :param config: dict with keys 'dataset_partition', 'n_cluster',
        'n_classifier', 'kahip_dir', 'program_train_para_dir', 'data_fname'
    :return: (label_l, res_intermediate) — label_l has shape
        (n_classifier, len(base)); res_intermediate collects timings
    """
    # Config reads are kept even when the value is unused locally so a
    # missing key still fails fast with a KeyError.
    partition_cfg = config['dataset_partition']
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    kahip_dir = config['kahip_dir']
    program_train_para_dir = config['program_train_para_dir']
    data_fname = config['data_fname']

    stats = {}

    codebook_model = cls.KMeans(n_clusters=n_cluster * n_classifier,
                                init='k-means++', max_iter=max_iter)

    fit_start = time.time()
    print("start generate joint codebook")
    codebook_model.fit(base)
    print("end generate joint codebook")
    fit_end = time.time()
    stats['build_codebook_time'] = fit_end - fit_start

    centroid_l_l, rp_time = assign_centroid(codebook_model.cluster_centers_,
                                            n_classifier, n_cluster)
    stats['random_projection_time'] = rp_time

    per_classifier_stats = []
    label_l = []

    for classifier_idx in range(n_classifier):
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, classifier_idx)

        label, elapsed = parallel_get_label(base, centroid_l_l[classifier_idx], save_dir)

        label_l.append(label)
        per_classifier_stats.append({'count_label_time': elapsed})

    stats['dataset_partition'] = per_classifier_stats
    return np.array(label_l), stats


def parallel_get_label(data, centroid_l, save_dir):
    """Assign every vector in `data` to its nearest centroid, in parallel.

    The rows are striped across one worker per CPU core: worker i labels rows
    i, i + n_process, i + 2 * n_process, ...  The per-worker label arrays are
    interleaved back into a single array and written to save_dir/partition.txt.

    :param data: 2-D array of vectors to label, shape (n, dim)
    :param centroid_l: sequence of centroid vectors
    :param save_dir: directory the partition file is written into (created if
        missing)
    :return: (labels, elapsed_seconds) — labels is an int64 array of length n
        holding each row's nearest-centroid index
    """
    start_time = time.time()
    n_process = multiprocessing.cpu_count()
    p = multiprocessing.Pool(n_process)
    res_l = []
    for i in range(n_process):
        res = p.apply_async(count_centroid, args=(data, centroid_l, i, n_process))
        res_l.append(res)

    p.close()
    p.join()

    res_labels = np.zeros(data.shape[0], dtype=np.int64)
    for i, res in enumerate(res_l):
        # Worker i covered rows i, i+n_process, ... — the strided slice
        # re-interleaves its labels in one vectorized assignment.
        res_labels[i::n_process] = res.get()

    # np.savetxt raises if the directory does not exist yet.
    os.makedirs(save_dir, exist_ok=True)
    np.savetxt('%s/partition.txt' % save_dir, res_labels, fmt='%d')
    end_time = time.time()
    time_consumed = end_time - start_time
    return res_labels, time_consumed


def count_centroid(base, centroid_l, idx, pool_size):
    """Label every pool_size-th vector of `base`, starting at `idx`, with the
    index of its nearest centroid (Euclidean distance).

    Intended as the worker function of a process pool: worker `idx` out of
    `pool_size` handles rows idx, idx + pool_size, idx + 2 * pool_size, ...

    :param base: indexable collection of vectors
    :param centroid_l: sequence of centroid vectors
    :param idx: this worker's offset into `base`
    :param pool_size: total number of workers (the stride)
    :return: int64 array of nearest-centroid indices for this worker's rows
    """
    # Hoist the conversion out of the loop; one vectorized norm per row
    # replaces the per-centroid Python list of the original implementation.
    centroid_arr = np.asarray(centroid_l)
    labels = []
    for i in range(idx, len(base), pool_size):
        dist_per_centroid = np.linalg.norm(centroid_arr - base[i], axis=1)
        labels.append(int(np.argmin(dist_per_centroid)))
    return np.array(labels, dtype=np.int64)


def assign_centroid(centroids, n_classifier, n_cluster):
    """Split a joint codebook of n_classifier * n_cluster centroids into
    n_classifier disjoint centroid sets of n_cluster centroids each.

    The centroids are recursively partitioned into n_cluster contiguous groups
    of n_classifier "nearby" centroids via random hyperplane projections; a
    per-group random permutation then spreads each group's members across the
    classifiers, so every centroid is used by exactly one classifier.

    NOTE: the recursive split assumes n_cluster is a power of two (the
    recursion stops when 2 ** depth == n_cluster).

    :param centroids: array of shape (n_classifier * n_cluster, dim)
    :param n_classifier: number of classifiers (= group size)
    :param n_cluster: number of groups = centroids per classifier
    :return: (centroid_l_l, elapsed) — centroid_l_l has shape
        (n_classifier, n_cluster, dim); elapsed is the projection time in s
    """
    def random_projection(centroid_l, n_classifier, n_cluster):
        # Start from the identity ordering and recursively reorder it in place.
        res_idx = np.arange(n_classifier * n_cluster)
        divide_and_conquer(0, n_cluster, centroid_l, 0, len(centroid_l), res_idx)
        return res_idx

    def divide_and_conquer(depth, k, centroid_l, start, end, res_idx):
        if 2 ** depth == k:
            return
        # Project this segment's centroids onto a random Gaussian direction,
        # sort the segment by the projection, and recurse into both halves.
        random_vector = np.random.normal(size=centroid_l.shape[1], scale=100)
        random_l = centroid_l[res_idx[start:end]] @ random_vector
        # BUG FIX: reorder the slice's existing *values* by the projection.
        # The previous `np.argsort(random_l) + start` wrote positional
        # indices, which is only correct at depth 0; at deeper levels it
        # produced duplicate indices and silently dropped centroids.
        res_idx[start:end] = res_idx[start:end][np.argsort(random_l)]
        # BUG FIX: split exactly in half. The previous (start + end - 1) // 2
        # made uneven halves (e.g. 8 -> 3 + 5), breaking the equal-size-group
        # assumption of the reshape(n_cluster, -1) below.
        mid = (start + end) // 2
        divide_and_conquer(depth + 1, k, centroid_l, start, mid, res_idx)
        divide_and_conquer(depth + 1, k, centroid_l, mid, end, res_idx)

    # One independent random permutation of the classifiers per group,
    # stacked into an (n_cluster, n_classifier) matrix.
    def get_total_permutation(n_classifier, n_cluster):
        return np.array([np.random.permutation(n_classifier)
                         for _ in range(n_cluster)])

    rp_start_time = time.time()
    centroid_sort_idx = random_projection(centroids, n_classifier, n_cluster)
    rp_end_time = time.time()
    # k * m: n_cluster groups of n_classifier neighbouring centroids each.
    centroid_sort_idx = centroid_sort_idx.reshape(n_cluster, -1)

    total_permutation = get_total_permutation(n_classifier, n_cluster)

    # Classifier i takes one (randomly permuted) member of every group, so
    # each classifier gets n_cluster centroids and no centroid is reused.
    centroid_l_l = []
    for i in range(n_classifier):
        model_centroid_l = [centroids[centroid_sort_idx[j][total_permutation[j][i]]]
                            for j in range(n_cluster)]
        centroid_l_l.append(model_centroid_l)
    centroid_l_l = np.array(centroid_l_l)
    return centroid_l_l, rp_end_time - rp_start_time
