import numpy as np
import time
from Neural_Weight_Quantization.util import dir_io, vecs_io
import sklearn.cluster as cls
import os
import multiprocessing

'''
{
    "type": "kmeans_multiple_nearest_centroid"
}
'''
# Maximum number of Lloyd iterations for the joint k-means codebook fit below.
max_iter = 40


def dataset_partition(base, base_base_gnd, config):
    """Partition `base` into n_classifier sub-labelings via a joint k-means codebook.

    A single KMeans model with n_cluster * n_classifier centroids is fit on
    `base`; the centroids are then distributed across the classifiers by
    random projection (see assign_centroid), and each classifier labels every
    base vector with its nearest assigned centroid.

    :param base: array of shape (n_items, dim), vectors to partition
    :param base_base_gnd: unused here; kept for interface compatibility with
                          other partition strategies — TODO confirm
    :param config: dict; this strategy reads 'n_cluster', 'n_classifier'
                   and 'program_train_para_dir'
    :return: ((weight_l_l, label_l_l), res_intermediate) where weight_l_l has
             shape (n_classifier, n_items, n_cluster) and label_l_l has shape
             (n_classifier, n_items)
    """
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    program_train_para_dir = config['program_train_para_dir']
    # NOTE(review): the original also read config['dataset_partition'],
    # config['kahip_dir'] and config['data_fname'] into locals that were never
    # used; those dead reads were dropped.

    res_intermediate = {}

    # One joint codebook: n_cluster centroids for each of the n_classifier models.
    multi_codebook_model = cls.KMeans(n_clusters=n_cluster * n_classifier,
                                      init='k-means++', max_iter=max_iter)

    kmeans_start_time = time.time()
    print("start generate joint codebook")
    multi_codebook_model.fit(base)
    print("end generate joint codebook")
    kmeans_end_time = time.time()
    res_intermediate['build_codebook_time'] = kmeans_end_time - kmeans_start_time

    centroids = multi_codebook_model.cluster_centers_
    # Split the joint centroid set into one centroid list per classifier.
    centroid_l_l, rp_time = assign_centroid(centroids, n_classifier, n_cluster)
    res_intermediate['random_projection_time'] = rp_time

    intermediate_l = []
    weight_l_l = []
    label_l_l = []

    for i in range(n_classifier):
        tmp_intermediate = {}
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, i)

        centroid_l = centroid_l_l[i]
        print("start get weight_l classifier", i)
        weight_l, label_l, time_consumed = parallel_get_label(base, centroid_l, save_dir)
        print("end get weight_l classifier", i)

        tmp_intermediate['count_label_time'] = time_consumed
        weight_l_l.append(weight_l)
        label_l_l.append(label_l)

        intermediate_l.append(tmp_intermediate)

    weight_l_l = np.array(weight_l_l)
    label_l_l = np.array(label_l_l)
    res_intermediate['dataset_partition'] = intermediate_l
    return (weight_l_l, label_l_l), res_intermediate


def parallel_get_label(data, centroid_l, save_dir):
    """Compute, in parallel, the soft weight vector and nearest-centroid label
    for every row of `data`, then persist both to `save_dir`.

    Work is striped across processes: worker i handles rows i, i+P, i+2P, ...
    (P = process count), and the interleaved results are scattered back below
    using the same stride.

    :param data: array of shape (n_items, dim)
    :param centroid_l: sequence of centroid vectors
    :param save_dir: directory receiving 'weight_l.fvecs' and 'label.txt'
    :return: (res_weight_l, res_label_l, time_consumed)
    """
    start_time = time.time()
    n_process = multiprocessing.cpu_count()
    p = multiprocessing.Pool(n_process)
    res_l = []
    for i in range(n_process):
        res = p.apply_async(count_centroid, args=(data, centroid_l, i, n_process))
        res_l.append(res)

    p.close()
    p.join()
    # np.float / np.int were deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit dtypes.
    res_weight_l = np.zeros(shape=(data.shape[0], len(centroid_l)), dtype=np.float64)
    res_label_l = np.zeros(shape=(data.shape[0]), dtype=np.int64)
    for i, res in enumerate(res_l, 0):
        tmp_weight, tmp_label = res.get()
        # Worker i produced rows i, i+P, i+2P, ... — scatter them back in place.
        for j in range(len(tmp_label)):
            res_weight_l[i + j * n_process] = tmp_weight[j]
            res_label_l[i + j * n_process] = tmp_label[j]
    # Make sure the target directory exists before writing the result files.
    os.makedirs(save_dir, exist_ok=True)
    vecs_io.fvecs_write('%s/weight_l.fvecs' % save_dir, res_weight_l)
    np.savetxt('%s/label.txt' % save_dir, res_label_l, fmt='%d')
    end_time = time.time()
    time_consumed = end_time - start_time
    return res_weight_l, res_label_l, time_consumed


def softmax(x):
    """Softmax over negated inputs: smaller x -> larger weight.

    Shifting by min(x) before exponentiating is mathematically a no-op
    (the common factor cancels in the normalization) but prevents
    overflow/underflow for inputs of large magnitude — the original
    `exp(-x)` returned all-zero (then 0/0 = NaN) for large positive x.
    """
    exp_res = np.exp(-(x - np.min(x)))
    return exp_res / np.sum(exp_res)


def powermax(x):
    """Return inverse-cube weights of x, normalized to sum to 1.

    Smaller values of x receive larger weights (used to weight centroids
    by closeness). NOTE(review): a zero entry in x yields inf/nan weights.
    """
    inv_cube = np.power(x, -3)
    return inv_cube / np.sum(inv_cube)


def count_centroid(base, centroid_l, idx, pool_size):
    """Worker: weight and label the strided slice base[idx::pool_size].

    For each assigned row, builds the distance table to every centroid,
    derives normalized inverse-cube weights (the powermax scheme, inlined
    here so the worker is self-contained) and the nearest-centroid label.

    :param base: array of shape (n_items, dim)
    :param centroid_l: sequence of centroid vectors, shape (n_centroid, dim)
    :param idx: this worker's offset, in [0, pool_size)
    :param pool_size: total number of workers (the stride)
    :return: (weights float32 of shape (n_assigned, n_centroid),
              labels int64 of shape (n_assigned,))
    """
    centroid_arr = np.asarray(centroid_l, dtype=np.float64)
    weight_l = []
    label_l = []
    for i in range(idx, len(base), pool_size):
        # Vectorized distance row instead of a per-centroid Python loop.
        tmp_dis = np.linalg.norm(centroid_arr - base[i], axis=1)
        # Inverse-cube weighting: closer centroids get larger weight.
        # NOTE(review): a zero distance yields inf/nan weights — unchanged
        # from the original behavior; confirm base rows never coincide with
        # a centroid.
        inv_cube = np.power(tmp_dis, -3)
        weight_l.append(inv_cube / np.sum(inv_cube))
        label_l.append(np.argmin(tmp_dis))
    # np.float / np.int were removed from modern NumPy; use explicit dtypes.
    return np.array(weight_l, dtype=np.float32), np.array(label_l, dtype=np.int64)


def assign_centroid(centroids, n_classifier, n_cluster):
    """Distribute the joint centroid set among the classifiers.

    The centroids are recursively bisected along random projection
    directions into n_cluster groups of n_classifier similar centroids
    each; classifier i then takes the i-th member of every group, so each
    classifier receives n_cluster centroids and every centroid is used
    exactly once.

    :param centroids: array of shape (n_classifier * n_cluster, dim)
    :param n_classifier: number of classifiers (= size of each group)
    :param n_cluster: number of groups (= centroids per classifier)
    :return: (centroid_l_l of shape (n_classifier, n_cluster, dim),
              elapsed seconds of the random-projection phase)
    """
    def random_projection(centroid_l, n_classifier, n_cluster):
        res_idx = np.arange(n_classifier * n_cluster)
        divide_and_conquer(0, n_cluster, centroid_l, 0, len(centroid_l), res_idx)
        return res_idx

    def divide_and_conquer(depth, k, centroid_l, start, end, res_idx):
        # Stop once the range has been halved log2(k) times: k leaf groups.
        if 2 ** depth == k:
            return
        random_vector = np.random.normal(size=centroid_l.shape[1], scale=100)
        # Project the centroids currently assigned to [start, end) onto the
        # random Gaussian direction.
        random_l = []
        for i in range(start, end):
            random_num = np.dot(random_vector, centroid_l[res_idx[i]])
            random_l.append(random_num)
        random_l = np.array(random_l)
        # BUGFIX: reorder the *existing* indices by projection value. The old
        # code wrote `argsort(random_l) + start` (slice positions) directly
        # into res_idx, discarding the real centroid indices on every
        # recursion level below the first.
        order = np.argsort(random_l)
        res_idx[start:end] = res_idx[start:end][order]
        # BUGFIX: true midpoint. The old `int((start + end - 1) / 2)` split
        # even-length ranges unevenly (and produced an empty half for ranges
        # of length 2), breaking the equal-size groups that the
        # reshape(n_cluster, -1) below relies on.
        mid = (start + end) // 2
        depth += 1
        divide_and_conquer(depth, k, centroid_l, start, mid, res_idx)
        divide_and_conquer(depth, k, centroid_l, mid, end, res_idx)

    # Random within-group permutation — currently unused alternative to the
    # deterministic column pick below (see the commented call sites).
    def get_total_permutation(n_classifier, n_cluster):
        total_permutation = None  # n_cluster * n_classifier
        for i in range(n_cluster):
            arr = np.random.permutation(n_classifier)
            arr = np.array([arr])
            if i == 0:
                total_permutation = arr
            else:
                total_permutation = np.append(total_permutation, arr, axis=0)
        return total_permutation

    rp_start_time = time.time()
    centroid_sort_idx = random_projection(centroids, n_classifier, n_cluster)
    rp_end_time = time.time()
    # k groups with m points in each group: shape (n_cluster, n_classifier).
    centroid_sort_idx = centroid_sort_idx.reshape(n_cluster, -1)

    # permutation_l = get_total_permutation(n_classifier, n_cluster)

    # Classifier i takes the i-th centroid of every group.
    centroid_l_l = []
    for i in range(n_classifier):
        model_centroid_l = []
        for j in range(n_cluster):
            idx = centroid_sort_idx[j][i]
            # idx = centroid_sort_idx[j][permutation_l[j][i]]
            model_centroid_l.append(centroids[idx])
        centroid_l_l.append(model_centroid_l)
    centroid_l_l = np.array(centroid_l_l)
    return centroid_l_l, rp_end_time - rp_start_time
