import numpy as np
import time
from Neural_Quantization.util import dir_io, groundtruth, read_data
import sklearn.cluster as cls
import os
import multiprocessing

'''
{
    "type": "kmeans_multiple_graph_partition"
}
'''
# Maximum number of Lloyd iterations for the joint k-means codebook fit below.
max_iter = 40
# NOTE(review): presumably the KaHIP graph-partition count — unused in this chunk; confirm.
k_graph = 50
# KaHIP preconfiguration preset name (valid options listed in the comment below).
graph_preconfiguration = 'strong'


# strong eco fast fastsocial ecosocial strongsocial


def dataset_partition(base, base_base_gnd, config):
    """Partition the base dataset into `n_classifier` independent labelings.

    Fits one joint k-means codebook with n_cluster * n_classifier centroids,
    randomly splits those centroids into n_classifier groups (assign_centroid),
    then labels every base vector against each group in parallel
    (parallel_get_label), saving per-classifier partition files on the way.

    :param base: 2-D array of base vectors, shape (n, d)
    :param base_base_gnd: NOTE(review): unused in this function — confirm whether
        it is needed or can be dropped at the call site.
    :param config: dict with keys 'dataset_partition', 'n_cluster',
        'n_classifier', 'kahip_dir', 'program_train_para_dir', 'data_fname'
    :return: (label_l, res_intermediate) where label_l has shape
        (n_classifier, n) and res_intermediate records timing info
    """
    dataset_partition_config = config['dataset_partition']
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    kahip_dir = config['kahip_dir']
    program_train_para_dir = config['program_train_para_dir']
    data_fname = config['data_fname']

    res_intermediate = {}

    # One shared codebook for all classifiers; each classifier later receives
    # a disjoint random subset of its centroids.
    multi_codebook_model = cls.KMeans(n_clusters=n_cluster * n_classifier, init='k-means++', max_iter=max_iter)

    kmeans_start_time = time.time()
    print("start generate joint codebook")
    multi_codebook_model.fit(base)
    print("end generate joint codebook")
    kmeans_end_time = time.time()
    res_intermediate['build_codebook_time'] = kmeans_end_time - kmeans_start_time

    centroids = multi_codebook_model.cluster_centers_
    # NOTE(review): fmt='%d' truncates the float centroid coordinates to
    # integers in the saved file — confirm downstream readers expect that.
    np.savetxt('%s/centroid.txt' % program_train_para_dir, centroids, fmt='%d')
    centroid_l_l = assign_centroid(centroids, n_classifier, n_cluster,
                                   kahip_dir, program_train_para_dir)

    intermediate_l = []
    label_l = []

    for i in range(n_classifier):
        tmp_intermediate = {}
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, i)

        # Centroid subset assigned to classifier i.
        centroid_l = centroid_l_l[i]

        label, time_consumed = parallel_get_label(base, centroid_l, save_dir)

        tmp_intermediate['count_label_time'] = time_consumed
        label_l.append(label)
        intermediate_l.append(tmp_intermediate)
    label_l = np.array(label_l)
    res_intermediate['dataset_partition'] = intermediate_l
    return label_l, res_intermediate


def parallel_get_label(data, centroid_l, save_dir):
    """Assign every row of `data` to its nearest centroid using a process pool.

    Work is split by stride: worker i labels rows i, i + n_process,
    i + 2 * n_process, ...; results are interleaved back into one array with
    the same stride. The label array is also written to save_dir/partition.txt.

    :param data: 2-D array of vectors to label, shape (n, d)
    :param centroid_l: 2-D array of centroids, shape (k, d)
    :param save_dir: directory that receives partition.txt
    :return: (labels, elapsed_seconds) — labels is an int64 array of length n
    """
    start_time = time.time()
    n_process = multiprocessing.cpu_count()
    res_labels = np.zeros(data.shape[0], dtype=np.int64)
    # Context manager guarantees the pool is torn down even if a worker raises
    # (the original leaked the pool on any exception before close()).
    with multiprocessing.Pool(n_process) as pool:
        async_results = [
            pool.apply_async(count_centroid, args=(data, centroid_l, i, n_process))
            for i in range(n_process)
        ]
        pool.close()
        pool.join()
        for i, res in enumerate(async_results):
            # Worker i handled rows i::n_process — write them back with the
            # same stride in a single slice assignment.
            res_labels[i::n_process] = res.get()
    # Be defensive: the save directory may not exist yet.
    os.makedirs(save_dir, exist_ok=True)
    np.savetxt('%s/partition.txt' % save_dir, res_labels, fmt='%d')
    end_time = time.time()
    time_consumed = end_time - start_time
    return res_labels, time_consumed


def count_centroid(base, centroid_l, idx, pool_size):
    """Label the strided subset base[idx::pool_size] by nearest centroid.

    Each of `pool_size` workers handles rows idx, idx + pool_size, ... so the
    full dataset is covered exactly once across all workers.

    :param base: 2-D array of vectors, shape (n, d)
    :param centroid_l: 2-D array of centroids, shape (k, d)
    :param idx: this worker's offset into `base`
    :param pool_size: total number of workers (the stride)
    :return: int64 array of nearest-centroid indices for base[idx::pool_size]
    """
    subset = np.asarray(base)[idx::pool_size]
    centroid_arr = np.asarray(centroid_l)
    if subset.shape[0] == 0:
        # No rows fall in this worker's stride (idx >= len(base)).
        return np.array([], dtype=np.int64)
    # Vectorized pairwise Euclidean distances, shape (m, k); argmin along the
    # centroid axis replaces the original per-row Python loop. Ties resolve to
    # the first (lowest-index) centroid, matching np.argmin on a list.
    dist_table = np.linalg.norm(subset[:, None, :] - centroid_arr[None, :, :], axis=2)
    return np.argmin(dist_table, axis=1).astype(np.int64)


# Randomly split the joint codebook into per-classifier centroid sets.
# (The previous comment claimed a time value was also returned — it never was.)
def assign_centroid(centroids, n_classifier, n_cluster, kahip_dir, program_train_para_dir):
    """Split the joint codebook into n_classifier random, disjoint centroid sets.

    A random permutation of all n_classifier * n_cluster codeword ids is
    reshaped into n_classifier rows; each row selects that classifier's
    centroids, so every centroid is assigned to exactly one classifier.

    :param centroids: 2-D array of shape (n_classifier * n_cluster, d)
    :param n_classifier: number of classifiers to split the codebook across
    :param n_cluster: number of centroids per classifier
    :param kahip_dir: unused; kept for interface compatibility with callers
    :param program_train_para_dir: unused; kept for interface compatibility
    :return: array of shape (n_classifier, n_cluster, d)
    """
    permutation_l = np.random.permutation(n_classifier * n_cluster).reshape(n_classifier, -1)
    # Fancy indexing gathers all per-classifier centroid groups in one shot,
    # replacing the original per-classifier Python loop (identical result).
    return centroids[permutation_l]
