import numpy as np
import time
from Neural_Quantization.util import dir_io, groundtruth, read_data
import sklearn.cluster as cls
import faiss
import os
import multiprocessing

'''
{
    "type": "kmeans_multiple_graph_partition"
}
'''
# Maximum number of Lloyd iterations when fitting the joint k-means codebook.
max_iter = 40
# Number of nearest neighbours used to build the knn graph over centroids
# (assign_centroid requires k_graph + 1 <= number of centroids).
k_graph = 50
# KaHIP preset passed to kaffpa via --preconfiguration; valid values listed below.
graph_preconfiguration = 'strong'


# strong eco fast fastsocial ecosocial strongsocial


def dataset_partition(base, base_base_gnd, config):
    """Partition the dataset once per classifier using a joint k-means codebook.

    A single k-means model with n_cluster * n_classifier centroids is fitted
    on `base`; the centroids are then distributed across the classifiers by a
    knn-graph partition (see assign_centroid), and every base vector is
    labelled with its nearest assigned centroid, separately per classifier.

    Returns (label_l, res_intermediate): an (n_classifier, n_base) array of
    cluster ids and a dict of timing statistics.
    """
    partition_cfg = config['dataset_partition']
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    kahip_dir = config['kahip_dir']
    program_train_para_dir = config['program_train_para_dir']
    data_fname = config['data_fname']

    res_intermediate = {}

    # One shared codebook for all classifiers: n_cluster centroids each.
    codebook_model = cls.KMeans(n_clusters=n_cluster * n_classifier,
                                init='k-means++', max_iter=max_iter)

    codebook_start = time.time()
    print("start generate joint codebook")
    codebook_model.fit(base)
    print("end generate joint codebook")
    res_intermediate['build_codebook_time'] = time.time() - codebook_start

    centroids = codebook_model.cluster_centers_
    centroid_l_l, graph_time, kahip_time = assign_centroid(
        centroids, n_classifier, n_cluster, kahip_dir, program_train_para_dir)
    res_intermediate['build_graph_time'] = graph_time
    res_intermediate['kahip_partition_time'] = kahip_time

    intermediate_l = []
    label_l = []

    for classifier_i in range(n_classifier):
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, classifier_i)
        # Nearest-centroid label per base vector for this classifier's centroids.
        label, elapsed = parallel_get_label(base, centroid_l_l[classifier_i], save_dir)
        label_l.append(label)
        intermediate_l.append({'count_label_time': elapsed})

    res_intermediate['dataset_partition'] = intermediate_l
    return np.array(label_l), res_intermediate


def parallel_get_label(data, centroid_l, save_dir):
    """Label every row of `data` with its nearest centroid, in parallel.

    Work is split round-robin over the CPU cores: worker i handles rows
    i, i + n_process, i + 2 * n_process, ...  The assembled labels are also
    written to `save_dir`/partition.txt (the directory must already exist).

    Returns (labels, time_consumed): an int64 array of length len(data) and
    the wall-clock seconds spent (including the savetxt call).
    """
    start_time = time.time()
    n_process = multiprocessing.cpu_count()
    # Context manager guarantees the pool's workers are cleaned up even if
    # a worker raises (the original leaked the pool on error).
    with multiprocessing.Pool(n_process) as pool:
        res_l = [pool.apply_async(count_centroid, args=(data, centroid_l, i, n_process))
                 for i in range(n_process)]
        pool.close()
        pool.join()
        partial_labels = [res.get() for res in res_l]

    res_labels = np.zeros(data.shape[0], dtype=np.int64)
    for i, tmp_labels in enumerate(partial_labels):
        # Worker i produced labels for the strided subset data[i::n_process];
        # one slice assignment replaces the original element-wise Python loop.
        res_labels[i::n_process] = tmp_labels
    np.savetxt('%s/partition.txt' % save_dir, res_labels, fmt='%d')
    time_consumed = time.time() - start_time
    return res_labels, time_consumed


def count_centroid(base, centroid_l, idx, pool_size):
    """Nearest-centroid labels for the strided subset base[idx::pool_size].

    Worker `idx` out of `pool_size` processes rows idx, idx + pool_size, ...
    of `base`, assigning each row the index of its closest centroid under
    Euclidean (L2) distance.

    Returns an int64 array with one label (index into centroid_l) per
    processed row, in row order.
    """
    centroid_arr = np.asarray(centroid_l)  # hoisted: convert once, not per row
    subset = np.asarray(base)[idx::pool_size]
    labels = np.empty(len(subset), dtype=np.int64)
    for row_i, vec in enumerate(subset):
        # Vectorized over centroids (one C-level norm + argmin per row),
        # replacing the original per-centroid Python list comprehension.
        labels[row_i] = np.linalg.norm(centroid_arr - vec, axis=1).argmin()
    return labels


# return centroid_l_l, time to assign centroid
def assign_centroid(centroids, n_classifier, n_cluster, kahip_dir, program_train_para_dir):
    """Distribute the joint k-means centroids across the classifiers.

    Builds a symmetric knn graph over the centroids, partitions it into
    n_cluster groups with KaHIP's kaffpa binary, then gives each classifier
    one centroid from every group (chosen by a random per-group permutation,
    so no two classifiers share a centroid from the same group).

    Returns (centroid_l_l, build_graph_time, kahip_partition_time), where
    centroid_l_l is indexed [classifier][cluster] -> centroid vector.
    """
    def build_graph(centroids, k_graph):
        # Adjacency from each centroid's k_graph nearest neighbours (L2);
        # the extra +1 neighbour absorbs the self-match removed below.
        if k_graph + 1 > len(centroids):
            raise Exception("k_graph + 1 > the number of centroids, program exit")
        base_base_gnd = groundtruth.get_gnd(centroids, centroids, k_graph + 1, 'l2')
        index_arr = base_base_gnd[:, :] + 1  # kahip need the index start from 1, so +1
        weightless_graph = index_arr.tolist()
        for i in range(len(weightless_graph)):
            weightless_graph[i] = set(weightless_graph[i])

        # print("get the nearest k result")

        # Symmetrize in place: drop each vertex's self-loop, and whenever j
        # is a neighbour of i, ensure i is also a neighbour of j (kaffpa
        # requires an undirected graph).
        for i in range(len(weightless_graph)):
            if (i + 1) in weightless_graph[i]:
                weightless_graph[i].remove((i + 1))
            for vertices_index in weightless_graph[i]:
                if (i + 1) not in weightless_graph[vertices_index - 1]:
                    weightless_graph[vertices_index - 1].add(i + 1)

        # Every edge gets unit weight (dict: neighbour -> weight).
        res_graph = []
        for i in range(len(weightless_graph)):
            tmp_line = {}
            for vertices in weightless_graph[i]:
                tmp_line[vertices] = 1
            res_graph.append(tmp_line)
        # print("change the rank into graph successfully")
        return res_graph

    def save_graph(graph, save_dir):
        # graph is the 2d array
        # Writes the METIS-style graph file kaffpa reads.
        vertices = len(graph)
        edges = 0
        for vecs in graph:
            edges += len(vecs)
        # Symmetric graph: every edge was counted once per endpoint.
        assert edges % 2 == 0
        # NOTE(review): true division yields a float edge count — confirm
        # dir_io.save_graph_edge_weight formats it as an integer in the header.
        edges = edges / 2

        save_dir = '%s/centroid_graph.graph' % save_dir
        print("save dir" + save_dir)
        dir_io.save_graph_edge_weight(save_dir, graph, vertices, edges)
        print("save graph complete")

    # generate random permutation after got the group
    def get_total_permutation(n_classifier, n_cluster):
        # random select
        # Row j is a random permutation of range(n_classifier); entry [j][i]
        # picks which member of group j classifier i receives.
        total_permutation = None  # shape (n_cluster, n_classifier)
        for i in range(n_cluster):
            arr = np.random.permutation(n_classifier)
            arr = np.array([arr])
            if i == 0:
                total_permutation = arr
            else:
                total_permutation = np.append(total_permutation, arr, axis=0)
        return total_permutation

    # build knn graph
    build_graph_start_time = time.time()
    knn_graph = build_graph(centroids, k_graph)
    build_graph_end_time = time.time()
    build_graph_time = build_graph_end_time - build_graph_start_time
    save_graph(knn_graph, program_train_para_dir)

    # use kaffpa to partition the graph
    # NOTE(review): shell command built by string formatting — paths with
    # spaces/shell metacharacters would break; assumed trusted config here.
    kahip_command = '%s/deploy/kaffpa %s/centroid_graph.graph --preconfiguration=%s ' \
                    '--output_filename=%s/centroid_partition.txt --imbalance=0 ' \
                    '--k=%d' % (
                        kahip_dir, program_train_para_dir, graph_preconfiguration, program_train_para_dir, n_cluster)
    print(kahip_command)
    kahip_partition_start_time = time.time()
    os.system(kahip_command)
    kahip_partition_end_time = time.time()
    kahip_partition_time = kahip_partition_end_time - kahip_partition_start_time

    # read the graph and assign the centroid
    # Group the centroid indices by the kaffpa-assigned cluster id.
    labels = read_data.read_partition('%s/centroid_partition.txt' % program_train_para_dir)
    labels = np.array(labels)
    centroid_sort_idx = []
    for cluster_i in range(n_cluster):
        idx_l = np.argwhere(labels == cluster_i).reshape(-1)
        centroid_sort_idx.append(idx_l)
    # NOTE(review): this assumes kaffpa returned equal-sized groups
    # (imbalance=0); ragged group sizes would make this an object array or
    # raise on newer numpy — confirm upstream guarantees.
    centroid_sort_idx = np.array(centroid_sort_idx)

    total_permutation = get_total_permutation(n_classifier, n_cluster)
    # here extract a vector for each group and we get the k vectors. Use the k vectors as the centroid of the model
    # NOTE(review): indexing permutation[j][i] into group j assumes each
    # group holds at least n_classifier centroids — verify.
    centroid_l_l = []
    for i in range(n_classifier):
        model_centroid_l = []
        for j in range(n_cluster):
            # idx = centroid_sort_idx[j][i]
            idx = centroid_sort_idx[j][total_permutation[j][i]]
            model_centroid_l.append(centroids[idx])
        centroid_l_l.append(model_centroid_l)
    centroid_l_l = np.array(centroid_l_l)

    return centroid_l_l, build_graph_time, kahip_partition_time
