import multiprocessing
import os
import subprocess
import time

import numpy as np

'''
{
    "type": "knn"
}
'''
# Number of nearest neighbors kept per vertex when building the k-NN graph
# (the slice takes k_graph + 1 columns because the first column is expected
# to be the vertex itself).
k_graph = 100
# NOTE(review): defined but never referenced in this chunk — confirm it is
# consumed elsewhere in the project before removing.
random_seed = 100
# Number of closest centroids each base vector is assigned to
# (overlapping partition width).
n_overlap = 4
# Cluster size-variance parameter forwarded to the external "overlap" binary.
size_variance = 1.5


def dataset_partition(base, base_base_gnd, config):
    """Partition the base dataset once per classifier.

    Pipeline: build a k-NN graph from ``base_base_gnd`` and save it, run the
    external overlap program to obtain one centroid set per classifier, then
    assign every base vector to its ``n_overlap`` nearest centroids in
    parallel and collect per-step timings.

    Parameters
    ----------
    base : np.ndarray
        Base dataset vectors, shape (n_items, dim).
    base_base_gnd : np.ndarray
        Precomputed neighbor-index table; row i lists the nearest neighbors
        of item i (item i itself expected in the first column).
    config : dict
        Must provide 'n_cluster', 'n_classifier', 'program_train_para_dir',
        'project_dir' and 'data_fname'.

    Returns
    -------
    (np.ndarray, dict)
        ``label_l`` with shape (n_classifier, n_items, n_overlap) and a dict
        of timing / intermediate statistics.
    """
    # Removed unused locals: config['dataset_partition'] and
    # config['kahip_dir'] were read but never used in this function.
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    program_train_para_dir = config['program_train_para_dir']
    project_dir = config['project_dir']
    data_fname = config['data_fname']

    res_intermediate = {}

    build_graph_start_time = time.time()
    graph = build_graph(base_base_gnd)
    build_graph_end_time = time.time()
    # NOTE(review): key spelling ('bulid') kept as-is because downstream
    # consumers may look the statistic up by this exact name — confirm
    # before renaming.
    res_intermediate['bulid_graph_time'] = build_graph_end_time - build_graph_start_time

    save_graph_start_time = time.time()
    save_graph(graph, program_train_para_dir)
    save_graph_end_time = time.time()
    res_intermediate['save_graph_time'] = save_graph_end_time - save_graph_start_time

    get_centroid_start_time = time.time()
    centroid_l_l = get_centroids_l(project_dir, data_fname, program_train_para_dir, n_cluster, n_classifier,
                                   size_variance)
    get_centroid_end_time = time.time()
    res_intermediate['get_centroid_total_time'] = get_centroid_end_time - get_centroid_start_time

    intermediate_l = []
    label_l = []

    for i in range(n_classifier):
        tmp_intermediate = {}
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, i)
        centroid_l = centroid_l_l[i]

        label, get_label_time = parallel_get_label(base, centroid_l, save_dir)
        tmp_intermediate['parallel_get_label_time'] = get_label_time

        label_l.append(label)
        intermediate_l.append(tmp_intermediate)
    label_l = np.array(label_l)
    res_intermediate['dataset_partition'] = intermediate_l
    return label_l, res_intermediate


def parallel_get_label(data, centroid_l, save_dir):
    """Assign every row of ``data`` to its ``n_overlap`` nearest centroids.

    The work is fanned out one task per CPU: worker ``i`` handles rows
    i, i + n_process, i + 2 * n_process, ...  Results are stitched back into
    original row order, written to '<save_dir>/label.txt', and returned.

    Parameters
    ----------
    data : np.ndarray
        Vectors to label, shape (n_items, dim).
    centroid_l : np.ndarray
        Centroid matrix for one classifier.
    save_dir : str
        Directory the label file is written into (must already exist).

    Returns
    -------
    (np.ndarray, float)
        Labels of shape (len(data), n_overlap) and the wall-clock seconds spent.
    """
    start_time = time.time()
    n_process = multiprocessing.cpu_count()  # hoisted: one call, reused throughout
    # context manager guarantees the pool is torn down even if a worker raises
    with multiprocessing.Pool(n_process) as pool:
        async_res_l = [
            pool.apply_async(count_centroid, args=(data, centroid_l, i, n_process))
            for i in range(n_process)
        ]
        pool.close()
        pool.join()
        res_labels = np.zeros(shape=(data.shape[0], n_overlap)).astype(np.int64)
        for i, res in enumerate(async_res_l):
            tmp_labels = res.get()
            # worker i produced rows i, i + n_process, ... — scatter them back
            for j in range(len(tmp_labels)):
                res_labels[i + j * n_process] = tmp_labels[j]
    np.savetxt('%s/label.txt' % save_dir, res_labels, fmt='%d')
    end_time = time.time()
    return res_labels, end_time - start_time


def count_centroid(base, centroid_l, idx, pool_size):
    """Worker routine: label the slice of ``base`` owned by this process.

    Processes rows idx, idx + pool_size, idx + 2 * pool_size, ... and for
    each one records the indices of its ``n_overlap`` closest centroids,
    measured by Euclidean (L2) distance.
    """
    selected = []
    total = len(base)
    for row in range(idx, total, pool_size):
        point = base[row]
        # distance from this point to every centroid
        distances = [np.linalg.norm(point - centroid) for centroid in centroid_l]
        order = np.argsort(distances)
        selected.append([order[k] for k in range(n_overlap)])
    return np.array(selected, dtype=np.int64)


def get_centroids_l(project_dir, data_fname, program_train_para_dir, n_cluster, n_classifier, size_variance):
    """Run the external 'overlap' binary and load the centroid files it writes.

    The binary is expected to produce one
    'Classifier_<i>/dataset_partition/centroids.txt' per classifier under
    ``program_train_para_dir``; each file is then loaded with np.loadtxt.

    Returns
    -------
    list[np.ndarray]
        One centroid matrix per classifier.
    """
    # argument-list form (shell=False) instead of os.system: paths containing
    # spaces or shell metacharacters can no longer break or inject into the
    # command line
    command = [
        '%s/Neural_Overlapping_Quantization/CPP_Project/knn_overlap/build/overlap' % project_dir,
        '%s/data/dataset/%s/base.fvecs' % (project_dir, data_fname),
        program_train_para_dir,
        '%d' % n_cluster,
        '%d' % n_classifier,
        '%.3f' % size_variance,
    ]
    # like the original os.system call, the exit status is not enforced here;
    # a failure surfaces below when np.loadtxt misses the centroid files
    subprocess.run(command)
    centroid_l_l = []
    for i in range(n_classifier):
        centroid_path = "%s/Classifier_%d/dataset_partition/centroids.txt" % (program_train_para_dir, i)
        centroid_l_l.append(np.loadtxt(centroid_path))
    return centroid_l_l


def build_graph(base_base_gnd):
    """Build an undirected k-NN graph as a list of neighbor sets.

    Row i of ``base_base_gnd`` is expected to list the nearest neighbors of
    vertex i, with vertex i itself in the first column.  The first
    k_graph + 1 columns are taken, self-loops are removed, and edges are
    symmetrized (if j neighbors i, then i is added as a neighbor of j).

    Parameters
    ----------
    base_base_gnd : np.ndarray
        Neighbor-index table, shape (n_vertices, n_neighbors).

    Returns
    -------
    list[set]
        graph[i] is the set of vertex indices adjacent to vertex i.

    Raises
    ------
    Exception
        If the dataset has fewer than k_graph + 1 points.
    """
    vertices = len(base_base_gnd)
    if vertices < k_graph + 1:
        # fixed typo in message: "samll" -> "small"
        raise Exception("build graph error, input dataset is too small, do not meet the demand of number of edge")
    if k_graph + 1 > base_base_gnd.shape[1]:
        # fewer neighbors available than requested: degrade gracefully and warn
        print("\033[32;1m Warning! the length of k_graph + 1 is larger than base_base_gnd could provide \033[0m")
        print("length of base_base_gnd %d" % (base_base_gnd.shape[1]))

    # +1 because the first index of each row is expected to be the vertex itself
    index_arr = base_base_gnd[:, :k_graph + 1]
    # (removed the no-op full-slice copy the original performed here)
    graph = [set(row) for row in index_arr.tolist()]

    for i in range(len(graph)):
        graph[i].discard(i)  # drop the self-loop if present
        # symmetrize: every out-edge i -> j gains the reverse edge j -> i
        # (safe: i was discarded above, so graph[i] itself is never mutated
        # while being iterated)
        for neighbor in graph[i]:
            graph[neighbor].add(i)
    return graph


def save_graph(graph, save_dir):
    """Write ``graph`` (a list of neighbor collections) to '<save_dir>/graph.graph'.

    File format: the first line holds the vertex count; line i+1 then holds
    the degree of vertex i followed by its neighbor ids, space-separated.
    A trailing space before each newline is kept byte-for-byte for
    compatibility with the previous writer.
    """
    vertices = len(graph)

    save_path = '%s/graph.graph' % save_dir
    print("save dir " + save_path)  # fixed: separator space was missing
    with open(save_path, 'w') as f:
        f.write("%d\n" % vertices)
        for nearest_index in graph:
            # one join per row instead of quadratic '+=' concatenation
            tokens = [str(len(nearest_index))] + [str(item) for item in nearest_index]
            f.write(' '.join(tokens) + ' \n')
    print("save graph complete")
