import numpy as np
import time
from Neural_Quantization.util import dir_io, read_data
import os
import multiprocessing

'''
{
    "type": "knn",
    "build_graph": {
    },
    "graph_partition": {
      "type": "parhip"
    }
}
'''
# Amount added to the weight of each direction of an edge whose endpoints got
# different cluster labels in the previous round (see build_graph) — presumably
# to discourage the partitioner from cutting the same edges again; TODO confirm.
increase_weight = 500
# Number of nearest neighbors per vertex for the k-NN graph; build_graph reads
# k_graph + 1 columns because the first neighbor of each point is itself.
k_graph = 40
# Module-level partitioner settings read by graph_partition().
graph_partition_type = 'parhip'
# KaHIP preconfiguration preset matching the partitioner above.
preconfiguration = 'fastsocial'


def dataset_partition(base, base_base_gnd, config):
    """Partition the base dataset into n_cluster parts, once per classifier.

    For each of the n_classifier rounds: build a k-NN graph over *base*,
    save it in KaHIP format, and invoke the configured graph partitioner.
    Every round after the first re-weights the previous round's graph using
    its labels (see build_graph) before partitioning again.

    :param base: dataset vectors; only len(base) is used directly here
    :param base_base_gnd: 2-d array of nearest-neighbor indices of base vs. itself
    :param config: dict with keys 'dataset_partition', 'n_cluster',
        'n_classifier', 'kahip_dir', 'program_train_para_dir'
    :return: (label_l, res_intermediate) — label_l is an array with one row of
        per-vertex cluster ids per classifier; res_intermediate holds per-round
        timing information under the 'dataset_partition' key
    """
    # BUG FIX: these two names were previously assigned as plain *locals*, so
    # the partitioner type chosen in the config never reached
    # graph_partition(), which reads the module-level globals.  Declaring them
    # global makes the configured partitioner actually take effect.
    global graph_partition_type, preconfiguration

    dataset_partition_config = config['dataset_partition']
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    kahip_dir = config['kahip_dir']
    program_train_para_dir = config['program_train_para_dir']
    # NOTE(review): the original also read config['data_fname'] into an unused
    # local; that dead read was removed.

    res_intermediate = {}

    graph_partition_type = dataset_partition_config['graph_partition']['type']
    if graph_partition_type == 'kaffpa':
        # kaffpa presets: strong eco fast fastsocial ecosocial strongsocial
        preconfiguration = 'eco'
    elif graph_partition_type == 'parhip':
        # parhip presets: ecosocial fastsocial ultrafastsocial ecomesh fastmesh ultrafastmesh
        preconfiguration = 'fastsocial'

    intermediate_l = []
    label_l = []

    previous_info = None  # (graph, label) from the previous round, if any
    for i in range(n_classifier):
        tmp_intermediate = {}
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, i)

        build_graph_start_time = time.time()
        graph = build_graph(base, base_base_gnd, previous_info)
        build_graph_end_time = time.time()
        # NOTE(review): key misspelled ('bulid') — kept byte-identical for
        # compatibility with any downstream consumer of the intermediate dict.
        tmp_intermediate['bulid_graph_time'] = build_graph_end_time - build_graph_start_time

        save_graph_start_time = time.time()
        save_graph(graph, save_dir)
        save_graph_end_time = time.time()
        tmp_intermediate['save_graph_time'] = save_graph_end_time - save_graph_start_time

        graph_partition_start_time = time.time()
        label = graph_partition(save_dir, kahip_dir, n_cluster)
        label_l.append(label)
        graph_partition_end_time = time.time()
        tmp_intermediate['graph_partition_time'] = graph_partition_end_time - graph_partition_start_time

        previous_info = (graph, label)
        intermediate_l.append(tmp_intermediate)
    label_l = np.array(label_l)
    res_intermediate['dataset_partition'] = intermediate_l
    return label_l, res_intermediate


def build_graph(base, base_base_gnd, previous_info):
    """Build (or re-weight) the k-NN graph used for partitioning.

    First round (previous_info is None): a symmetric k-NN graph is built from
    base_base_gnd.  Vertex ids are 1-based (KaHIP convention), self-loops are
    dropped, and every edge starts with weight 1.  The graph is a list of
    dicts mapping neighbor id -> edge weight.

    Later rounds: the previous round's graph is mutated in place — every edge
    whose endpoints received different cluster labels gets increase_weight
    added to both directions.  NOTE(review): each cross edge is visited from
    both of its endpoints, so the total boost per direction ends up being
    2 * increase_weight — confirm this is intended.

    :param base: dataset; only its length is used
    :param base_base_gnd: 2-d array, row i = nearest-neighbor indices of point i
    :param previous_info: None, or (graph, label) from the previous round
    :return: adjacency structure — list of {neighbor_id: weight} dicts
    """
    if previous_info is not None:
        prev_graph, prev_label = previous_info
        for row, own_cluster in enumerate(prev_label):
            for nbr in prev_graph[row]:
                if prev_label[nbr - 1] == own_cluster:
                    continue
                # Neighbor lives in a different cluster: boost both directions.
                prev_graph[row][nbr] += increase_weight
                prev_graph[nbr - 1][row + 1] += increase_weight
        return prev_graph

    n_vertices = len(base)
    if n_vertices < k_graph + 1:
        raise Exception("build graph error, input dataset is too samll, do not meet the demand of number of edge")
    if k_graph + 1 > base_base_gnd.shape[1]:
        print("\033[32;1m Warning! the length of k_graph + 1 is larger than base_base_gnd could provide \033[0m")
        print("length of base_base_gnd %d" % (base_base_gnd.shape[1]))

    # Take k_graph + 1 columns because the first index of each row is the point
    # itself; shift every id by +1 because KaHIP vertex ids start at 1.
    neighbor_sets = [set(row) for row in (base_base_gnd[:, :k_graph + 1] + 1).tolist()]

    # Drop self-loops and symmetrize: every directed k-NN edge becomes
    # an undirected one (set.add is idempotent, so no membership test needed).
    for idx, nbrs in enumerate(neighbor_sets):
        nbrs.discard(idx + 1)
        for vertex_id in nbrs:
            neighbor_sets[vertex_id - 1].add(idx + 1)

    # Every edge starts out with unit weight.
    return [{vertex_id: 1 for vertex_id in nbrs} for nbrs in neighbor_sets]


def save_graph(graph, save_dir):
    """Write *graph* to '<save_dir>/graph.graph' in KaHIP edge-weight format.

    :param graph: adjacency list — one {neighbor_id: weight} dict per vertex,
        with 1-based neighbor ids (as produced by build_graph)
    :param save_dir: directory the graph file is written into (via dir_io)
    """
    vertices = len(graph)
    # Each undirected edge appears once per endpoint, so the sum of adjacency
    # sizes must be even and the true edge count is half of it.
    edges = 0
    for vecs in graph:
        edges += len(vecs)
    assert edges % 2 == 0
    # BUG FIX: was 'edges / 2', which yields a float in Python 3 and would be
    # passed as a non-integer edge count to the graph-file writer; use
    # integer division to keep it an int.
    edges = edges // 2

    save_dir = '%s/graph.graph' % save_dir
    print("save dir" + save_dir)
    dir_io.save_graph_edge_weight(save_dir, graph, vertices, edges)
    print("save graph complete")


def graph_partition(save_dir, kahip_dir, n_cluster):
    """Run the configured KaHIP partitioner on '<save_dir>/graph.graph'.

    Reads the module globals graph_partition_type / preconfiguration to choose
    between the serial 'kaffpa' binary and the MPI-based 'parhip' binary, then
    loads the resulting partition file.

    :param save_dir: directory holding graph.graph; partition.txt ends up here
    :param kahip_dir: root of the KaHIP installation (binaries under deploy/)
    :param n_cluster: number of partitions to produce
    :return: numpy array with one cluster id per vertex
    """
    if graph_partition_type == 'kaffpa':
        command = (f'{kahip_dir}/deploy/kaffpa {save_dir}/graph.graph '
                   f'--preconfiguration={preconfiguration} '
                   f'--output_filename={save_dir}/partition.txt --k={n_cluster:d}')
        print(command)
        os.system(command)
    elif graph_partition_type == 'parhip':
        # Use half the available cores as MPI ranks.
        n_process = multiprocessing.cpu_count() // 2
        command = (f'mpirun -n {n_process:d} {kahip_dir}/deploy/parhip '
                   f'{save_dir}/graph.graph --preconfiguration {preconfiguration} '
                   f'--save_partition --k {n_cluster:d}')
        print(command)
        os.system(command)
        # parhip drops its result as ./tmppartition.txtp; move it into place.
        dir_io.move_file('tmppartition.txtp', f'{save_dir}/partition.txt')
    labels = read_data.read_partition(f'{save_dir}/partition.txt')
    return np.array(labels)
