import numpy as np
import time
from Neural_Quantization.util import dir_io, read_data
import os
import multiprocessing

'''
{
    "type": "span_tree",
    "graph_partition": {
      "type": "parhip"
    }
}
'''
# Maximum number of iterations (not used in this chunk; presumably consumed elsewhere — TODO confirm)
max_iter = 40
# Module-level defaults for the KaHIP invocation performed by graph_partition().
graph_partition_type = 'parhip'
# KaHIP preset; valid values depend on the partitioner (e.g. eco, fastsocial, ecosocial — see KaHIP docs)
preconfiguration = 'fastsocial'


def dataset_partition(base, base_base_gnd, config):
    """Build and partition one graph per classifier, returning partition labels.

    For each of the ``n_classifier`` classifiers this: (1) runs the external
    C ``span_tree`` program to build a graph file, then (2) partitions that
    graph with KaHIP via :func:`graph_partition`, timing both steps.

    :param base: unused here; kept for interface compatibility with callers.
    :param base_base_gnd: unused here; kept for interface compatibility.
    :param config: dict providing 'dataset_partition', 'n_cluster',
        'n_classifier', 'kahip_dir', 'program_train_para_dir',
        'data_fname' and 'project_dir'.
    :return: ``(label_l, res_intermediate)`` — ``label_l`` is an ndarray of
        shape (n_classifier, n_points) of partition labels, and
        ``res_intermediate['dataset_partition']`` holds per-classifier timings.
    """
    # graph_partition() reads the module-level graph_partition_type and
    # preconfiguration.  Declare them global so the configuration chosen here
    # actually takes effect — previously these assignments created shadowing
    # locals and the module defaults were silently used instead.
    global graph_partition_type, preconfiguration

    dataset_partition_config = config['dataset_partition']
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    kahip_dir = config['kahip_dir']
    program_train_para_dir = config['program_train_para_dir']
    data_fname = config['data_fname']
    project_dir = config['project_dir']

    graph_partition_type = dataset_partition_config['graph_partition']['type']
    if graph_partition_type == 'kaffpa':
        # kaffpa presets: strong eco fast fastsocial ecosocial strongsocial
        preconfiguration = 'eco'
    elif graph_partition_type == 'parhip':
        # parhip presets: ecosocial fastsocial ultrafastsocial ecomesh fastmesh ultrafastmesh
        preconfiguration = 'ecosocial'

    res_intermediate = {}

    intermediate_l = []
    label_l = []

    for i in range(n_classifier):
        tmp_intermediate = {}
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, i)

        build_graph_start_time = time.time()
        # Invoke the external C span-tree builder.  The trailing integer
        # (100 + i) varies per classifier — presumably a random seed; confirm
        # against the span_tree command-line interface.
        build_tree_command = '%s/Span-Tree/build/span_tree %s/Span-Tree/init_graph/%s-text.uknng ' \
                             '%s/Classifier_%d/dataset_partition/graph.graph %d' % (
                                 project_dir, project_dir, data_fname, program_train_para_dir, i, 100 + i)
        print(build_tree_command)
        os.system(build_tree_command)  # NOTE(review): exit status is not checked
        build_graph_end_time = time.time()
        tmp_intermediate['build_tree_time'] = build_graph_end_time - build_graph_start_time

        graph_partition_start_time = time.time()
        label = graph_partition(save_dir, kahip_dir, n_cluster)
        label_l.append(label)
        graph_partition_end_time = time.time()
        tmp_intermediate['graph_partition_time'] = graph_partition_end_time - graph_partition_start_time

        intermediate_l.append(tmp_intermediate)
    label_l = np.array(label_l)
    res_intermediate['dataset_partition'] = intermediate_l
    return label_l, res_intermediate


def graph_partition(save_dir, kahip_dir, n_cluster, partition_type=None, preconfig=None):
    """Invoke KaHIP on ``save_dir/graph.graph`` and return the node labels.

    Runs either the serial ``kaffpa`` or MPI-parallel ``parhip`` partitioner,
    then reads the resulting ``partition.txt``.

    :param save_dir: directory containing ``graph.graph``; ``partition.txt``
        ends up here as well.
    :param kahip_dir: KaHIP installation root (contains ``deploy/``).
    :param n_cluster: number of partitions (``--k``).
    :param partition_type: 'kaffpa' or 'parhip'; defaults to the module-level
        ``graph_partition_type`` (previous behavior relied on that global
        exclusively, which made the caller-side configuration ineffective).
    :param preconfig: KaHIP preconfiguration name; defaults to the
        module-level ``preconfiguration``.
    :return: numpy array of per-node partition labels.
    """
    # Fall back to the module globals so existing 3-argument callers behave
    # exactly as before.
    if partition_type is None:
        partition_type = graph_partition_type
    if preconfig is None:
        preconfig = preconfiguration

    if partition_type == 'kaffpa':
        kahip_command = '%s/deploy/kaffpa %s/graph.graph --preconfiguration=%s --output_filename=%s/partition.txt ' \
                        '--k=%d' % (
                            kahip_dir, save_dir, preconfig, save_dir, n_cluster)
        print(kahip_command)
        os.system(kahip_command)  # NOTE(review): exit status is not checked
    elif partition_type == 'parhip':
        kahip_command = 'mpirun -n %d %s/deploy/parhip %s/graph.graph --preconfiguration %s ' \
                        '--save_partition --k %d' % (
                            multiprocessing.cpu_count() // 2, kahip_dir, save_dir, preconfig, n_cluster)
        print(kahip_command)
        os.system(kahip_command)
        # parhip writes its output to ./tmppartition.txtp in the working
        # directory; relocate it next to the graph for uniform reading below.
        dir_io.move_file('tmppartition.txtp', '%s/partition.txt' % save_dir)
    partition_dir = '%s/partition.txt' % save_dir
    labels = read_data.read_partition(partition_dir)
    labels = np.array(labels)
    return labels
