import numpy as np
import time
from Neural_Quantization.util import dir_io
import sklearn.cluster as cls
import os
import multiprocessing

'''
{
    "type": "kmeans_multiple"
}
'''
max_iter = 40


def dataset_partition(base, base_base_gnd, config):
    """Partition the base dataset with multiple independent k-means runs.

    Fits one KMeans model per classifier, each with a distinct random
    seed (100 + i) so the resulting partitions differ, writes every
    label assignment to
    ``<program_train_para_dir>/Classifier_<i>/dataset_partition/partition.txt``
    and records the wall-clock time of each fit.

    Parameters
    ----------
    base : np.ndarray of shape (n_points, dimension)
        Vectors to cluster.
    base_base_gnd : unused here; kept for interface compatibility.
    config : dict
        Must contain 'n_cluster', 'n_classifier' and
        'program_train_para_dir'; other keys are ignored.

    Returns
    -------
    (label_l, res_intermediate)
        label_l: int array of shape (n_classifier, n_points) with the
        cluster id of every base point for every run.
        res_intermediate: {'dataset_partition': [{'kmeans_time': ...}, ...]}.
    """
    n_cluster = config['n_cluster']
    n_classifier = config['n_classifier']
    program_train_para_dir = config['program_train_para_dir']

    res_intermediate = {}
    intermediate_l = []
    label_l = []

    for i in range(n_classifier):
        tmp_intermediate = {}
        save_dir = '%s/Classifier_%d/dataset_partition' % (program_train_para_dir, i)
        # np.savetxt does not create parent directories; without this a
        # fresh run crashes with FileNotFoundError.
        os.makedirs(save_dir, exist_ok=True)

        kmeans_start_time = time.time()
        # distinct random_state per classifier so each run yields a
        # different partition of the same data
        kmeans_model = cls.KMeans(n_clusters=n_cluster, init='k-means++', max_iter=max_iter, random_state=100 + i)
        kmeans_model.fit(base)
        kmeans_end_time = time.time()
        label = kmeans_model.labels_
        np.savetxt('%s/partition.txt' % save_dir, label, fmt='%d')

        tmp_intermediate['kmeans_time'] = kmeans_end_time - kmeans_start_time
        label_l.append(label)
        intermediate_l.append(tmp_intermediate)
    label_l = np.array(label_l)
    res_intermediate['dataset_partition'] = intermediate_l
    return label_l, res_intermediate


def segment_data(n_codebook, dimension, data):
    """Split `data` column-wise into `n_codebook` contiguous segments.

    Each of the first n_codebook - 1 segments spans
    dimension // n_codebook columns; the last segment absorbs any
    remaining columns. Every segment is cast to float32.

    Returns (seg_l, dim_l): the list of float32 segments and the list
    of their per-segment column counts.
    """
    width = dimension // n_codebook
    # stop of None means "to the end" for the final segment
    bounds = [
        (k * width, (k + 1) * width if k < n_codebook - 1 else None)
        for k in range(n_codebook)
    ]
    seg_l = [data[:, lo:hi].astype(np.float32) for lo, hi in bounds]
    dim_l = [len(seg[0]) for seg in seg_l]
    return seg_l, dim_l
