import numpy as np
from Neural_Weight_Quantization.util import dir_io, vecs_io
import time
import os
import multiprocessing

'''
Combine the per-classifier score tables into a single total score table.
'''


def integrate_score(cluster_score_l, partition_info, config, save=False):
    """Combine per-classifier score tables into one total score table.

    Queries are striped across one worker process per CPU core; each worker
    writes its rows into a manager-backed shared list, which is then
    gathered into a single float32 array.

    :param cluster_score_l: per-classifier score arrays; each indexed as
        [query][cluster] -- assumes shape (n_query, n_cluster), TODO confirm
    :param partition_info: tuple whose first element holds the
        per-classifier item weights (consumed by the worker routine)
    :param config: dict with at least 'n_item', 'integrate_score' and,
        when save=True, 'program_train_para_dir'
    :param save: persist the table as total_score_table.npy when True
    :return: (score_table, intermediate) where intermediate records the
        elapsed wall-clock time in seconds
    """
    start_time = time.time()

    n_query = cluster_score_l[0].shape[0]
    # Manager-backed list so worker processes can assign results in place.
    share_score_table = multiprocessing.Manager().list([0] * n_query)

    manager = multiprocessing.managers.BaseManager()
    manager.register('IntegrateScoreTable', IntegrateScoreTable)
    manager.start()
    parallel_method = integrate_score_factory(config)
    n_process = multiprocessing.cpu_count()  # hoisted: used three times below
    parallel_obj = manager.IntegrateScoreTable(cluster_score_l, partition_info,
                                               config['n_item'], n_process)
    res_l = []
    pool = multiprocessing.Pool(n_process)
    for i in range(n_process):
        res = pool.apply_async(parallel_method, args=(parallel_obj, share_score_table, i))
        res_l.append(res)
    pool.close()
    pool.join()
    # Surface any worker exception; apply_async swallows them unless the
    # result is fetched, which would otherwise leave silent zero rows.
    for res in res_l:
        res.get()
    manager.shutdown()

    share_score_table = np.array(share_score_table, dtype=np.float32)
    print(share_score_table)

    if save:
        total_score_table_dir = '%s/total_score_table.npy' % config['program_train_para_dir']
        dir_io.save_numpy(total_score_table_dir, share_score_table)
    end_time = time.time()
    intermediate = {
        'time': end_time - start_time
    }
    print('save score table success')
    return share_score_table, intermediate


def integrate_score_factory(config):
    """Resolve the parallel integration routine named in *config*.

    :param config: dict carrying config['integrate_score']['type']
    :return: the worker function matching the configured type
    :raises Exception: when the configured type is not supported
    """
    score_type = config['integrate_score']['type']
    if score_type != 'dot_product':
        raise Exception('do not support the type of integrate score')
    return dot_product_parallel


class IntegrateScoreTable:
    """Bundle of the shared, read-only data every worker process needs.

    Registered with a multiprocessing BaseManager so workers receive a
    proxy and fetch everything in one call.
    """

    def __init__(self, cluster_score_l, partition_info, n_item, total_process):
        # Only the first element of partition_info (the weight lists) is kept.
        weight_l_l = partition_info[0]
        self.n_item = n_item
        self.total_process = total_process
        self.weight_l_l = weight_l_l
        self.cluster_score_l = cluster_score_l

    def get_share_data(self):
        """Return (cluster_score_l, weight_l_l, total_process, n_item)."""
        return self.cluster_score_l, self.weight_l_l, self.total_process, self.n_item


'''
Compute the score table entries for each query assigned to this worker.
'''


def dot_product_parallel(obj, share_score_table, start_idx):
    """Fill this worker's share of the total score table.

    Queries are striped across workers: the worker with rank start_idx
    handles queries start_idx, start_idx + total_process, ... For each of
    its queries, the score of item j is the product over classifiers of
    dot(query_cluster_scores, item_weights[j]).

    :param obj: (proxy to an) object exposing get_share_data()
    :param share_score_table: shared list indexed by query; entry i
        receives a float32 array of length n_item
    :param start_idx: this worker's rank, in [0, total_process)
    """
    cluster_score_l, weight_l_l, total_process, n_item = obj.get_share_data()
    # cluster_score_l: n_classifier x (n_query, n_cluster)
    n_classifier = len(cluster_score_l)
    # Hoist the (n_item, n_cluster) weight matrices: they are invariant
    # across queries, and a single matrix-vector product below replaces
    # the original O(n_item) Python loop of per-item np.dot calls.
    weight_mat_l = [np.asarray(weight_l_l[k]) for k in range(n_classifier)]
    for i in range(start_idx, cluster_score_l[0].shape[0], total_process):
        if i % 50 == 0: print("get score table " + str(i))
        tmp_score_table = np.ones(shape=n_item, dtype=np.float32)
        # Multiply the per-classifier dot products together, per item.
        for k in range(n_classifier):
            tmp_score_table *= weight_mat_l[k] @ cluster_score_l[k][i]
        share_score_table[i] = tmp_score_table
    print("finish parallel")


def integrate_score_cpp(config):
    """Run the compiled C++ integrator and load the table it produced.

    :param config: dict with 'program_train_para_dir', 'n_classifier',
        'project_dir' and config['integrate_score']['type']
    :return: (score_table, intermediate) where intermediate records the
        elapsed wall-clock time in seconds
    :raises Exception: when the configured integrate-score type is not
        supported
    """
    program_train_para_dir = config['program_train_para_dir']
    n_classifier = config['n_classifier']
    project_dir = config['project_dir']

    start_time = time.time()

    integrate_type = config['integrate_score']['type']
    if integrate_type != 'dot_product':
        raise Exception("not support integrate score type")
    command = '%s/Neural_Weight_Quantization/CPP_Project/integrate_score/build/dot_product %s %d' % (
        project_dir, program_train_para_dir, n_classifier)
    os.system(command)

    # The binary writes its result next to the training parameters.
    score_table_fname = '%s/score_table.fvecs' % program_train_para_dir
    score_table, dim = vecs_io.fvecs_read(score_table_fname)

    end_time = time.time()
    intermediate = {'time': end_time - start_time}
    print('save score table success')
    return score_table, intermediate
