import numpy as np
from Neural_Overlapping_Quantization.util import dir_io
import time
import os
import multiprocessing
from multiprocessing.managers import BaseManager

'''
Combine the per-classifier score tables into a single total score table.
'''


def integrate_score(cluster_score_l, partition_info, config, save=False):
    """Integrate the per-classifier score tables into one total score table.

    Fans the per-query work out across all CPU cores: each worker process
    computes the combined score rows for a strided subset of queries, and the
    rows are stitched back together here.

    :param cluster_score_l: per-classifier score arrays, each of shape
        (n_query, n_cluster)
    :param partition_info: (label_l, label_map_l) describing, per classifier,
        which cluster(s) each item belongs to
    :param config: dict providing config['integrate_score']['type'],
        config['n_item'] and config['program_train_para_dir']
    :param save: when True, also persist the table to disk
    :return: (score_table, intermediate) where score_table is a float32 array
        of shape (n_query, n_item) and intermediate records the elapsed time
    """
    start_time = time.time()

    # Host the shared data in a manager process so every worker reads the
    # same object instead of pickling a copy per task.
    manager = BaseManager()
    manager.register('IntegrateScoreTable', IntegrateScoreTable)
    manager.start()
    try:
        parallel_method = integrate_score_factory(config)
        n_worker = multiprocessing.cpu_count()
        parallel_obj = manager.IntegrateScoreTable(cluster_score_l, partition_info,
                                                   config['n_item'], n_worker)
        res_l = []
        pool = multiprocessing.Pool(n_worker)
        for i in range(n_worker):
            # worker i handles queries i, i + n_worker, i + 2 * n_worker, ...
            res = pool.apply_async(parallel_method, args=(parallel_obj, i))
            res_l.append(res)
        pool.close()
        pool.join()

        # merge the per-worker {query_idx: row} dicts into one dense table
        score_table = [0] * cluster_score_l[0].shape[0]
        for tmp_res in res_l:
            tmp_res = tmp_res.get()
            for idx in tmp_res:
                score_table[idx] = tmp_res[idx]
    finally:
        # BUGFIX: the manager was never stopped, leaking one child process
        # per call.
        manager.shutdown()

    score_table = np.array(score_table, dtype=np.float32)

    if save:
        total_score_table_dir = '%s/total_score_table.npy' % config['program_train_para_dir']
        dir_io.save_numpy(total_score_table_dir, score_table)
        # BUGFIX: the success message used to print even when save=False
        print('save score table success')
    end_time = time.time()
    intermediate = {
        'time': end_time - start_time
    }
    return score_table, intermediate


def integrate_score_factory(config):
    """Select the parallel worker function for the configured strategy.

    :param config: dict whose config['integrate_score']['type'] names the
        integration strategy ('multiply', 'multiply_average', 'add' or
        'multiply_sum')
    :return: one of the *_parallel worker functions
    :raises ValueError: if the type is not recognised; the offending value is
        included in the message (previously a bare Exception with no detail).
        ValueError is a subclass of Exception, so existing handlers still work.
    """
    _type = config['integrate_score']['type']
    if _type == 'multiply':
        return multiply_parallel
    if _type == 'multiply_average':
        return multiply_average_parallel
    if _type == 'add':
        return add_parallel
    if _type == 'multiply_sum':
        return multiply_sum_parallel
    raise ValueError('do not support the type of integrate score: %s' % _type)


class IntegrateScoreTable:
    """Read-only data holder shared with worker processes via a BaseManager.

    Bundles the per-classifier cluster scores together with the partition
    metadata so each worker can fetch everything in a single proxy call.
    """

    def __init__(self, cluster_score_l, partition_info, n_item, total_process):
        n_query = cluster_score_l[0].shape[0]
        # NOTE(review): this table is allocated but never read or written in
        # this module — it may be dead weight; confirm before removing.
        self.score_table = np.zeros(shape=(n_query, n_item), dtype=np.float32)
        self.cluster_score_l = cluster_score_l
        # partition_info carries (label_l, label_map_l)
        self.label_l = partition_info[0]
        self.label_map_l = partition_info[1]
        self.total_process = total_process
        self.n_item = n_item

    def get_share_data(self):
        """Return every shared field in one round trip to the manager."""
        return (self.cluster_score_l, self.label_l, self.label_map_l,
                self.total_process, self.n_item)


'''
Worker functions: each computes the combined score-table rows for a strided
subset of queries.
'''


def multiply_parallel(obj, start_idx):
    """Worker: combine classifier scores for a strided subset of queries by
    multiplying, per item, the score of every cluster the item belongs to.

    :param obj: IntegrateScoreTable (or proxy) exposing get_share_data()
    :param start_idx: this worker's offset; it handles queries
        start_idx, start_idx + total_process, ...
    :return: dict mapping query index -> np.ndarray of shape (n_item,)
    """
    cluster_score_l, label_l, label_map_l, total_process, n_item = obj.get_share_data()
    score_table = {}
    # cluster_score_l: n_classifier x n_query x n_cluster
    n_query = cluster_score_l[0].shape[0]
    n_cluster = cluster_score_l[0].shape[1]
    # iteration for every query
    for i in range(start_idx, n_query, total_process):
        if i % 50 == 0:
            print("get score table " + str(i))
        # multiplicative identity as the starting value
        tmp_score_table = np.ones(shape=n_item, dtype=np.float32)
        # iteration for each classifier
        for k, tmp_cluster_score in enumerate(cluster_score_l):
            tmp_label_map = label_map_l[k]
            # iteration for every cluster
            for j in range(n_cluster):
                for item_idx in tmp_label_map[j]:
                    tmp_score_table[item_idx] *= tmp_cluster_score[i][j]
        # FIX: this assignment used to sit inside the cluster loop; it stores
        # the same array object, so hoisting it out is behavior-neutral and
        # avoids one dict write per cluster.
        score_table[i] = tmp_score_table
    print("finish parallel")
    return score_table


def add_parallel(obj, start_idx):
    """Worker: combine classifier scores for a strided subset of queries by
    summing, per item, the score of every cluster the item belongs to.

    :param obj: IntegrateScoreTable (or proxy) exposing get_share_data()
    :param start_idx: this worker's offset; it handles queries
        start_idx, start_idx + total_process, ...
    :return: dict mapping query index -> np.ndarray of shape (n_item,)
    """
    cluster_score_l, label_l, label_map_l, total_process, n_item = obj.get_share_data()
    score_table = {}
    # cluster_score_l: n_classifier x n_query x n_cluster
    n_query = cluster_score_l[0].shape[0]
    n_cluster = cluster_score_l[0].shape[1]
    # iteration for every query
    for i in range(start_idx, n_query, total_process):
        if i % 50 == 0:
            print("get score table " + str(i))
        # BUGFIX: the accumulator was initialised with np.ones — a copy-paste
        # from the multiplicative variant. The additive identity is zero;
        # np.ones silently added a uniform +1 offset to every item score.
        tmp_score_table = np.zeros(shape=n_item, dtype=np.float32)
        # iteration for each classifier
        for k, tmp_cluster_score in enumerate(cluster_score_l):
            tmp_label_map = label_map_l[k]
            # iteration for every cluster
            for j in range(n_cluster):
                for item_idx in tmp_label_map[j]:
                    tmp_score_table[item_idx] += tmp_cluster_score[i][j]
        # moved out of the cluster loop: the same array object was being
        # re-assigned once per cluster
        score_table[i] = tmp_score_table
    print("finish parallel")
    return score_table


def multiply_average_parallel(obj, start_idx):
    """Worker: per item, average the scores of all clusters the item belongs
    to within each classifier, then multiply those averages across classifiers.

    :param obj: IntegrateScoreTable (or proxy) exposing get_share_data()
    :param start_idx: this worker's offset into the strided query range
    :return: dict mapping query index -> np.ndarray of shape (n_item,)
    """
    cluster_score_l, label_l, label_map_l, total_process, n_item = obj.get_share_data()
    result = {}
    # cluster_score_l: n_classifier x n_query x n_cluster
    n_query = cluster_score_l[0].shape[0]
    for query_idx in range(start_idx, n_query, total_process):
        if query_idx % 50 == 0:
            print("get score table " + str(query_idx))
        row = np.ones(shape=n_item, dtype=np.float32)
        for classifier_idx, classifier_score in enumerate(cluster_score_l):
            query_score = classifier_score[query_idx]
            item_label_l = label_l[classifier_idx]
            for item_idx in range(n_item):
                # fancy-index the clusters this item belongs to, then average
                row[item_idx] *= np.average(query_score[item_label_l[item_idx]])
        result[query_idx] = row
    print("finish parallel")
    return result


def multiply_sum_parallel(obj, start_idx):
    """Worker: per item, sum the scores of all clusters the item belongs to
    within each classifier, then multiply those sums across classifiers.

    :param obj: IntegrateScoreTable (or proxy) exposing get_share_data()
    :param start_idx: this worker's offset into the strided query range
    :return: dict mapping query index -> np.ndarray of shape (n_item,)
    """
    cluster_score_l, label_l, label_map_l, total_process, n_item = obj.get_share_data()
    result = {}
    # cluster_score_l: n_classifier x n_query x n_cluster
    n_query = cluster_score_l[0].shape[0]
    for query_idx in range(start_idx, n_query, total_process):
        if query_idx % 50 == 0:
            print("get score table " + str(query_idx))
        row = np.ones(shape=n_item, dtype=np.float32)
        for classifier_idx, classifier_score in enumerate(cluster_score_l):
            query_score = classifier_score[query_idx]
            item_label_l = label_l[classifier_idx]
            for item_idx in range(n_item):
                # fancy-index the clusters this item belongs to, then sum
                row[item_idx] *= np.sum(query_score[item_label_l[item_idx]])
        result[query_idx] = row
    print("finish parallel")
    return result
