import argparse
import hashlib
import os.path
import pickle
import random

from allpairspy import AllPairs

import utils


def combine_distribution(distribution_path, constraints_dir='./constraints', store_dir='./data'):
    """Read a serialized distribution map and pickle the generated test cases.

    The first line of ``distribution_path`` must be the repr of a dict with
    keys ``'tensor_info'`` and ``'other_info'``.  Combined test cases are
    produced by :func:`combine_info` and each one is written to
    ``store_dir/<basename>/<sha1-of-testcase>.p``.

    Parameters
    ----------
    distribution_path : str
        Path to the distribution file; its basename names the API.
    constraints_dir : str
        Directory holding the per-API constraint file (same basename).
    store_dir : str
        Root directory under which pickled test cases are stored.
    """
    with open(distribution_path, 'r', encoding='utf-8') as f:
        source_distribution = f.readline().strip()
    # SECURITY: eval() executes arbitrary code from the input file. Only use
    # with trusted distribution files; ast.literal_eval would be safer if the
    # repr is guaranteed to contain only Python literals — TODO confirm.
    distribution_map = eval(source_distribution)
    tensor_info = distribution_map['tensor_info']
    other_info = distribution_map['other_info']

    api = os.path.basename(distribution_path)
    api_dir = os.path.join(store_dir, api)
    # Creates store_dir and api_dir in one call; exist_ok avoids the
    # check-then-create race of separate os.path.exists() tests.
    os.makedirs(api_dir, exist_ok=True)

    constraint_path = os.path.join(constraints_dir, api)

    combined_testcases = combine_info(tensor_info, other_info, constraint_path)
    for combined_testcase in combined_testcases:
        # Content-addressed file name: identical test cases collapse to one file.
        fname = hashlib.sha1(str(combined_testcase).encode('utf-8')).hexdigest() + '.p'
        # Context manager closes the handle; the original open() leaked it.
        with open(os.path.join(api_dir, fname), 'wb') as out:
            pickle.dump(combined_testcase, out)


def combine_info(tensor_info, other_info, constraint_path):
    """Generate combined test cases from tensor and non-tensor parameter info.

    Pairwise-combines (shape, dtype) per tensor parameter to build candidate
    tensor values, then pairwise-combines the tensor parameters themselves,
    filtering each combination against the rules loaded from
    ``constraint_path`` and attaching randomly sampled non-tensor parameters.

    Parameters
    ----------
    tensor_info : dict
        Maps tensor parameter name -> info dict with at least the keys
        'shape' (iterable of shapes) and 'value_distribution'
        (dict mapping dtype -> distribution, consumed by utils.generate_tensor).
    other_info : dict
        Non-tensor parameter distributions; see generate_other_info.
    constraint_path : str
        Path handed to utils.construct_rules to build rejection rules.

    Returns
    -------
    list of dict
        Each dict maps parameter name -> generated value.
    """
    # Flatten the per-tensor info into parallel lists indexed by position.
    tensor_type_names = []
    tensor_type_shapes = []
    tensor_type_dtypes = []
    tensor_type_value_distribution = []
    for tensor_type_name, tensor_type_info in tensor_info.items():
        tensor_type_names.append(tensor_type_name)
        tensor_type_shapes.append(
            [list(shape) for shape in tensor_info[tensor_type_name]['shape']]
        )
        value_distribution = tensor_info[tensor_type_name]['value_distribution']
        # The dtypes are exactly the keys of the value distribution.
        tensor_type_dtypes.append(
            list(value_distribution.keys())
        )
        tensor_type_value_distribution.append(value_distribution)

    tensor_type_values = []
    # generate tensor type parameter
    for i in range(len(tensor_type_shapes)):
        shape = tensor_type_shapes[i]
        dtype = tensor_type_dtypes[i]
        value_distribution = tensor_type_value_distribution[i]

        tensor_type_value = []
        # Pairwise (all-pairs) combination of shape x dtype for this tensor.
        parameters = [shape, dtype]
        pairs = AllPairs(parameters, n=2)
        for pair in pairs:
            # pair[0] is a shape, pair[1] a dtype key into value_distribution.
            tensor_type_value.append(utils.generate_tensor(pair[0], pair[1], value_distribution[pair[1]]))
        tensor_type_values.append(tensor_type_value)

    n = len(tensor_type_values)
    combined_test_cases = []

    rules = utils.construct_rules(constraint_path)

    def is_valid_combination(values, names):
        # NOTE: this filter has a deliberate side effect — it records every
        # accepted *complete* combination into combined_test_cases.
        # AllPairs may call it with partial prefixes of `values`; zip()
        # truncates to the shorter list, so `dictionary` can be smaller
        # than len(names) for partial candidates.
        dictionary = dict(zip(names, values))
        # Freshly sampled non-tensor parameters are merged into every candidate.
        other_info_dictionary = generate_other_info(other_info)
        dictionary.update(other_info_dictionary)

        for rule in rules:
            try:
                # A rule returning truthy means the combination is invalid.
                if rule(dictionary):
                    return False
            except KeyError:
                # Rule referenced a parameter missing from this (possibly
                # partial) candidate — cannot reject on that basis, skip it.
                pass

        # Only record complete combinations: the merged dict must contain an
        # entry for every tensor name plus every sampled other parameter
        # (a shorter dict indicates a partial prefix or key collision).
        if len(dictionary) >= len(names) + len(other_info_dictionary):
            combined_test_cases.append(dictionary)

        return True

    if n >= 2:
        # Drive AllPairs purely for its filter callbacks; the accepted
        # complete combinations accumulate in combined_test_cases.
        pairs = AllPairs(tensor_type_values, n=2,
                         filter_func=lambda values: is_valid_combination(values, tensor_type_names))
        for _ in pairs:
            continue
    elif n == 1:
        # AllPairs needs >= 2 parameter lists; validate single-tensor cases directly.
        for val in tensor_type_values[0]:
            is_valid_combination([val], tensor_type_names)
    else:
        # No tensor parameters at all: emit 100 randomly sampled
        # other-parameter-only test cases (unfiltered by rules).
        for i in range(100):
            combined_test_cases.append(generate_other_info(other_info))

    return combined_test_cases


def generate_other_info(other_info):
    """Sample one concrete value for each non-tensor parameter.

    Parameters
    ----------
    other_info : dict
        Maps parameter name -> {candidate value: weight} distribution.

    Returns
    -------
    dict
        Maps each parameter name to a single value drawn according to
        its weights.
    """
    return {
        name: random.choices(list(dist.keys()), list(dist.values()), k=1)[0]
        for name, dist in other_info.items()
    }


if __name__ == '__main__':
    # CLI entry point: forward the distribution file path to the combiner.
    cli = argparse.ArgumentParser()
    cli.add_argument('--distribution_path', type=str)
    combine_distribution(cli.parse_args().distribution_path)
