import argparse
import math
from tensorflow.keras import backend as K
import h5py
from run_experiment import load_params
from tensorflow.keras.models import load_model, clone_model
import numpy as np
import os
import time

def generate_model_by_model_mutation(model, operator, fault_data):
    """Dispatch a model-level mutation operator by its short name.

    :param model: model loaded by keras (tensorflow backend default)
    :param operator: mutation operator name, one of 'WS', 'GF', 'NEB', 'NAI', 'NS'
    :param fault_data: (layer_index, neuron_index) pairs selecting the neurons to mutate
    :return: the mutated model object, or None for an unknown operator
    """
    # Lambdas defer the lookup of each operator function until one is chosen,
    # so an unknown operator never touches the mutator names.
    handlers = {
        'WS': lambda: WS_mut(model=model, fault_data=fault_data),
        'GF': lambda: GF_mut(model=model, fault_data=fault_data),
        'NEB': lambda: NEB_mut(model=model, fault_data=fault_data),
        'NAI': lambda: NAI_mut(model=model, fault_data=fault_data),
        'NS': lambda: NS_mut(model=model, fault_data=fault_data),
    }
    chosen = handlers.get(operator)
    return chosen() if chosen is not None else None


def get_fault_data(fault_path):
    """Load suspicious-neuron fault entries from an HDF5 file.

    Reads every dataset under the 'group1' group and returns each as a plain
    Python list, in the group's key-iteration order.

    :param fault_path: path to the HDF5 file produced by the fault-localization step
    :return: list of lists, one per dataset in 'group1'
    """
    # Context manager guarantees the file handle is closed; the original
    # leaked it on every call.
    with h5py.File(fault_path, 'r') as f:
        gset = f['group1']
        fault_data = [list(gset[key]) for key in gset.keys()]
    print(fault_data)
    return fault_data


def NEB_mut(model, fault_data, mutated_layer_indices=None):
    """Neuron Effect Blocking (NEB) mutant: zero every incoming weight (and the
    bias, when present) of each selected neuron so its output is suppressed.

    :param model: source Keras model; it is cloned, not modified in place
    :param fault_data: iterable of (layer_index, neuron_index) pairs
    :param mutated_layer_indices: unused, kept for interface compatibility
    :return: a new mutated model
    """
    mutant = clone_model(model)
    mutant.set_weights(model.get_weights())
    for entry in fault_data:
        victim = mutant.layers[entry[0]]
        params = victim.get_weights()
        if not params:
            continue
        if len(params) == 1:
            # Weights-only layer: transpose so row entry[1] is the neuron's
            # incoming weight vector, zero it, transpose back.
            kernel = params[0].transpose()
            kernel[entry[1]] = np.zeros(kernel[entry[1]].shape)
            victim.set_weights([kernel.transpose()])
        else:
            kernel, bias = params
            kernel = kernel.transpose()
            kernel[entry[1]] = np.zeros(kernel[entry[1]].shape)
            bias[entry[1]] = 0
            victim.set_weights((kernel.transpose(), bias))
    return mutant


def NAI_mut(model, fault_data, mutated_layer_indices=None):
    """Neuron Activation Inverse (NAI) mutant: flip the sign of every incoming
    weight (and the bias, when present) of each selected neuron, inverting its
    pre-activation value.

    :param model: source Keras model; it is cloned, not modified in place
    :param fault_data: iterable of (layer_index, neuron_index) pairs
    :param mutated_layer_indices: unused, kept for interface compatibility
    :return: a new mutated model
    """
    mutant = clone_model(model)
    mutant.set_weights(model.get_weights())
    for entry in fault_data:
        victim = mutant.layers[entry[0]]
        params = victim.get_weights()
        if not params:
            continue
        if len(params) == 1:
            # Transpose so row entry[1] is the neuron's incoming weight vector.
            kernel = params[0].transpose()
            kernel[entry[1]] *= -1
            victim.set_weights([kernel.transpose()])
        else:
            kernel, bias = params
            kernel = kernel.transpose()
            kernel[entry[1]] *= -1
            bias[entry[1]] *= -1
            victim.set_weights((kernel.transpose(), bias))
    return mutant


def WS_mut(model, fault_data, mutated_layer_indices=None):
    """Weight Shuffling (WS) mutant: randomly shuffle the incoming weights of
    each selected neuron in Conv2D and Dense layers.

    Bug fixed: ``np.random.shuffle`` shuffles in place and returns ``None``;
    the original assigned its return value back into the column, which filled
    the selected neuron's weights with NaN instead of shuffled values.

    :param model: source Keras model; it is cloned, not modified in place
    :param fault_data: iterable of (layer_index, neuron_index) pairs
    :param mutated_layer_indices: unused, kept for interface compatibility
    :return: a new mutated model
    """
    WS_model = clone_model(model)
    WS_model.set_weights(model.get_weights())
    layers = WS_model.layers
    for data in fault_data:
        layer = layers[data[0]]
        weights = layer.get_weights()
        layer_name = type(layer).__name__
        if layer_name in ("Conv2D", "Dense") and len(weights) != 0:
            kernel = weights[0]
            if kernel.ndim > 1:
                # Flatten every input dimension so column data[1] holds all
                # weights feeding output channel/neuron data[1]; this is the
                # Conv2D (w*h*in, out) reshape and the identity for Dense.
                flat = np.reshape(kernel.copy(), (-1, kernel.shape[-1]))
                column = flat[:, data[1]].copy()
                np.random.shuffle(column)  # in place; returns None
                flat[:, data[1]] = column
                weights[0] = np.reshape(flat, kernel.shape)
        # Unconditional write-back mirrors the original control flow.
        layer.set_weights(weights)
    return WS_model


def GF_mut(model, fault_data, distribution='normal', STD=0.1, lower_bound=None, upper_bound=None):
    """Gaussian Fuzzing (GF) mutant: add zero-mean Gaussian noise to the
    incoming weights of each selected neuron, scaled by the neuron's own
    weight standard deviation times ``STD``.

    Bug fixed: the original rebound the ``STD`` parameter inside the loop
    (``STD = sqrt(var) * STD``), so the noise ratio compounded across
    successive fault entries instead of staying constant.

    :param model: source Keras model; it is cloned, not modified in place
    :param fault_data: iterable of (layer_index, neuron_index) pairs
    :param distribution: unused, kept for interface compatibility
    :param STD: noise scale as a fraction of the neuron's weight std
    :param lower_bound: unused, kept for interface compatibility
    :param upper_bound: unused, kept for interface compatibility
    :return: a new mutated model
    """
    GF_model = clone_model(model)
    GF_model.set_weights(model.get_weights())
    layers = GF_model.layers
    for data in fault_data:
        layer = layers[data[0]]
        weights = layer.get_weights()
        if len(weights) > 0:
            # Transpose so row data[1] is the neuron's incoming weight vector;
            # biases (weights[1], if any) are left untouched, as before.
            weights_w = weights[0].transpose()
            scale = math.sqrt(weights_w[data[1]].var()) * STD
            weights_w[data[1]] += np.random.normal(scale=scale, size=weights_w[data[1]].shape)
            weights[0] = weights_w.transpose()
            layer.set_weights(weights)
    return GF_model


def NS_mut(model, fault_data, mutated_layer_indices=None):
    """Neuron Switch (NS) mutant: swap the incoming weights (and biases, when
    present) of each selected neuron with those of one randomly chosen neuron
    in the same layer.

    :param model: source Keras model; it is cloned, not modified in place
    :param fault_data: iterable of (layer_index, neuron_index) pairs
    :param mutated_layer_indices: unused, kept for interface compatibility
    :return: a new mutated model
    """
    mutant = clone_model(model)
    mutant.set_weights(model.get_weights())
    for entry in fault_data:
        victim = mutant.layers[entry[0]]
        params = victim.get_weights()
        if not params:
            continue
        if len(params) == 1:
            kernel = params[0].transpose()
            # First element of a fresh permutation = one uniformly random
            # partner neuron (may equal entry[1], in which case it's a no-op).
            partner = np.random.permutation(kernel.shape[0])[0]
            kernel[[partner, entry[1]]] = kernel[[entry[1], partner]]
            victim.set_weights([kernel.transpose()])
        else:
            kernel, bias = params
            kernel = kernel.transpose()
            partner = np.random.permutation(kernel.shape[0])[0]
            kernel[[partner, entry[1]]] = kernel[[entry[1], partner]]
            bias[[partner, entry[1]]] = bias[[entry[1], partner]]
            victim.set_weights((kernel.transpose(), bias))
    return mutant

def get_trainable_layers(model):
    """Return indices of the layers (skipping layer 0, presumably the input
    layer) that carry one or two weight arrays, i.e. weight-bearing layers a
    mutation operator can target.

    Improvements over the original: the unused loop counter is gone, the index
    comes from ``enumerate`` (correct even if equal layer objects repeat,
    unlike ``list.index``), and the bare ``except:`` is narrowed.

    :param model: Keras-style model exposing a ``layers`` list
    :return: list of integer layer indices
    """
    trainable_layers = []
    for index, layer in enumerate(model.layers[1:], start=1):
        try:
            n_weights = len(layer.get_weights())
        except Exception:
            # Layers without retrievable weights are simply skipped.
            continue
        if n_weights in (1, 2):
            trainable_layers.append(index)
    return trainable_layers

def run_mutant(params):
    """Generate and save mutated models for every class / operator combination.

    For each class index and each operator in ``mutate_ops``, this picks
    ``params.pre_neural_num`` random (layer, neuron) targets in the original
    model, applies the operator via generate_model_by_model_mutation, and
    saves each mutant's weights to a name-encoded path under
    ``params.mutant_path``.

    :param params: argparse namespace; resolved by load_params into the full
        experiment parameter object (model/data names, paths, class count, ...)
    """
    params = load_params(params)
    mutate_ops = ['NEB', 'NAI', 'WS', 'GF', 'NS']
    mutate_op = 'GF'  # NOTE(review): dead assignment — immediately shadowed by the operator loop below
    model = params.model
    data = params.data
    approach = params.approach
    model_path = params.model_path
    mutant_path = params.mutant_path
    deepfault_path = params.deepfault_path
    neural_num = params.neural_num  # NOTE(review): unused here except in the commented-out slicing code
    model_name = data + '_' + model + '.h5'
    origin_model = load_model(os.path.join(model_path, model_name))
    print("Model structure loaded from ", model_name)
    num_class = params.num_class  # number of data classes
    selected_classes = np.arange(num_class)
    for selected_class in selected_classes:
        # Zero-pad the class index to two digits for stable, sortable file names.
        if selected_class < 10:
            selected_classes_str = '0' + str(selected_class)
        else:
            selected_classes_str = str(selected_class)

        # Free graph state accumulated by previous clone_model calls.
        K.clear_session()
        print("selected_class: " + selected_classes_str)
        for mutate_op in mutate_ops:
            # Pre-compute one output path per mutant (also zero-padded index).
            save_paths = []
            for i in np.arange(params.pre_neural_num):
                if i < 10:
                    i_str = '0' + str(i)
                else:
                    i_str = str(i)
                save_path = mutant_path + '/' + model + '/' + data + '/' + approach
                save_path = save_path + '/' + data + '_' + model + '_' + selected_classes_str + '_' + approach + '_' + mutate_op + '_' + i_str + '.h5'
                save_paths.append(save_path)

            # fault_path = deepfault_path + '/' + model + '/' + data + '/' + str(
            #     selected_class) + '_' + approach + '_suspicious_neurons.h5'

            # fault_data = get_fault_data(fault_path)
            # get_trainable_layers(origin_model)
            print("fault_data are loaded")
            for i in np.arange(params.pre_neural_num):
                # Pick a random weight-bearing layer, then a random index in it.
                layer_nums = get_trainable_layers(origin_model)
                layer_num = np.random.choice(layer_nums)
                # NOTE(review): np.array() over a ragged list of weight arrays
                # yields an object array, so shape[-1] is likely the number of
                # weight arrays (1 or 2), not the neuron count — confirm intent.
                weight_nums = np.array(origin_model.layers[layer_num].get_weights()).shape[-1]
                weight_num = np.random.choice(weight_nums)
                # start = i * neural_num
                # end = start + neural_num
                mutated_model = generate_model_by_model_mutation(model=origin_model, operator=mutate_op,
                                                                    fault_data=[(layer_num, weight_num)])
                print("mutant are done")
                # print(save_paths[i])
                # NOTE(review): save_weights stores weights only; the .h5 name
                # suggests a full model — loaders must rebuild the architecture.
                mutated_model.save_weights(save_paths[i])
                print("scores are saved")


if __name__ == '__main__':
    # CLI entry point: parse experiment options, then time the whole run.
    arg_parser = argparse.ArgumentParser(description="Experiments Script For Deepfault")
    arg_parser.add_argument("--params_set", nargs='*', type=str, default=["Car", "DenseNet121"],
                            help="see params folder")
    arg_parser.add_argument("--model", type=str, default="DenseNet121",
                            choices=["INCEPTION_NET", "VGG16", "ResNet50", "DenseNet121"])
    arg_parser.add_argument("--data", type=str, default="Car",
                            choices=["BKK100", "GTSRB", "BIT", "Car"])
    arg_parser.add_argument("--approach", type=str, default="tarantula")
    arg_parser.add_argument("--model_path", type=str, default="./data/neural_networks")
    arg_parser.add_argument("--deepfault_path", type=str, default="./output/deepfault")
    arg_parser.add_argument("--mutant_path", type=str, default="./output/mutant_random")
    cli_params = arg_parser.parse_args()

    started = time.time()
    run_mutant(cli_params)
    elapsed_min = (time.time() - started) / 60
    print("time passed (minutes): %g" % elapsed_min)
