import os
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tqdm import tqdm
from run_experiment import load_params
import cv2
from sklearn import preprocessing
import argparse
import math
import h5py
import numpy as np
import csv
import time

def _finalize_dataset(X_data, Y_data, num_class):
    """Stack the loaded images, scale pixels to [0, 1] and one-hot encode labels.

    :param X_data: list of equally-sized BGR images (as loaded by cv2)
    :param Y_data: list of integer class labels
    :param num_class: total number of classes for the one-hot encoding
    :return: (X, Y) numpy arrays ready for model consumption
    """
    print("dataset are loaded")
    X_data = np.array(X_data) / 255.0
    Y_data = np.array(Y_data)
    # Multi-class task: labels must be one-hot encoded.
    lb = preprocessing.LabelBinarizer().fit(np.array(range(num_class)))
    Y_data = lb.transform(Y_data)
    print("preprocess are done")
    return X_data, Y_data


def get_data(params):
    """Load and preprocess the dataset selected by ``params.data``.

    Supported datasets:
      * ``GTSRB`` — images grouped in per-class sub-directories under
        ``<data_path>/GTSRB/Final_Training/Images``.
      * ``BIT`` / ``Car`` — images listed in a CSV label file.

    :param params: experiment configuration (data, data_path, input_shape, num_class)
    :return: (X, Y): images scaled to [0, 1] and one-hot labels; for an
             unrecognised ``params.data`` the untouched empty lists are returned.
    """
    X_data = []
    Y_data = []
    input_shape = params.input_shape[1:]  # (height, width, channels) of the model input
    if params.data == "GTSRB":
        data_path = os.path.join(params.data_path, 'GTSRB/Final_Training/Images')
        for class_dir in tqdm(os.listdir(data_path)):
            lab = int(class_dir)
            class_path = os.path.join(data_path, class_dir)
            for photo_file in os.listdir(class_path):
                # Skip the per-class annotation files (GT-*.csv).
                if photo_file[0] == 'G':
                    continue
                img = cv2.imread(os.path.join(class_path, photo_file), 1)
                img = cv2.resize(img, (input_shape[0], input_shape[1]))
                X_data.append(img)
                Y_data.append(lab)
        X_data, Y_data = _finalize_dataset(X_data, Y_data, params.num_class)
    elif params.data == "BIT":
        data_path = os.path.join(params.data_path, params.data)
        # `with` guarantees the label file is closed (the original leaked the handle).
        with open('./data/data_set/BIT_label.csv', 'r', encoding='utf-8') as label_file:
            print("readered label.csv")
            for lists in csv.reader(label_file):
                print(lists)
                img = cv2.imread(os.path.join(data_path, lists[1]))
                img = cv2.resize(img, (input_shape[0], input_shape[1]))
                X_data.append(img)
                Y_data.append(int(lists[2]))
        X_data, Y_data = _finalize_dataset(X_data, Y_data, params.num_class)
    elif params.data == "Car":
        data_path = params.data_path
        with open('./data/data_set/Car_label.csv', 'r', encoding='utf-8') as label_file:
            print("readered label.csv")
            for lists in csv.reader(label_file):
                # Column 3 flags which rows belong to this split.
                if (lists[3] == '1'):
                    print(lists)
                    img = cv2.imread(os.path.join(data_path, lists[1]))
                    img = cv2.resize(img, (input_shape[0], input_shape[1]))
                    X_data.append(img)
                    Y_data.append(int(lists[2]))
        X_data, Y_data = _finalize_dataset(X_data, Y_data, params.num_class)

    return X_data, Y_data


def save_suspicious_neurons(suspicious_neurons, filename, group_index, approach):
    """Append the suspicious-neuron list of one analysis approach to an HDF5 file.

    :param suspicious_neurons: list of (layer, neuron) index pairs
    :param filename: output path prefix (class-specific)
    :param group_index: HDF5 group number to create inside the file
    :param approach: analysis name ('dstar' / 'tarantula' / 'ochiai'), part of the file name
    """
    out_path = filename + approach + '_suspicious_neurons.h5'
    with h5py.File(out_path, 'a') as hf:
        grp = hf.create_group('group' + str(group_index))
        for pos, neuron in enumerate(suspicious_neurons):
            grp.create_dataset("suspicious_neurons" + str(pos), data=neuron)

    print("Suspicious neurons saved in ", out_path)
    return


# Exponent for the DStar suspiciousness formula (num_cf ** star) used by dstar_analysis.
star = 3


def dstar_analysis(trainable_layers, scores, num_cf, num_uf, num_cs, num_us, suspicious_num, star):
    """
    Rank neurons by the DStar suspiciousness measure [3]:
    cf**star / (cs + uf).
    """

    def suspiciousness(layer, neuron):
        numerator = float(num_cf[layer][neuron] ** star)
        return numerator / (num_cs[layer][neuron] + num_uf[layer][neuron])

    return scores_with_foo(trainable_layers, scores, num_cf, num_uf, num_cs, num_us, suspicious_num, suspiciousness)


def tarantula_analysis(trainable_layers, scores, num_cf, num_uf, num_cs, num_us, suspicious_num):
    """
    Rank neurons by the Tarantula suspiciousness measure [1]:
    (cf / (cf + uf)) / (cf / (cf + uf) + cs / (cs + us)).
    """

    def suspiciousness(layer, neuron):
        cf = float(num_cf[layer][neuron])
        uf = num_uf[layer][neuron]
        cs = float(num_cs[layer][neuron])
        us = num_us[layer][neuron]
        fail_ratio = cf / (cf + uf)
        pass_ratio = cs / (cs + us)
        return float(fail_ratio) / (fail_ratio + pass_ratio)

    return scores_with_foo(trainable_layers, scores, num_cf, num_uf, num_cs, num_us, suspicious_num, suspiciousness)


def ochiai_analysis(trainable_layers, scores, num_cf, num_uf, num_cs, num_us, suspicious_num):
    """
    Rank neurons by the Ochiai suspiciousness measure [2]:
    cf / sqrt((cf + uf) * (cf + cs)).
    """

    def suspiciousness(layer, neuron):
        cf = num_cf[layer][neuron]
        denominator = ((cf + num_uf[layer][neuron]) * (cf + num_cs[layer][neuron])) ** (.5)
        return float(cf) / denominator

    return scores_with_foo(trainable_layers, scores, num_cf, num_uf, num_cs, num_us, suspicious_num, suspiciousness)


def filter_val_set(desired_class, X, Y, max_count=200):
    """Select up to ``max_count`` validation samples belonging to one class.

    Fixes an off-by-one in the original loop, which broke only after the
    counter *exceeded* 200 and therefore collected 201 samples; the cap is
    also parameterized (default keeps the intended limit of 200).

    :param desired_class: index of the class to keep
    :param X: iterable of inputs
    :param Y: iterable of one-hot label vectors (y[desired_class] == 1 selects)
    :param max_count: maximum number of samples to return
    :return: (X_class, Y_class) numpy arrays of the selected samples
    """
    X_class = []
    Y_class = []
    for x, y in zip(X, Y):
        if len(X_class) >= max_count:
            break
        if y[desired_class] == 1:
            X_class.append(x)
            Y_class.append(y)

    print("Validation set filtered for desired class: " + str(desired_class))

    return np.array(X_class), np.array(Y_class)


def get_trainable_layers(model):
    """Return indices (into ``model.layers``) of layers that carry weights.

    A layer counts as trainable when ``get_weights()`` yields one entry
    (weights only) or two (weights + biases). The input layer (index 0) is
    skipped, as in the original.

    Fixes: the original used ``model.layers.index(layer)``, which returns the
    *first* occurrence and is wrong when a layer object is shared/reused;
    ``enumerate`` reports the true position. The bare ``except:`` (which would
    also swallow KeyboardInterrupt) is narrowed to ``except Exception``.

    :param model: a Keras-style model exposing ``layers`` whose elements
                  implement ``get_weights()``
    :return: list of integer layer indices
    """
    trainable_layers = []
    for idx, layer in enumerate(model.layers[1:], start=1):
        try:
            weights = layer.get_weights()
        except Exception:
            # Layers without inspectable weights are simply skipped.
            continue
        if len(weights) in (1, 2):
            trainable_layers.append(idx)

    return trainable_layers


def get_layer_outs(model, test_input):
    """Evaluate every layer of ``model`` on ``test_input``.

    :return: list with one entry per layer; each entry is the output of a
             Keras backend function, i.e. a single-element list holding that
             layer's activations for the whole batch.
    """
    feed = model.input  # shared input placeholder
    layer_outs = []
    for layer in model.layers:
        evaluate = K.function([feed], [layer.output])
        layer_outs.append(evaluate([test_input]))

    return layer_outs


def model_test(model, X_test, Y_test, selected_class):
    """
    Test a neural network on one class.

    :param model: trained Keras model
    :param X_test: validation inputs
    :param Y_test: one-hot validation labels
    :param selected_class: class column used to judge correctness
    :return: (correct indices, misclassified indices, per-layer activations,
              rounded predictions for the selected class)
    """

    # Per-layer activations for every input — needed by the spectrum builder.
    layer_outs = get_layer_outs(model, X_test)
    print("layer_outs")

    # Overall evaluation for logging purposes only.
    score = model.evaluate(X_test, Y_test, verbose=0)
    print('[loss, accuracy] -> ' + str(score))

    Y_pred = model.predict(X_test)

    # Compare the expected and predicted membership of the selected class;
    # a non-zero difference marks a misclassification.
    expectations = Y_test[:, selected_class]
    predictions = Y_pred[:, selected_class].round()
    classifications = np.absolute(expectations - predictions)

    correct_classifications = [idx for idx, diff in enumerate(classifications) if diff == 0]
    misclassifications = [idx for idx, diff in enumerate(classifications) if diff != 0]

    print("model test done!\n")

    return correct_classifications, misclassifications, layer_outs, predictions


def construct_spectrum_matrices(model, trainable_layers,
                                correct_classifications, misclassifications,
                                layer_outs, activation_threshold=0.1):
    """Build the hit-spectrum counters for spectrum-based fault localization.

    For every neuron of every trainable layer, count over the test inputs:
    covered (mean activation > threshold) vs uncovered, crossed with the
    input being correctly classified ("succeeded") vs misclassified ("failed").

    Fixes vs. the original:
      * membership tests used Python lists (O(n) per neuron per input) —
        converted once to a set;
      * a correctly classified input whose mean activation equals the
        threshold exactly fell through the elif chain into num_uf (counted as
        a *failed* input) — inputs are now split by correctness first;
      * ``trainable_layers.index(tl)`` replaced by ``enumerate`` (robust to
        duplicate entries).

    :param model: model exposing ``layers[tl].output_shape``
    :param trainable_layers: indices of layers to analyse
    :param correct_classifications: indices of correctly classified inputs
    :param misclassifications: indices of misclassified inputs
    :param layer_outs: per-layer activations as produced by get_layer_outs
    :param activation_threshold: mean activation above which a neuron counts as covered
    :return: (scores, num_cf, num_uf, num_cs, num_us), one numpy vector per layer
    """
    correct_set = set(correct_classifications)  # O(1) membership in the hot loop

    scores = []
    num_cf = []  # covered (activated) and failed
    num_uf = []  # uncovered (not activated) and failed
    num_cs = []  # covered and succeeded
    num_us = []  # uncovered and succeeded
    for tl in trainable_layers:
        width = model.layers[tl].output_shape[-1]
        num_cf.append(np.zeros(width))
        num_uf.append(np.zeros(width))
        num_cs.append(np.zeros(width))
        num_us.append(np.zeros(width))
        scores.append(np.zeros(width))

    for layer_idx, tl in enumerate(trainable_layers):
        print(tl)
        width = model.layers[tl].output_shape[-1]
        for test_idx, l in enumerate(layer_outs[tl][0]):
            passed = test_idx in correct_set
            for neuron_idx in range(width):
                covered = np.mean(l[..., neuron_idx]) > activation_threshold
                if passed:
                    if covered:
                        num_cs[layer_idx][neuron_idx] += 1
                    else:
                        num_us[layer_idx][neuron_idx] += 1
                elif covered:
                    num_cf[layer_idx][neuron_idx] += 1
                else:
                    num_uf[layer_idx][neuron_idx] += 1

    return scores, num_cf, num_uf, num_cs, num_us


def scores_with_foo(trainable_layers, scores, num_cf, num_uf, num_cs, num_us, suspicious_num, foo):
    """Fill ``scores`` via the suspiciousness measure ``foo`` and rank neurons.

    :param trainable_layers: layer indices used to translate positions in
           ``scores`` back to model layer indices; ``None`` keeps raw positions
    :param scores: list of per-layer score vectors, overwritten in place
    :param foo: callable (layer_pos, neuron_pos) -> suspiciousness value
    :param suspicious_num: how many of the highest-scoring neurons to return
    :return: list of (layer, neuron) pairs, most suspicious first
    """
    # Populate the score matrix; NaN measures are treated as zero suspicion.
    for layer_pos, layer_scores in enumerate(scores):
        for neuron_pos in range(len(layer_scores)):
            value = foo(layer_pos, neuron_pos)
            layer_scores[neuron_pos] = 0 if np.isnan(value) else value

    flat_scores = [float(v) for layer_scores in scores for v in layer_scores if not math.isnan(float(v))]

    # Indices of the highest suspicious_num scores (descending); when fewer
    # scores exist than requested, every index is returned in natural order.
    if suspicious_num >= len(flat_scores):
        flat_indexes = range(len(flat_scores))
    else:
        flat_indexes = np.argsort(flat_scores)[-suspicious_num:][::-1]

    suspicious_neuron_idx = []
    for flat_idx in flat_indexes:
        # Translate the flat position back into (layer, neuron) coordinates.
        layer_pos, remaining = 0, flat_idx
        while remaining >= len(scores[layer_pos]):
            remaining -= len(scores[layer_pos])
            layer_pos += 1

        if trainable_layers is None:
            suspicious_neuron_idx.append((layer_pos, remaining))
        else:
            suspicious_neuron_idx.append((trainable_layers[layer_pos], remaining))

    return suspicious_neuron_idx

def run_deepfault(params):
    """Run the DeepFault spectrum-based fault-localization pipeline.

    For every class of the configured dataset: filter a per-class validation
    subset, split it into correct / misclassified inputs, build the
    hit-spectrum matrices, and save the most suspicious neurons found by the
    DStar, Tarantula and Ochiai measures to per-approach HDF5 files.

    :param params: argparse namespace; extended by ``load_params`` with the
                   experiment configuration (input_shape, num_class, ...).
    """
    params = load_params(params)
    print("begin")
    model = params.model
    data = params.data
    model_path = params.model_path
    data_path = params.data_path
    deepfault_path = params.deepfault_path
    print("load param")
    susp_num = 1000  # number of suspicious neurons kept per measure
    num_class = params.num_class  # number of classes in the dataset
    selected_classes = np.arange(num_class)
    X_data, Y_data = get_data(params)
    model_name = data + '_' + model + '.h5'
    origin_model = load_model(os.path.join(model_path, model_name))
    trainable_layers = get_trainable_layers(origin_model)
    print("Model structure loaded from ", model_name)
    for selected_class in selected_classes:
        print("selected_class: " + str(selected_class))
        # Validation subset containing only the current class (size-capped).
        X_val, Y_val = filter_val_set(selected_class, X_data, Y_data)

        # Output file prefix: <deepfault_path>/<model>/<data>/<class>_
        filename = deepfault_path + '/' + model + '/' + data + '/' + str(selected_class) + '_'

        correct_classifications, misclassifications, layer_outs, predictions = model_test(origin_model, X_val, Y_val,
                                                                                          selected_class)
        scores, num_cf, num_uf, num_cs, num_us = construct_spectrum_matrices(origin_model, trainable_layers,
                                                                             correct_classifications,
                                                                             misclassifications,
                                                                             layer_outs)
        print("scores are done")
        suspicious_neuron_idx_dstar = dstar_analysis(trainable_layers, scores, num_cf, num_uf, num_cs, num_us, susp_num,
                                                     star)
        print("suspicious_neuron_idx_dstar")
        print(suspicious_neuron_idx_dstar)
        suspicious_neuron_idx_tarantula = tarantula_analysis(trainable_layers, scores, num_cf, num_uf, num_cs, num_us,
                                                             susp_num)
        print("suspicious_neuron_idx_tarantula")
        print(suspicious_neuron_idx_tarantula)
        suspicious_neuron_idx_ochiai = ochiai_analysis(trainable_layers, scores, num_cf, num_uf, num_cs, num_us,
                                                       susp_num)
        print("suspicious_neuron_idx_ochiai")
        print(suspicious_neuron_idx_ochiai)

        group_index = 1
        save_suspicious_neurons(suspicious_neuron_idx_dstar, filename, group_index, 'dstar')
        save_suspicious_neurons(suspicious_neuron_idx_tarantula, filename, group_index, 'tarantula')
        save_suspicious_neurons(suspicious_neuron_idx_ochiai, filename, group_index, 'ochiai')
        # Free Keras/TF graph state between classes to bound memory growth.
        K.clear_session()


if __name__ == '__main__':
    # Command-line entry point: parse experiment options, run DeepFault, and
    # report the elapsed wall-clock time in minutes.
    parser = argparse.ArgumentParser(description="Experiments Script For Deepfault")
    print("parse_args")
    parser.add_argument("--params_set", nargs='*', type=str, default=["GTSRB", "VGG16"],
                        help="see params folder")
    parser.add_argument("--model", type=str, default="VGG16", choices=["INCEPTION_NET", "VGG16", "ResNet50", "DenseNet121"])
    parser.add_argument("--data", type=str, default="GTSRB", choices=["BKK100", "GTSRB", "BIT", "Car"])
    parser.add_argument("--deepfault_path", type=str, default="./output/deepfault")
    parser.add_argument("--model_path", type=str, default="./data/neural_networks")
    parser.add_argument("--data_path", type=str, default="./data/data_set")
    params = parser.parse_args()
    start_time = time.time()
    run_deepfault(params)
    time_passed_min = (time.time() - start_time) / 60
    print("time passed (minutes): %g" % time_passed_min)