import argparse
import os
import numpy as np
import cv2
from sklearn import preprocessing
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model, load_model, model_from_json
from PIL import Image
import imagehash
import csv
from run_experiment import load_params
import time

def get_grad_data(grad_path):
    """Load Grad-generated test data from *grad_path* and report the seed count.

    Expects three .npy files in *grad_path*: 'new_inputs.npy',
    'orgin_inputs.npy' (original misspelling kept — it matches the files
    written by the generation step) and 'new_outputs.npy'.

    Returns a 3-tuple ``(new_inputs, origin_inputs, new_outputs)``, matching
    both the unpacking done in run_evaluate() and the sibling
    get_deeptest_data().  The number of distinct seeds is printed, not
    returned.
    """
    grad_new_inputs = np.load(os.path.join(grad_path, 'new_inputs.npy'))
    grad_origin_inputs = np.load(os.path.join(grad_path, 'orgin_inputs.npy'))
    grad_new_outputs = np.load(os.path.join(grad_path, 'new_outputs.npy'))

    # Rescale images (presumably floats in [0, 1] — TODO confirm) to the
    # 8-bit range and clamp out-of-range values.
    grad_origin_inputs_png = np.clip(grad_origin_inputs * 255, 0, 255)

    # Count distinct seeds by perceptual hash: mutated images derived from
    # the same original collapse onto a single hash.
    hashes = {imagehash.phash(Image.fromarray(np.uint8(origin_input)))
              for origin_input in grad_origin_inputs_png}
    seed_num = len(hashes)
    print("grad_seed_num：" + str(seed_num))
    # BUGFIX: previously returned a 4-tuple ending in seed_num, but the only
    # caller (run_evaluate) unpacks exactly three values, which raised a
    # ValueError.  seed_num is reported via the print above instead.
    return grad_new_inputs, grad_origin_inputs, grad_new_outputs



def _load_resized_pair(new_file_path, origin_file_path, size=(32, 32)):
    """Read the mutated image and its original from disk, resized to *size*."""
    new_img = cv2.resize(cv2.imread(new_file_path), size)
    origin_img = cv2.resize(cv2.imread(origin_file_path), size)
    return new_img, origin_img


def _load_label_csv(label_path):
    """Build a {file name (col 1) -> label string (col 2)} map from a label CSV."""
    # BUGFIX: the file handle was previously opened and never closed; use a
    # context manager so it is released deterministically.
    with open(label_path, 'r', encoding='utf-8') as fp:
        return {row[1]: row[2] for row in csv.reader(fp)}


def get_deeptest_data(deeptest_path, params, num_class):
    """Load DeepTest-generated images, their originals, and one-hot labels.

    The dataset layout depends on ``params.data`` ("GTSRB", "BIT" or "Car");
    in each case the original file name and the label are recovered from a
    fixed-width prefix of the mutated file's name.  Prints the number of
    distinct seed images and returns
    ``(new_inputs, origin_inputs, one_hot_outputs)``.
    """
    deeptest_new_inputs = []
    deeptest_origin_inputs = []
    deeptest_new_outputs = []
    num_set = set()
    if params.data == "GTSRB":
        orgin_path = "./data/data_set/GTSRB/Final_Training/Images"
        for file in os.listdir(deeptest_path):
            lab = int(file[0:5])       # class id encoded in the name prefix
            num_set.add(file[0:9])     # seed id prefix, used for the seed count
            index = file.find('ppm')
            origin_file_name = file[:index + 3]  # drop the mutation suffix after ".ppm"
            new_img, origin_img = _load_resized_pair(
                os.path.join(deeptest_path, file),
                orgin_path + '/' + file[0:5] + '/' + origin_file_name)
            deeptest_new_inputs.append(new_img)
            deeptest_origin_inputs.append(origin_img)
            deeptest_new_outputs.append(lab)
    elif params.data == "BIT":
        label_dict = _load_label_csv("./data/data_set/BIT_label.csv")
        orgin_path = "./data/data_set/BIT"
        for file in os.listdir(deeptest_path):
            origin_file_name = file[:19]  # original name is the first 19 chars
            num_set.add(file[0:19])
            new_img, origin_img = _load_resized_pair(
                os.path.join(deeptest_path, file),
                orgin_path + '/' + origin_file_name)
            deeptest_new_inputs.append(new_img)
            deeptest_origin_inputs.append(origin_img)
            deeptest_new_outputs.append(int(label_dict[origin_file_name]))
    elif params.data == "Car":
        label_dict = _load_label_csv("./data/data_set/Car_label.csv")
        orgin_path = "./data/data_set/car_ims"
        for file in os.listdir(deeptest_path):
            origin_file_name = file[:10]  # original name is the first 10 chars
            num_set.add(file[0:10])
            new_img, origin_img = _load_resized_pair(
                os.path.join(deeptest_path, file),
                orgin_path + '/' + origin_file_name)
            deeptest_new_inputs.append(new_img)
            deeptest_origin_inputs.append(origin_img)
            # Label CSV keys carry the "car_ims/" directory prefix.
            deeptest_new_outputs.append(int(label_dict['car_ims/' + origin_file_name]))

    deeptest_new_inputs = np.array(deeptest_new_inputs)
    deeptest_origin_inputs = np.array(deeptest_origin_inputs)
    deeptest_new_outputs = np.array(deeptest_new_outputs)
    # One-hot encode the integer labels for the multi-class model.
    lb = preprocessing.LabelBinarizer().fit(np.array(range(num_class)))
    deeptest_new_outputs = lb.transform(deeptest_new_outputs)
    seed_num = len(num_set)
    print("deeptest_seed_num：" + str(seed_num))
    return deeptest_new_inputs, deeptest_origin_inputs, deeptest_new_outputs


def imageSimilarity(new_img, origin_img):
    """Perceptual similarity in [0, 1]: fraction of matching pHash bits (of 64)."""
    hash_new = imagehash.phash(new_img)
    hash_origin = imagehash.phash(origin_img)
    matching_bits = (hash_new.hash == hash_origin.hash).sum()
    return matching_bits / 64


def imageFidelity(new_inputs, origin_inputs, new_outputs):
    """Return indices of (new, origin) pairs with perceptual similarity > 0.6.

    ``new_outputs`` is accepted for interface compatibility with existing
    callers but is not used by the computation.
    """
    # Idiom fix: enumerate + comprehension replaces the manual index counter.
    return [
        i
        for i, (new_input, origin_input) in enumerate(zip(new_inputs, origin_inputs))
        if imageSimilarity(
            Image.fromarray(np.uint8(new_input)),
            Image.fromarray(np.uint8(origin_input)),
        ) > 0.6
    ]


def calculate_error_rate(origin_model, grad_new_inputs, deeptest_new_inputs, grad_new_outputs,
                         deeptest_new_outputs):
    """Run *origin_model* on both suites and compute misclassification rates.

    Labels and predictions are compared via argmax over the class axis.
    Returns ``(grad_error_rate, deeptest_error_rate, grad_pred_labels,
    deeptest_pred_labels)``.
    """
    def _suite_error(inputs, one_hot_labels):
        # Predicted and expected class indices for one test suite.
        predicted = np.argmax(origin_model(inputs), axis=1)
        expected = np.argmax(one_hot_labels, axis=1)
        return np.not_equal(predicted, expected).sum() / len(expected), predicted

    grad_error_rate, grad_perdict_arg = _suite_error(grad_new_inputs, grad_new_outputs)
    deeptest_error_rate, deeptest_perdict_arg = _suite_error(deeptest_new_inputs, deeptest_new_outputs)
    return grad_error_rate, deeptest_error_rate, grad_perdict_arg, deeptest_perdict_arg


def calculate_kill_rate(grad_new_inputs, params, deeptest_new_inputs, grad_perdict_arg, deeptest_perdict_arg):
    """Mutation testing: fraction of mutant models 'killed' by each suite.

    A mutant is killed when any input's predicted label differs from the
    original model's prediction (``*_perdict_arg``).  Also fills
    ``reward_table[class][neuron]`` with 1 for mutants killed by the grad
    suite, assuming mutant weight files are sorted as
    class-major / neuron-minor with ``pre_neural_num`` mutants per class.

    NOTE(review): paths below are hard-coded to a Windows machine — confirm
    before running elsewhere.
    """
    mutation_path = "D:\Projects\PycharmProjects\Grad_/output/mutant/" + params.model + "/" + params.data + "/" + params.approach
    json_path = "D:\Projects\PycharmProjects\Grad_/data/neural_networks/" + params.data + "_" + params.model + ".json"
    # pre_neural_num = int(params.pre_neural_num)
    pre_neural_num = int(50)  # mutants per class; hard-coded override of params
    num_class = int(params.num_class)
    with open(json_path, 'r') as json_file:
        muta_model = model_from_json(json_file.read())
    # mutation_path = "./data/output/mutant/BIT_RESNET50/tarantula"
    filelist = sorted(os.listdir(mutation_path))
    reward_table = [[0] * pre_neural_num for _ in range(num_class)]
    grad_kill_num = 0
    deeptest_kill_num = 0
    for i, weight_file in enumerate(filelist):
        class_idx, neuron_idx = divmod(i, pre_neural_num)
        # Reuse one architecture; only the weights change per mutant.
        muta_model.load_weights(os.path.join(mutation_path, weight_file))
        grad_muta_arg = np.argmax(muta_model(grad_new_inputs), axis=1)
        deeptest_muta_arg = np.argmax(muta_model(deeptest_new_inputs), axis=1)
        if np.any(grad_perdict_arg != grad_muta_arg):
            grad_kill_num += 1
            reward_table[class_idx][neuron_idx] = 1
        if np.any(deeptest_perdict_arg != deeptest_muta_arg):
            deeptest_kill_num += 1
        K.clear_session()
    grad_kill_rate = grad_kill_num / len(filelist)
    deeptest_kill_rate = deeptest_kill_num / len(filelist)
    return grad_kill_rate, deeptest_kill_rate, reward_table


def scale(layer_outputs, rmax=1, rmin=0):
    """Min-max scale *layer_outputs* into [rmin, rmax].

    A constant array (zero range) maps to all zeros rather than dividing
    by zero.
    """
    lo = layer_outputs.min()
    span = layer_outputs.max() - lo
    if span == 0:
        return np.zeros(shape=layer_outputs.shape)
    normalized = (layer_outputs - lo) / span
    return normalized * (rmax - rmin) + rmin


def get_covered_neurons(origin_model, test_inputs, threshold=0.2):
    """Neuron coverage: fraction of neurons whose scaled mean activation
    exceeds *threshold* for at least one input in *test_inputs*.

    Layers whose names contain 'pool', 'fc' or 'flatten' are excluded.
    ``threshold`` was previously hard-coded to 0.2; it is now a parameter
    with the same default, so existing callers are unaffected.
    """
    exclude_layer = ['pool', 'fc', 'flatten']
    model = origin_model

    # Layers considered in the neuron coverage computation (input layer
    # excluded via the [1:] slice).
    layer_to_compute = [
        layer.name for layer in model.layers[1:]
        if all(ex not in layer.name for ex in exclude_layer)
    ]

    # Coverage table: (layer_name, neuron_index) -> covered flag.
    cov_dict = {}
    for layer_name in layer_to_compute:
        for index in range(model.get_layer(layer_name).output_shape[-1]):
            cov_dict[(layer_name, index)] = False

    for layer_name in layer_to_compute:
        # Sub-model exposing this layer's activations for all test inputs.
        layer_model = Model(model.inputs, model.get_layer(layer_name).output)
        layer_outputs = np.array(layer_model(test_inputs))

        for layer_output in layer_outputs:
            scaled = scale(layer_output)  # per-sample min-max normalization
            for neuron_idx in range(scaled.shape[-1]):
                # A neuron counts as covered once any sample activates it
                # above the threshold (mean over the spatial dimensions).
                if np.mean(scaled[..., neuron_idx]) > threshold:
                    cov_dict[(layer_name, neuron_idx)] = True

    covered_neurons = sum(1 for v in cov_dict.values() if v)
    total_neurons = len(cov_dict)
    return covered_neurons / float(total_neurons)


def run_evaluate(params):
    """Compare Grad- and DeepTest-generated test suites on the trained model.

    Loads both suites, then reports — first on all samples, then again after
    filtering pairs by perceptual-hash fidelity (> 0.6 similarity, via
    imageFidelity) — the model's error rate and its neuron coverage.
    The mutation kill-rate computation is currently commented out.

    NOTE(review): the grad/deeptest paths below are hard-coded to a specific
    Windows machine — confirm before running elsewhere.  Also confirm that
    get_grad_data's return arity matches the 3-tuple unpack used here.
    """
    # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    # Merge the named parameter set into the CLI arguments.
    params = load_params(params)
    model = params.model
    data = params.data
    model_path = params.model_path
    # model = params.model
    grad_path = "D:\Projects\PycharmProjects\Grad_/output/grad/" + model + '/' + data + '/' + "generated_samples"
    # deepsmartfuzz_path = params.deepsmartfuzz_path
    deeptest_path = "D:\Projects\PycharmProjects\Grad_/output/deeptest/" + model + '/' + data + '/' + "new"
    # deeptest_path = params.deeptest_path
    # BIT_path = params.BIT_path
    # model_path = params.model_path
    num_class = params.num_class
    grad_new_inputs, grad_origin_inputs, grad_new_outputs = get_grad_data(grad_path)
    deeptest_new_inputs, deeptest_origin_inputs, deeptest_new_outputs = get_deeptest_data(deeptest_path, params,
                                                                                          num_class)
    # ---- Phase 1: evaluate on all samples, no fidelity filtering ----
    print("None Fidelity is begining:")
    # deepsmartfuzz_new_inputs_temp = deepsmartfuzz_new_inputs / 255.
    # DeepTest images come from cv2.imread as 0–255; normalize to [0, 1]
    # (grad inputs are presumably already normalized — TODO confirm).
    deeptest_new_inputs = deeptest_new_inputs / 255.
    print("grad_len: " + str(len(grad_new_inputs)))
    print("deeptest_len: " + str(len(deeptest_new_inputs)))
    model_name = data + '_' + model + '.h5'
    origin_model = load_model(os.path.join(model_path, model_name))
    # origin_model = load_model(model_path)
    grad_error_rate, deeptest_error_rate, grad_perdict_arg, deeptest_perdict_arg = calculate_error_rate(
        origin_model, grad_new_inputs, deeptest_new_inputs, grad_new_outputs,
        deeptest_new_outputs)
    print("grad_error_rate: " + str(grad_error_rate))
    print("deeptest_error_rate: " + str(deeptest_error_rate))
    # grad_kill_rate, deeptest_kill_rate, reward_table = calculate_kill_rate(grad_new_inputs, params,
    #                                                                                 deeptest_new_inputs,
    #                                                                                 grad_perdict_arg,
    #                                                                                 deeptest_perdict_arg)
    # kill_per_class = [0 for j in range(num_class)]
    # kill_per_class_str = ""
    # for i in range(num_class):
    #     kill_per_class[i] = np.array(reward_table[i]).sum()
    #     kill_per_class_str = kill_per_class_str + "," + str(kill_per_class[i])
    #
    # print(kill_per_class_str)
    #
    # print("grad_kill_rate: " + str(grad_kill_rate))
    # print("deeptest_kill_rate: " + str(deeptest_kill_rate))
    grad_cover_rate = get_covered_neurons(origin_model, grad_new_inputs)
    deeptest_cover_rate = get_covered_neurons(origin_model, deeptest_new_inputs)
    print("grad_cover_rate: " + str(grad_cover_rate))
    print("deeptest_cover_rate: " + str(deeptest_cover_rate))

    # ---- Phase 2: same metrics on the fidelity-filtered subset ----
    print("Fidelity is begining:")
    # get png data
    # Rescale grad images to 8-bit range and clamp before hashing.
    grad_new_inputs_png = grad_new_inputs * 255
    grad_new_inputs_png[grad_new_inputs_png > 255] = 255
    grad_new_inputs_png[grad_new_inputs_png < 0] = 0
    grad_origin_inputs_png = grad_origin_inputs * 255
    grad_origin_inputs_png[grad_origin_inputs_png > 255] = 255
    grad_origin_inputs_png[grad_origin_inputs_png < 0] = 0
    # caculate imageFidelity and get index
    grad_indexs = imageFidelity(grad_new_inputs_png, grad_origin_inputs_png, grad_new_outputs)
    deeptest_indexs = imageFidelity(deeptest_new_inputs * 255, deeptest_origin_inputs, deeptest_new_outputs)
    # get data by index: keep only pairs that passed the fidelity check
    grad_new_inputs = grad_new_inputs[grad_indexs]
    grad_new_outputs = grad_new_outputs[grad_indexs]
    deeptest_new_inputs = deeptest_new_inputs[deeptest_indexs]
    deeptest_new_outputs = deeptest_new_outputs[deeptest_indexs]

    print("grad_len: " + str(len(grad_new_inputs)))
    print("deeptest_len: " + str(len(deeptest_new_inputs)))

    grad_error_rate, deeptest_error_rate, grad_perdict_arg, deeptest_perdict_arg = calculate_error_rate(
        origin_model, grad_new_inputs, deeptest_new_inputs, grad_new_outputs,
        deeptest_new_outputs)
    print("grad_error_rate: " + str(grad_error_rate))
    print("deeptest_error_rate: " + str(deeptest_error_rate))
    # grad_kill_rate, deeptest_kill_rate, reward_table = calculate_kill_rate(grad_new_inputs, params,
    #                                                                                 deeptest_new_inputs,
    #                                                                                 grad_perdict_arg,
    #                                                                                 deeptest_perdict_arg)
    #
    # kill_per_class = [0 for j in range(num_class)]
    # kill_per_class_str = ""
    # for i in range(num_class):
    #     kill_per_class[i] = np.array(reward_table[i]).sum()
    #     kill_per_class_str = kill_per_class_str + "," + str(kill_per_class[i])
    #
    # print(kill_per_class_str)
    #
    # print("deepsmartfuzz_kill_rate: " + str(grad_kill_rate))
    # print("deeptest_kill_rate: " + str(deeptest_kill_rate))
    deepsmartfuzz_cover_rate = get_covered_neurons(origin_model, grad_new_inputs)
    deeptest_cover_rate = get_covered_neurons(origin_model, deeptest_new_inputs)
    print("deepsmartfuzz_cover_rate: " + str(deepsmartfuzz_cover_rate))
    print("deeptest_cover_rate: " + str(deeptest_cover_rate))


if __name__ == '__main__':
    # CLI entry point: parse arguments, run the evaluation, report wall time.
    arg_parser = argparse.ArgumentParser(description="Experiments Script For evaluate")
    arg_parser.add_argument("--params_set", nargs='*', type=str, default=["Car", "VGG16"],
                            help="see params folder")
    arg_parser.add_argument("--model", type=str, default="VGG16",
                            choices=["INCEPTION_NET", "VGG16", "ResNet50", "DenseNet121"])
    arg_parser.add_argument("--data", type=str, default="Car",
                            choices=["BKK100", "GTSRB", "BIT", "Car"])
    arg_parser.add_argument("--approach", type=str, default="tarantula")
    arg_parser.add_argument("--model_path", type=str, default="./data/neural_networks")
    arg_parser.add_argument("--deepfault_path", type=str, default="./output/deepfault")
    arg_parser.add_argument("--mutant_path", type=str, default="./output/mutant")
    params = arg_parser.parse_args()

    started_at = time.time()
    run_evaluate(params)
    minutes_elapsed = (time.time() - started_at) / 60
    print("time passed (minutes): %g" % minutes_elapsed)
