import numpy as np
import os
import sys
from collections import defaultdict
import random
from deeptest_ncoverage import NCoverage
import csv
import cv2
import pickle
from tensorflow.keras.models import load_model, Model
from tensorflow.keras import backend as K
from scipy import misc
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.models import model_from_json
import argparse
from collections import deque
from tqdm import tqdm

class NetModel(object):
    '''
    Keras classifier wrapped with DeepTest neuron-coverage tracking.
    '''

    def __init__(self, model_path):
        # Load the trained network and attach a coverage tracker with a
        # fixed activation threshold of 0.1.
        self.model = load_model(model_path)
        self.threshold = 0.1
        self.nc = NCoverage(self.model, self.threshold)

    def predict(self, img, test=0):
        '''Run the image through the coverage tracker.

        test == 0: update the coverage and return
                   (covered_neurons, total_neurons, percentage).
        test == 1: only check whether the image would increase coverage
                   (returns a boolean), without updating it.
        '''
        # Model expects a normalised 32x32 batch of one image.
        batch = np.expand_dims(cv2.resize(img, (32, 32)), axis=0) / 255.0
        if test == 1:
            return self.nc.is_testcase_increase_coverage(batch)
        self.nc.update_coverage(batch)
        return self.nc.curr_neuron_cov()


def save_object(obj, filename):
    """Pickle *obj* to *filename* using the highest available protocol."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, protocol=pickle.HIGHEST_PROTOCOL)


def image_translation(img, params):
    """Translate the image; params is [tx, ty] or a scalar used for both."""
    if isinstance(params, list):
        tx, ty = params[0], params[1]
    else:
        tx = ty = params
    rows, cols, _ = img.shape
    matrix = np.float32([[1, 0, tx], [0, 1, ty]])
    return cv2.warpAffine(img, matrix, (cols, rows))


def image_scale(img, params):
    """Resize by scale factor(s); scalar params scales both axes equally."""
    if isinstance(params, list):
        fx, fy = params[0], params[1]
    else:
        fx = fy = params
    return cv2.resize(img, None, fx=fx, fy=fy, interpolation=cv2.INTER_CUBIC)


def image_shear(img, params):
    """Apply a horizontal shear; the factor is the negated scalar params."""
    rows, cols, _ = img.shape
    shear = -1.0 * params
    matrix = np.float32([[1, shear, 0], [0, 1, 0]])
    return cv2.warpAffine(img, matrix, (cols, rows))


def image_rotation(img, params):
    """Rotate about the image centre by params degrees (scale factor 1)."""
    rows, cols, _ = img.shape
    centre = (cols / 2, rows / 2)
    matrix = cv2.getRotationMatrix2D(centre, params, 1)
    return cv2.warpAffine(img, matrix, (cols, rows))


def image_contrast(img, params):
    """Scale pixel intensities by the factor params (saturating multiply)."""
    # Equivalent to img * params with cv2's saturation semantics.
    return cv2.multiply(img, np.array([params]))


def image_brightness(img, params):
    """Add the offset params to every pixel (saturating add)."""
    return cv2.add(img, params)


def image_blur(img, params):
    """Blur the image with one of ten kernels selected by params (1-10).

    Any params value outside 1..10 returns an empty list, matching the
    historical behaviour of the if-chain this replaces.
    """
    dispatch = {
        1: lambda im: cv2.blur(im, (3, 3)),
        2: lambda im: cv2.blur(im, (4, 4)),
        3: lambda im: cv2.blur(im, (5, 5)),
        4: lambda im: cv2.GaussianBlur(im, (3, 3), 0),
        5: lambda im: cv2.GaussianBlur(im, (5, 5), 0),
        6: lambda im: cv2.GaussianBlur(im, (7, 7), 0),
        7: lambda im: cv2.medianBlur(im, 3),
        8: lambda im: cv2.medianBlur(im, 5),
        9: lambda im: cv2.blur(im, (6, 6)),
        10: lambda im: cv2.bilateralFilter(im, 9, 75, 75),
    }
    op = dispatch.get(params)
    return op(img) if op is not None else []


def rotation(img, params):
    """Rotate about the image centre by params[0] degrees (list parameter)."""
    rows, cols, _ = img.shape
    matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), params[0], 1)
    return cv2.warpAffine(img, matrix, (cols, rows))


def image_brightness1(img, params):
    """Shift brightness by params on all three channels, in place.

    Positive params brightens with saturation at 255; negative darkens
    with saturation at 0.  The input image is mutated and returned.
    """
    height, width = img.shape[0], img.shape[1]
    if params > 0:
        for col in range(width):
            for row in range(height):
                for ch in range(3):
                    # Clamp instead of wrapping around past 255.
                    if 255 - img[row, col, ch] < params:
                        img[row, col, ch] = 255
                    else:
                        img[row, col, ch] = img[row, col, ch] + params
    if params < 0:
        delta = -params
        for col in range(width):
            for row in range(height):
                for ch in range(3):
                    # Clamp instead of wrapping around below 0.
                    if img[row, col, ch] < delta:
                        img[row, col, ch] = 0
                    else:
                        img[row, col, ch] = img[row, col, ch] - delta

    return img


def image_brightness2(img, params):
    """Saturating brightness shift applied per channel via split/merge."""
    shifted = [cv2.add(channel, params) for channel in cv2.split(img)]
    return cv2.merge(tuple(shifted))


def get_filelist(params):
    """Collect image file paths for the configured dataset.

    Supports "GTSRB" (per-class sub-folders; files starting with 'G' are
    skipped), plus the flat "BIT" and "Car" folders.  Paths are joined
    with '/' to match the rest of the pipeline.
    """
    dataset = params.data
    root = params.data_path
    files = []
    if dataset == "GTSRB":
        root = os.path.join(root, 'GTSRB/Final_Training/Images')
        for class_dir in tqdm(os.listdir(root)):
            for photo in os.listdir(root + '/' + class_dir):
                # GT-* annotation files live alongside the images; skip them.
                if photo.startswith('G'):
                    continue
                files.append(root + '/' + class_dir + '/' + photo)
    elif dataset == "BIT":
        root = os.path.join(root, 'BIT')
        files = [root + '/' + photo for photo in os.listdir(root)]
    elif dataset == "Car":
        root = os.path.join(root, 'car_ims')
        files = [root + '/' + photo for photo in os.listdir(root)]
    return files

def vgg_guided(params):
    """DeepTest-style coverage-guided test generation.

    Seeds a work queue with 100 dataset images, then repeatedly applies a
    pair of randomly chosen image transformations to the image on top of a
    DFS stack.  A transformed image that increases neuron coverage is saved
    to disk, logged to CSV, and pushed on the stack for further mutation.
    Search state (stack, queue, coverage dict, generated counter) is
    checkpointed to pickle files so an interrupted run can resume.

    params: argparse.Namespace carrying model, data, model_path,
        deeptest_path and data_path attributes (see __main__).
    """
    model_tag = params.model
    data = params.data
    model_path = params.model_path
    deeptest_path = params.deeptest_path

    filelist = get_filelist(params)
    np.random.shuffle(filelist)

    new_input = deeptest_path + '/' + model_tag + '/' + data + '/' + "new"
    pkl_input = deeptest_path + '/' + model_tag + '/' + data + '/' + "pkl"
    result_input = deeptest_path + '/' + model_tag + '/' + data + '/' + "result"
    # Model build
    # ---------------------------------------------------------------------------------

    K.set_learning_phase(0)
    model_name = data + '_' + model_tag + '.h5'
    model = NetModel(os.path.join(model_path, model_name))

    newlist = [f for f in sorted(os.listdir(new_input)) if f.endswith(".jpg")]

    covdict2_path = os.path.join(pkl_input, "covdict2.pkl")
    stack_path = os.path.join(pkl_input, "stack.pkl")
    queue_path = os.path.join(pkl_input, "queue.pkl")
    generated_path = os.path.join(pkl_input, "generated.pkl")
    '''
    Pickle files are used for continuing the search after rerunning the script.
    Delete all pkl files and generated images for starting from the beginning.
    '''
    # flag 0: start from scratch; flag 1: resume from pickled state.
    flag = 0
    if os.path.isfile(covdict2_path) and \
            os.path.isfile(stack_path) and \
            os.path.isfile(queue_path) and \
            os.path.isfile(generated_path):
        with open(covdict2_path, 'rb') as fh:
            covdict = pickle.load(fh)
        # BUGFIX: the loaded stack/queue were previously bound to unused
        # names (vgg_stack / vgg_queue) while the search loop used
        # `stack`/`queue`, so resuming crashed with NameError.
        with open(stack_path, 'rb') as fh:
            stack = pickle.load(fh)
        with open(queue_path, 'rb') as fh:
            queue = pickle.load(fh)
        with open(generated_path, 'rb') as fh:
            generated = pickle.load(fh)
        flag = 1

    if flag == 0:
        filewrite = "w"
        queue = deque()
        stack = []
        generated = 0
    else:
        model.nc.set_covdict(covdict)
        filewrite = "a"
        print("initialize from files")

    C = 0  # covered neurons
    P = 0  # covered percentage
    T = 0  # total neurons
    transformations = [image_translation, image_scale, image_shear,
                       image_rotation, image_contrast, image_brightness2, image_blur]
    # Parameter pools, index-aligned with `transformations` above.
    trans_params = [
        list(range(-50, 50)),              # translation offsets (px)
        [x * 0.1 for x in range(5, 20)],   # scale factors
        [x * 0.1 for x in range(-5, 5)],   # shear factors
        list(range(-30, 30)),              # rotation (degrees)
        [x * 0.1 for x in range(1, 20)],   # contrast alpha
        list(range(-21, 21)),              # brightness beta
        list(range(1, 11)),                # blur kernel selector
    ]

    maxtrynumber = 10
    cache = deque()  # transformation ids that recently increased coverage

    with open(os.path.join(result_input, "rq3_100_2.csv"), filewrite, encoding='utf-8', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
        if flag == 0:
            writer.writerow(['id', 'seed image(root)', 'parent image', 'generated images',
                             'total_covered', 'total_neurons', 'coverage_percentage'])
            # initialize population and coverage
            print("compute coverage of original population")
            for f in filelist[:100]:
                queue.append(f)

        while len(queue) > 0:
            current_seed_image = queue[0]
            print(str(len(queue)) + " images are left.")
            if len(stack) == 0:
                stack.append(current_seed_image)

            while len(stack) > 0:
                try:
                    image_file = stack[-1]
                    print("current image in stack " + image_file)
                    image = cv2.imread(image_file)
                    covered, total, p = model.predict(image)
                    new_generated = False
                    for _ in range(maxtrynumber):
                        tid = random.sample([0, 1, 2, 3, 4, 5, 6], 2)
                        # Prefer a transformation that paid off recently.
                        if len(cache) > 0:
                            tid[0] = cache.popleft()
                        # BUGFIX: start from the current image once and chain
                        # both transformations; previously new_image was reset
                        # to `image` inside the loop, so the first transform's
                        # result was discarded and only the second applied.
                        new_image = image
                        for j in range(2):
                            transformation = transformations[tid[j]]
                            # random choose parameter
                            param = random.sample(trans_params[tid[j]], 1)[0]
                            print("transformation " + str(transformation) + "  parameter " + str(param))
                            new_image = transformation(new_image, param)

                        if model.predict(new_image, test=1):
                            print("Generated image increases coverage and will be added to population.")
                            cache.append(tid[0])
                            cache.append(tid[1])
                            generated = generated + 1
                            name = os.path.basename(current_seed_image) + '_' + str(generated) + '.jpg'
                            name = os.path.join(new_input, name)
                            cv2.imwrite(name, new_image)
                            stack.append(name)

                            # BUGFIX: fold the GENERATED image into the
                            # coverage; previously the unchanged source image
                            # was re-predicted, so the recorded coverage never
                            # reflected the new test case.
                            covered, total, p = model.predict(new_image)
                            C = covered
                            T = total
                            P = p
                            if len(stack) >= 2:
                                parent = os.path.basename(stack[-2])
                            else:
                                parent = os.path.basename(current_seed_image)
                            csvrecord = [100 - len(queue),
                                         os.path.basename(current_seed_image),
                                         parent,
                                         generated,
                                         C, T, P]
                            print(csvrecord)
                            writer.writerow(csvrecord)
                            new_generated = True
                            break
                        else:
                            print("Generated image does not increase coverage.")

                    # Nothing useful came from this image: backtrack.
                    if not new_generated:
                        stack.pop()

                    # Checkpoint so a rerun resumes from this point.
                    save_object(stack, stack_path)
                    save_object(queue, queue_path)
                    save_object(model.nc.cov_dict, covdict2_path)
                    save_object(generated, generated_path)

                except Exception:
                    # BUGFIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit; also guard the pop so an
                    # empty stack cannot raise IndexError here.
                    print("value error")
                    if stack:
                        stack.pop()
                    save_object(stack, stack_path)
                    save_object(queue, queue_path)

            queue.popleft()


if __name__ == '__main__':
    # Command-line entry point: configure dataset/model locations and run
    # the coverage-guided search.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--params_set", nargs='*', type=str,
                            default=["Car", "DenseNet121"],
                            help="see params folder")
    arg_parser.add_argument("--model", type=str, default="DenseNet121")
    arg_parser.add_argument("--data", type=str, default="Car")
    arg_parser.add_argument("--model_path", type=str, default="./data/neural_networks")
    arg_parser.add_argument("--deeptest_path", type=str, default="./output/deeptest")
    arg_parser.add_argument("--data_path", type=str, default="./data/data_set")
    vgg_guided(arg_parser.parse_args())
