import numpy as np
import time
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tqdm import tqdm
import os
import cv2
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import csv

class Experiment:
    """Mutable attribute bag describing one fuzzing run.

    ``get_experiment`` populates it with: ``datasets``, ``model``,
    ``coverage``, ``input_chooser``, ``start_time``, ``iteration`` and
    ``termination_condition``.
    """
    pass


def get_experiment(params):
    """Assemble a fully wired :class:`Experiment` from the run parameters.

    The construction order matters: dataset -> model -> coverage -> input
    chooser, because each later ``_get_*`` step reads the components that
    were attached to the experiment before it.
    """
    exp = Experiment()
    exp.datasets = _get_dataset(params, exp)
    exp.model = _get_model(params, exp)
    exp.coverage = _get_coverage(params, exp)
    exp.input_chooser = _get_input_chooser(params, exp)
    exp.start_time = time.time()
    exp.iteration = 0
    exp.termination_condition = generate_termination_condition(exp, params)
    return exp


def generate_termination_condition(experiment, params):
    """Build a zero-argument predicate that is True once the run should stop.

    The chooser, coverage object, start time and all limits are snapshotted
    at creation time; only ``experiment.iteration`` is read live on each call.
    Stopping criteria (any one suffices): enough new inputs generated, time
    budget exceeded, coverage saturated at 100, or iteration budget exceeded.
    """
    chooser = experiment.input_chooser
    coverage = experiment.coverage
    started_at = experiment.start_time
    max_new_inputs = params.nb_new_inputs
    max_seconds = params.time_period
    max_iterations = params.nb_iterations

    def termination_condition():
        # All four conditions are evaluated (no short-circuit), matching the
        # original behavior in case get_current_coverage() has side effects.
        enough_inputs = len(chooser) - chooser.initial_nb_inputs > max_new_inputs
        out_of_time = time.time() - started_at > max_seconds
        fully_covered = coverage.get_current_coverage() == 100
        over_iterations = max_iterations is not None and experiment.iteration > max_iterations
        return any((enough_inputs, out_of_time, fully_covered, over_iterations))

    return termination_condition


def clean_labels(example):
    """Collapse one row's ``labels`` records into a comma-joined string of
    unique ``category`` values (first-occurrence order preserved).

    Parameters:
    ----------
        example: one cell of the ``labels`` column (a list of label records),
                as passed by df['labels'].map(clean_labels)

    Returns:
    -------
        a string of unique category names separated by commas
    """
    categories = pd.DataFrame.from_records(example)['category'].unique()
    return ','.join(categories.tolist())


def _get_dataset(params, experiment):
    """Load train/test images and labels for the dataset named by ``params.dataset``.

    Parameters:
    ----------
        params: run configuration; only ``params.dataset`` is read here.
        experiment: the Experiment being assembled (unused; kept so all
                ``_get_*`` builders share one signature).

    Returns:
    -------
        dict with keys "train_inputs", "train_outputs", "test_inputs",
        "test_outputs".

    Raises:
    ------
        Exception: if ``params.dataset`` is not "Car", "BIT", "GTSRB" or "BKK100".
    """
    if params.dataset == "Car":
        train_images, train_labels, test_images, test_labels = _load_car_dataset()
    elif params.dataset == "BIT":
        train_images, train_labels, test_images, test_labels = _load_bit_dataset()
    elif params.dataset == "GTSRB":
        train_images, train_labels, test_images, test_labels = _load_gtsrb_dataset()
    elif params.dataset == "BKK100":
        train_images, train_labels, test_images, test_labels = _load_bdd100k_dataset()
    else:
        raise Exception("Unknown Dataset:" + str(params.dataset))

    return {
        "train_inputs": train_images,
        "train_outputs": train_labels,
        "test_inputs": test_images,
        "test_outputs": test_labels
    }


def _one_hot(labels, num_class):
    """One-hot encode integer class labels over ``num_class`` classes."""
    lb = preprocessing.LabelBinarizer().fit(np.array(range(num_class)))
    return lb.transform(labels)


def _load_car_dataset():
    """Car dataset (49 classes, 128x128 RGB images).

    Rows of Car_label.csv whose 4th field is '1' form the test set; the
    train split is intentionally left empty (the train-side rows were
    disabled in the original setup).
    """
    input_shape = (128, 128, 3)  # 3-channel image data
    num_class = 49  # number of classes
    data_path = "D:\Projects\PycharmProjects\Grad_/data/data_set/"
    test_images = []
    test_labels = []
    # `with` ensures the CSV handle is closed (the original leaked it).
    with open('D:\Projects\PycharmProjects\Grad_/data/data_set/Car_label.csv', 'r', encoding='utf-8') as fh:
        print("readered label.csv")
        for row in csv.reader(fh):
            if row[3] == '1':
                img = cv2.imread(data_path + row[1])
                img = cv2.resize(img, (input_shape[0], input_shape[1]))
                test_images.append(img)
                test_labels.append(int(row[2]))

    train_images = np.array([]).astype('float32') / 255.
    train_labels = np.array([])
    test_images = np.array(test_images).astype('float32') / 255.
    test_labels = np.array(test_labels)

    # Multi-class task -> one-hot encode the labels.
    test_labels = _one_hot(test_labels, num_class)

    # BUG FIX: the original seeded once and then called np.random.shuffle on
    # images and labels separately, so each array received a DIFFERENT
    # permutation and the image/label pairing was destroyed. Apply one shared
    # permutation to both arrays instead.
    np.random.seed(200)
    perm = np.random.permutation(len(test_images))
    test_images = test_images[perm]
    test_labels = test_labels[perm]
    print("data are load")
    return train_images, train_labels, test_images, test_labels


def _load_bit_dataset():
    """BIT dataset (6 classes, 32x32 RGB images); 40/60 train/test split."""
    input_shape = (32, 32, 3)  # 3-channel image data
    num_class = 6  # number of classes
    data_path = "D:\Projects\PycharmProjects\Grad_/data/data_set/BIT/"
    X_data = []
    Y_data = []
    with open('D:\Projects\PycharmProjects\Grad_/data/data_set/BIT_label.csv', 'r', encoding='utf-8') as fh:
        print("readered label.csv")
        for row in csv.reader(fh):
            img = cv2.imread(data_path + row[1])
            img = cv2.resize(img, (input_shape[0], input_shape[1]))
            X_data.append(img)
            Y_data.append(int(row[2]))

    train_images, test_images, train_labels, test_labels = train_test_split(X_data, Y_data, test_size=0.6)
    train_images = np.array(train_images).astype('float32') / 255.
    test_images = np.array(test_images).astype('float32') / 255.
    # Multi-class task -> one-hot encode both splits.
    train_labels = _one_hot(np.array(train_labels), num_class)
    test_labels = _one_hot(np.array(test_labels), num_class)
    print("data are load")
    return train_images, train_labels, test_images, test_labels


def _load_gtsrb_dataset():
    """GTSRB traffic-sign dataset (43 classes, 32x32 RGB); 80/20 train/test split.

    Each class lives in its own numerically-named sub-folder. Files whose
    name starts with 'G' are skipped (presumably the GT-*.csv annotation
    files shipped alongside the images — TODO confirm).
    """
    input_shape = (32, 32, 3)  # 3-channel image data
    num_class = 43  # number of classes
    path = 'D:\Projects\PycharmProjects\Grad_/data/data_set/GTSRB/Final_Training/Images'
    X_data = []
    Y_data = []
    for folder in tqdm(os.listdir(path)):
        label = int(folder)  # folder name encodes the class id
        for photo_file in os.listdir(path + '/' + folder):
            if photo_file[0] == 'G':  # skip non-image 'G*' files
                continue
            photo_file_path = path + '/' + folder + '/' + photo_file
            img = cv2.imread(photo_file_path, 1)
            img = cv2.resize(img, (input_shape[0], input_shape[1]))
            X_data.append(img)
            Y_data.append(label)
    X_data = np.array(X_data)
    X_data = X_data / 255.0
    Y_data = np.array(Y_data)

    # Split, then one-hot encode both label sets.
    train_images, test_images, train_labels, test_labels = train_test_split(X_data, Y_data, test_size=0.2)
    train_labels = _one_hot(train_labels, num_class)
    test_labels = _one_hot(test_labels, num_class)
    print("data are load")
    return train_images, train_labels, test_images, test_labels


def _load_bdd100k_dataset():
    """BDD100K dataset: multi-label targets derived from the val-split JSON.

    Only the validation images are used; ImageDataGenerator re-splits them
    70/30 into train/test via ``validation_split``. NOTE(review): loading the
    single oversized batches below is slow (~30 min in the original setup).
    """
    target_list = ['bus', 'car', 'rider', 'motor', 'truck', 'train', 'traffic light', 'bike', 'lane',
                   'drivable area', 'person', 'traffic sign']
    drop_columns = ['attributes', 'timestamp']  # metadata not needed by the model
    val_folder = 'D:\Projects\PycharmProjects\DeepSmartFuzzer\src\data\\bdd100k\images\\100k\\val'
    df_val = pd.read_json(
        'D:\Projects\PycharmProjects\DeepSmartFuzzer\src\data\\bdd100k\labels\\bdd100k_labels_images_val.json')
    # Per image: comma-joined unique categories, used for substring matching below.
    df_val['clean_labels'] = df_val['labels'].map(clean_labels)
    data_val = df_val.copy()
    # One binary indicator column per target class.
    for target in target_list:
        data_val[target] = data_val['clean_labels'].str.contains(target).astype(int)
    data_val = data_val.drop(columns=drop_columns)
    IMG_SIZE = (86, 86)

    train_datagen = ImageDataGenerator(rescale=1. / 255, validation_split=.3)
    # One generator, two subsets: 'training' and 'validation'.
    train_set = train_datagen.flow_from_dataframe(data_val, directory=val_folder,
                                                  x_col='name', y_col=target_list,
                                                  class_mode="multi_output", seed=24,
                                                  subset='training', batch_size=7000,
                                                  target_size=IMG_SIZE)
    test_set = train_datagen.flow_from_dataframe(data_val, directory=val_folder,
                                                 x_col='name', y_col=target_list,
                                                 class_mode="multi_output",
                                                 seed=24, batch_size=3000,
                                                 subset='validation',
                                                 target_size=IMG_SIZE)
    # Batch sizes exceed the subset sizes, so one `next` yields the whole split.
    train_images, train_labels = next(train_set)
    test_images, test_labels = next(test_set)
    # multi_output yields labels as (n_targets, batch); transpose to (batch, n_targets).
    train_labels = np.array(train_labels).reshape(len(train_labels), -1).T
    test_labels = np.array(test_labels).reshape(len(test_labels), -1).T
    return train_images, train_labels, test_images, test_labels


def _get_model(params, experiment):
    import os
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

    if params.model == "INCEPTION_NET":
        from tensorflow.keras.models import load_model
        model = load_model("../data/neural_networks/inception_model.hdf5")
    elif params.model == "ResNet50":
        from tensorflow.keras.models import load_model
        model = load_model("D:\Projects\PycharmProjects\Grad_/data/neural_networks/BIT_ResNet50.h5")
    elif params.model == "DenseNet121":
        from tensorflow.keras.models import load_model
        model = load_model("D:\Projects\PycharmProjects\Grad_/data/neural_networks/BIT_DenseNet121.h5")
    elif params.model == "VGG16":
        from tensorflow.keras.models import load_model
        model = load_model("D:\Projects\PycharmProjects\Grad_/data/neural_networks/BIT_VGG16.h5")
    else:
        raise Exception("Unknown Model:" + str(params.model))

    return model


def _get_coverage(params, experiment):
    # if not params.implicit_reward:
    params.calc_implicit_reward_neuron = None
    params.calc_implicit_reward = None

    # handle input scaling before giving input to model
    def input_scaler(test_inputs):
        model_lower_bound = params.model_input_scale[0]
        model_upper_bound = params.model_input_scale[1]
        input_lower_bound = params.input_lower_limit
        input_upper_bound = params.input_upper_limit
        scaled_input = (test_inputs - input_lower_bound) / (input_upper_bound - input_lower_bound)
        scaled_input = scaled_input * (model_upper_bound - model_lower_bound) + model_lower_bound
        return scaled_input

    if params.coverage == "neuron":
        from coverages.neuron_cov import NeuronCoverage
        # TODO: Skip layers should be determined autoamtically
        coverage = NeuronCoverage(experiment.model, skip_layers=params.skip_layers,
                                  calc_implicit_reward_neuron=params.calc_implicit_reward_neuron,
                                  calc_implicit_reward=params.calc_implicit_reward)  # 0:input, 5:flatten
    elif params.coverage == "mutation":
        from coverages.kill_mutation import Mutation_Kill
        coverage = Mutation_Kill(experiment.model, params)
    else:
        raise Exception("Unknown Coverage" + str(params.coverage))

    coverage._step = coverage.step
    coverage.step = lambda test_inputs, test_labels, *a, **kwa: coverage._step(test_inputs, test_labels, *a, **kwa)

    return coverage


def _get_input_chooser(params, experiment):
    if params.input_chooser == "random":
        from src.input_chooser import InputChooser
        input_chooser = InputChooser(experiment.datasets["test_inputs"], experiment.datasets["test_outputs"])
    elif params.input_chooser == "clustered_random":
        from src.clustered_input_chooser import ClusteredInputChooser
        input_chooser = ClusteredInputChooser(experiment.datasets["test_inputs"], experiment.datasets["test_outputs"])
    else:
        raise Exception("Unknown Input Chooser" + str(params.input_chooser))

    return input_chooser
