import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as k
from tensorflow import keras

# AUTOTUNE lets tf.data pick prefetch/parallelism buffer sizes at runtime
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Training batch size and number of epochs
BATCH_SIZE = 32
EPOCHs = 2
# Integer class labels: CAT is 0 and DOG is 1
CAT = 0
DOG = 1

# Build the image path lists for training, validation and prediction.
# Training images: cats 0..9999 and dogs 0..9999 under ./data/train_total
train_list = np.hstack((
    ['./data/train_total/cat.%d.jpg' % i for i in range(0, 10000)],
    ['./data/train_total/dog.%d.jpg' % i for i in range(0, 10000)]
))

# Validation images: cats 10000..12499 and dogs 10000..12499
val_list = np.hstack((
    ['./data/train_total/cat.%d.jpg' % i for i in range(10000, 12500)],
    ['./data/train_total/dog.%d.jpg' % i for i in range(10000, 12500)],
))

# Prediction images: 1.jpg .. 12499.jpg under ./data/test
# NOTE(review): range(1, 12500) excludes 12500.jpg — if the test set is
# numbered 1..12500 inclusive this silently drops the last image; confirm
# against the actual contents of ./data/test
predict_list = np.array(['./data/test/%d.jpg' % i for i in range(1, 12500)])

# Labels aligned with train_list/val_list order: CAT (0) block first, then DOG (1)
train_label_list = np.hstack((np.zeros(10000), np.ones(10000)))
val_label_list = np.hstack((np.zeros(2500), np.ones(2500)))

# total number of train, validation and prediction images
train_img_count = len(train_list)
val_img_count = len(val_list)
predict_img_count = len(predict_list)

# Number of batches per epoch for training / validation / prediction.
# Computed with np.ceil as plain Python ints instead of building a TF op and
# pulling the value back out with .numpy() — same values, no tensor round trip.
steps_per_epoch = int(np.ceil(train_img_count / BATCH_SIZE))
steps_val = int(np.ceil(val_img_count / BATCH_SIZE))
steps_predict = int(np.ceil(predict_img_count / BATCH_SIZE))


def change_jpg_to_tensor(jpg_path: str, target_size=(256, 256)) -> tf.Tensor:
    """
    Read the image file at jpg_path and convert it to a normalized tf.Tensor.
    :param jpg_path: path of the jpg file to load
    :param target_size: (height, width) to resize to; defaults to (256, 256),
                        the input size the models below expect
    :return: float image tensor of shape (height, width, 3) with values in [0, 1]
    """
    img_raw = tf.io.read_file(jpg_path)
    # channels=3 forces RGB output even for grayscale source images
    img_tensor = tf.image.decode_jpeg(img_raw, channels=3)
    # resize so every image in the dataset has the same shape
    # (the original comment claimed [192, 192] while the code used [256, 256];
    # the size is now an explicit parameter)
    img_tensor = tf.image.resize(img_tensor, list(target_size))
    img_tensor /= 255.0  # tf.image.resize outputs floats; normalize to [0, 1]
    return img_tensor


def create_image_dataset(path_list: np.ndarray, label_list: np.ndarray, img_count: int) -> tf.data.Dataset:
    """
    Create a TensorFlow dataset of (image, label) pairs for training/validation.
    :param path_list: array of image file paths
    :param label_list: array of numeric labels aligned with path_list
    :param img_count: number of images; used as the shuffle buffer size so the
                      shuffle covers the whole dataset
    :return: shuffled, repeated, batched and prefetched (image, label) dataset
    """
    path_ds = tf.data.Dataset.from_tensor_slices(path_list)
    # decode/resize images in parallel while the pipeline runs
    img_ds = path_ds.map(change_jpg_to_tensor, num_parallel_calls=AUTOTUNE)
    label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(label_list, tf.int64))
    image_label_ds = tf.data.Dataset.zip((img_ds, label_ds))

    # shuffle across the full dataset, repeat for multi-epoch training, batch
    image_label_ds = image_label_ds.shuffle(buffer_size=img_count).repeat().batch(batch_size=BATCH_SIZE)
    # BUG FIX: prefetch() returns a new dataset — the original discarded the
    # return value, so no prefetching actually happened
    image_label_ds = image_label_ds.prefetch(buffer_size=AUTOTUNE)
    return image_label_ds


def create_predict_image_dataset(path_list: np.ndarray, img_count: int) -> tf.data.Dataset:
    """
    Create an image dataset for prediction, without labels.

    The dataset is deliberately NOT shuffled or repeated: predictions must stay
    in path_list order so each output can be mapped back to its input file, and
    a finite dataset yields exactly one prediction per image.
    :param path_list: array of image file paths
    :param img_count: number of images in path_list (kept for interface
                      compatibility; no longer needed now that there is no shuffle)
    :return: batched and prefetched image dataset in path_list order
    """
    path_ds = tf.data.Dataset.from_tensor_slices(path_list)
    img_ds = path_ds.map(change_jpg_to_tensor, num_parallel_calls=AUTOTUNE)

    # BUG FIX: the original shuffled and repeated here, which scrambles the
    # correspondence between predictions and file names and makes the dataset
    # infinite; it also discarded the dataset returned by prefetch()
    img_ds = img_ds.batch(batch_size=BATCH_SIZE)
    img_ds = img_ds.prefetch(buffer_size=AUTOTUNE)
    return img_ds


def compile_model(model):
    """
    Compile a model with the shared optimizer, loss and metric set.

    Every model in this script is compiled identically: Adam optimizer,
    binary cross-entropy loss, overall binary accuracy plus the per-class
    dog/cat accuracy metrics defined below.
    :param model: keras.Model to compile in place
    :return: None
    """
    metrics = [keras.metrics.binary_accuracy, dog_accuracy, cat_accuracy]
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.binary_crossentropy,
                  metrics=metrics)


def build_dnn_model() -> keras.Model:
    """
    Build a fully-connected network: a Flatten input layer, three ReLU hidden
    layers (512, 256 and 128 units) and a single sigmoid output unit for
    binary classification.
    :return: the compiled deep neural network
    """
    model = keras.Sequential()
    # flatten each (256, 256, 3) image tensor into one feature vector
    model.add(keras.layers.Flatten(input_shape=(256, 256, 3)))
    # three progressively narrower ReLU hidden layers
    for units in (512, 256, 128):
        model.add(keras.layers.Dense(units=units, activation=keras.activations.relu))
    # binary classification: a single sigmoid output unit
    model.add(keras.layers.Dense(units=1, activation=keras.activations.sigmoid))
    compile_model(model)
    return model


def build_cnn_model() -> keras.Model:
    """
    Build a convolutional network: Conv2D(32) -> MaxPool -> Conv2D(64) ->
    MaxPool -> Conv2D(64) as the feature extractor, then Flatten ->
    Dense(128) -> a single sigmoid output unit.
    :return: the compiled convolutional neural network
    """
    model = keras.Sequential()
    # convolutional feature extractor over the (256, 256, 3) input images
    model.add(keras.layers.Conv2D(32, (3, 3),
                                  activation=keras.activations.relu,
                                  input_shape=(256, 256, 3)))
    model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Conv2D(64, (3, 3), activation=keras.activations.relu))
    model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Conv2D(64, (3, 3), activation=keras.activations.relu))
    # dense classifier head on top of the flattened feature maps
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(128, activation=keras.activations.relu))
    # binary classification: a single sigmoid output unit
    model.add(keras.layers.Dense(1, activation=keras.activations.sigmoid))
    compile_model(model)
    return model


def build_rnn_model() -> keras.Model:
    """
    Build a simple recurrent network over image rows: each (256, 256, 3)
    image is reshaped into a sequence of 256 row vectors of length 256 * 3
    (SimpleRNN cannot take the 3-D image directly), fed through a 32-unit
    SimpleRNN, then a single sigmoid output unit.
    :return: the compiled recurrent neural network
    """
    reshape = keras.layers.Reshape(target_shape=(256, 256 * 3),
                                   input_shape=(256, 256, 3))
    recurrent = keras.layers.SimpleRNN(units=32)
    # binary classification: a single sigmoid output unit
    classifier = keras.layers.Dense(1, activation=keras.activations.sigmoid)
    model = keras.Sequential([reshape, recurrent, classifier])
    compile_model(model)
    return model


def model_train(model: keras.Model, train_ds: tf.data.Dataset, epochs: int):
    """
    Train a model on the training dataset.
    :param model: model to train
    :param train_ds: training dataset; it repeats, so steps_per_epoch batches
                     are drawn from it in every epoch
    :param epochs: number of training epochs
    :return: the keras History object produced by model.fit
    """
    return model.fit(train_ds, epochs=epochs, steps_per_epoch=steps_per_epoch)


def dog_accuracy(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
    """Metric: accuracy computed over the DOG (label 1) samples only."""
    return accuracy(y_true, y_pred, DOG)


def cat_accuracy(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
    """Metric: accuracy computed over the CAT (label 0) samples only."""
    return accuracy(y_true, y_pred, CAT)


def accuracy(y_true: tf.Tensor, y_pred: tf.Tensor, class_type: int, threshold=0.5) -> tf.Tensor:
    """
    Per-class accuracy metric, used in compile_model's metrics list.

    With CAT = 0 and DOG = 1, a thresholded prediction and its label sum to
    class_type * 2 exactly when a class_type sample is classified correctly.
    E.g. y_true = [1, 1, 0, 0] and thresholded y_pred = [0, 1, 0, 1] give
    y_true + y_pred = [1, 2, 0, 1]: index 1 is a correctly classified DOG
    (sum 2) and index 2 a correctly classified CAT (sum 0).

    :param y_true: ground-truth labels (0 or 1)
    :param y_pred: model predictions in [0, 1]
    :param class_type: the class (CAT = 0 or DOG = 1) to measure
    :param threshold: predictions above this value count as class 1,
                      otherwise class 0
    :return: tf.Tensor, fraction of class_type samples classified correctly
    """
    threshold = k.cast(threshold, y_pred.dtype)
    # label + prediction sum that marks a correct class_type classification
    true_judge = k.cast(class_type * 2, y_pred.dtype)
    # label value that marks a sample of class_type
    total_judge = k.cast(class_type, y_pred.dtype)
    # binarize the predictions at the threshold
    y_pred = k.cast(y_pred > threshold, dtype=y_pred.dtype)
    # number of class_type samples classified correctly
    true = k.sum(k.cast(y_true + y_pred == true_judge, dtype=y_pred.dtype))
    # number of class_type samples in the batch
    total = k.sum(k.cast(y_true == total_judge, dtype=y_true.dtype))
    # BUG FIX: guard the division for a batch with no class_type samples —
    # total is 0 there (and so is true), so the metric reports 0 instead of NaN
    return true / k.maximum(total, k.cast(1, total.dtype))


def model_val(model: keras.Model, test_ds: tf.data.Dataset) -> (float, float, float, float):
    """
    Evaluate a model on the validation dataset, drawing steps_val batches.
    :param model: model to evaluate
    :param test_ds: validation dataset
    :return: (loss, overall binary accuracy, dog accuracy, cat accuracy),
             matching the metric order set in compile_model
    """
    loss, overall_acc, dog_acc, cat_acc = model.evaluate(test_ds, steps=steps_val)
    return loss, overall_acc, dog_acc, cat_acc


def model_predict(model: keras.Model, predict_ds: tf.data.Dataset):
    """
    Run prediction over the unlabeled prediction dataset.
    :param model: trained model
    :param predict_ds: dataset of images without labels
    :return: the array returned by model.predict over steps_predict batches
    """
    predictions = model.predict(predict_ds, steps=steps_predict)
    return predictions


if __name__ == '__main__':
    # build the training, validation and prediction datasets
    train_image_ds = create_image_dataset(path_list=train_list,
                                          label_list=train_label_list,
                                          img_count=train_img_count)
    val_image_ds = create_image_dataset(path_list=val_list,
                                        label_list=val_label_list,
                                        img_count=val_img_count)
    predict_image_ds = create_predict_image_dataset(path_list=predict_list,
                                                    img_count=predict_img_count)

    # build and compile the three competing models
    dnn_model = build_dnn_model()
    cnn_model = build_cnn_model()
    rnn_model = build_rnn_model()

    dnn_model.summary()
    cnn_model.summary()
    rnn_model.summary()
    # train all three models on the same training dataset
    model_train(dnn_model, train_image_ds, EPOCHs)
    model_train(cnn_model, train_image_ds, EPOCHs)
    model_train(rnn_model, train_image_ds, EPOCHs)
    # evaluate the models; model_val returns
    # (loss, overall accuracy, dog accuracy, cat accuracy) in that order.
    # BUG FIX: the original unpacked cat before dog here, which swapped the
    # per-class accuracies reported in the print below.
    dnn_test_loss, dnn_test_acc, dnn_dog_acc, dnn_cat_acc = model_val(dnn_model, val_image_ds)
    cnn_test_loss, cnn_test_acc, cnn_dog_acc, cnn_cat_acc = model_val(cnn_model, val_image_ds)
    rnn_test_loss, rnn_test_acc, rnn_dog_acc, rnn_cat_acc = model_val(rnn_model, val_image_ds)

    print("DNN test loss: %f, total test accuracy: %f, dog accuracy: %f, cat accuracy: %f\n"
          "CNN test loss: %f, total test accuracy: %f, dog accuracy: %f, cat accuracy: %f\n"
          "RNN test loss: %f, total test accuracy: %f, dog accuracy: %f, cat accuracy: %f\n"
          % (
              dnn_test_loss, dnn_test_acc, dnn_dog_acc, dnn_cat_acc,
              cnn_test_loss, cnn_test_acc, cnn_dog_acc, cnn_cat_acc,
              rnn_test_loss, rnn_test_acc, rnn_dog_acc, rnn_cat_acc,
          ))
