#!/usr/bin/python3
# -*- coding: utf-8 -*-

"""
Created on 2021/10/20
@author: Yuze Xuan
"""

import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.callbacks import TensorBoard
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.losses import SparseCategoricalCrossentropy
from tensorflow.python.keras.optimizer_v2.adam import Adam

# Dataset settings
DATA_PATH = 'iris-data/iris.data'  # raw CSV, no header row (names supplied below)
DATA_HEADERS = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
# Map species name -> integer class id, as required by sparse categorical loss.
LABEL_MAP = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
LABEL_COL_NAME = 'class'
RANDOM_SEED = 6  # seed for the sklearn train/test shuffle
# Fractions of the whole dataset; expected to sum to 1.0.
SPLIT_RATE = {'train': 0.7, 'val': 0.15, 'test': 0.15}
# Model settings
TF_SEED = 55  # named constant instead of a magic literal, consistent with RANDOM_SEED
tf.random.set_seed(TF_SEED)  # fix weight init / shuffling inside TensorFlow

# Load data: the raw file has no header row, so supply column names explicitly.
data = pd.read_csv(DATA_PATH, delimiter=',', names=DATA_HEADERS, index_col=False)
# Keep only rows whose species is one of the three known classes
# (also drops any malformed/trailing lines in the raw file).
data = data[data[LABEL_COL_NAME].isin(LABEL_MAP.keys())]
# Data description analysis: pairwise feature scatter plots colored by class.
sns.pairplot(data, hue=LABEL_COL_NAME)
plt.savefig('res_img/data_description.png')
plt.show()
# Transform labels: vectorized Series.map replaces the per-class assignment loop.
# Every remaining row is guaranteed to be a LABEL_MAP key after the filter above,
# so map() introduces no NaNs.
data[LABEL_COL_NAME] = data[LABEL_COL_NAME].map(LABEL_MAP)
label = data[LABEL_COL_NAME].to_numpy(dtype=np.int64)
data = data.drop(columns=LABEL_COL_NAME).to_numpy(dtype=np.float64)
# Randomize and split dataset: carve off the test fraction first ...
x_train_val, x_test, y_train_val, y_test = train_test_split(data, label, test_size=SPLIT_RATE['test'],
                                                            random_state=RANDOM_SEED)
# ... then take the validation slice from the end of the remaining pool.
# val_num scales the *actual* test-set length by the val/test ratio (equal
# fractions here, so val and test end up the same size); the round() keeps
# the original split arithmetic intact.
val_num = round(len(y_test) * SPLIT_RATE['val'] / SPLIT_RATE['test'])
x_train, x_val = x_train_val[:-val_num, :], x_train_val[-val_num:, :]
y_train, y_val = y_train_val[:-val_num], y_train_val[-val_num:]

# Define model: a small feed-forward classifier, 4 features -> 7 hidden -> 3 classes.
model = Sequential([
    Dense(7, input_dim=4, activation='relu'),
    Dense(3, activation='softmax'),
])
# Show model
model.summary()
# Compile model: integer labels + softmax output pair with sparse CE loss.
model.compile(
    loss=SparseCategoricalCrossentropy(),
    optimizer=Adam(learning_rate=0.01),
    metrics=['accuracy'],
)
# Train model, logging to TensorBoard's default ./logs directory.
history = model.fit(
    x_train,
    y_train,
    epochs=150,
    batch_size=64,
    validation_data=(x_val, y_val),
    callbacks=[TensorBoard()],
)
# Test model: evaluate() returns [loss, accuracy] per the compiled metrics.
score = model.evaluate(x_test, y_test)
print('Test Loss: %.4f, Test accuracy: %.4f' % (score[0], score[1]))

# Visualize the result
# To run TensorBoard please use: tensorboard --logdir logs
# Extract the per-epoch curves recorded by model.fit().
hist = history.history
panels = [
    ('Training and Validation Accuracy',
     [(hist['accuracy'], 'Training Accuracy'),
      (hist['val_accuracy'], 'Validation Accuracy')]),
    ('Training and Validation Loss',
     [(hist['loss'], 'Training Loss'),
      (hist['val_loss'], 'Validation Loss')]),
]

# Plot the result: accuracy panel on the left, loss panel on the right.
for panel_idx, (panel_title, curves) in enumerate(panels, start=1):
    plt.subplot(1, 2, panel_idx)
    for series, curve_label in curves:
        plt.plot(series, label=curve_label)
    plt.title(panel_title)
    plt.legend()
plt.savefig('res_img/train_val_loss_acc.png')
plt.show()
