#!/usr/bin/env python3
'''
Copyright © 2021 DUE TUL
@ date  : Friday, January 15, 2020
@ desc  : This module trains the CNN sound classifier on TFRecord data
@ author:
'''
import tensorflow as tf
import reader
import numpy as np
EPOCHS = 50     # number of passes over the training TFRecord (consumed by reader below)
class_dim = 4   # number of target classes — must match the final softmax layer width
import math     # NOTE(review): unused in the visible code — TODO confirm before removing

# Run in eager mode (TF 1.x API); must be called before any other TF op.
tf.enable_eager_execution()

# Small LeNet-style CNN classifier.
# Input is a single-channel (128, 214) feature map — presumably a
# spectrogram; the training loop below reshapes flat records to
# (-1, 128, 214, 1), so the two must stay in sync.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(filters=20, kernel_size=5, activation=tf.nn.relu, input_shape=(128, 214, 1)),
    tf.keras.layers.Conv2D(filters=50, kernel_size=5, activation=tf.nn.relu),
    tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(units=128, activation=tf.nn.relu),
    tf.keras.layers.Dense(units=class_dim, activation=tf.nn.softmax)
])

# define optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

# summary() prints the architecture itself and returns None — the original
# `m = model.summary(); print('model:', m)` only added a useless "model: None".
model.summary()

# Load the pre-serialized train/test splits through the project's reader
# helpers; the train reader repeats the data for EPOCHS passes.
TRAIN_TFRECORD = 'dataset/tfrecords/non-sacle-merge_train.tfrecord'
TEST_TFRECORD = 'dataset/tfrecords/non-sacle-merge_test.tfrecord'

train_dataset = reader.train_reader_tfrecord(TRAIN_TFRECORD, EPOCHS)
test_dataset = reader.test_reader_tfrecord(TEST_TFRECORD)
for batch_id, data in enumerate(train_dataset):
    # Reshape flat feature vectors to (batch, 128, 214, 1) to match the
    # model's input_shape.
    sounds = data['data'].numpy().reshape((-1, 128, 214, 1))
    labels = data['label']

    # Forward pass + loss under the tape. training=True is required so that
    # BatchNormalization uses batch statistics — the original omitted it,
    # silently running BN in inference mode during training.
    with tf.GradientTape() as tape:
        predictions = model(sounds, training=True)
        train_loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(labels, predictions))

    # Accuracy is reporting-only: compute it outside the tape so it is not
    # needlessly traced for gradients. np.mean replaces the manual sum/len.
    train_accuracy = np.mean(
        tf.keras.metrics.sparse_categorical_accuracy(labels, predictions).numpy())

    # Backprop and parameter update.
    gradients = tape.gradient(train_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    if batch_id % 20 == 0:
        print("Batch %d, Loss %f, Accuracy %f" % (batch_id, train_loss.numpy(), train_accuracy))

    # Periodic evaluation + checkpoint every 100 batches (skip batch 0).
    if batch_id % 100 == 0 and batch_id != 0:
        test_losses = []
        test_accuracies = []
        for d in test_dataset:
            test_sounds = d['data'].numpy().reshape((-1, 128, 214, 1))
            test_labels = d['label']
            # Explicit inference mode so BN uses its moving statistics.
            test_result = model(test_sounds, training=False)
            # get loss of test
            test_loss = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(test_labels, test_result))
            test_losses.append(float(test_loss.numpy()))
            # get accuracy of test (mean per-example hit rate for this batch)
            test_accuracy = np.mean(
                tf.keras.metrics.sparse_categorical_accuracy(test_labels, test_result).numpy())
            print('test accuracy:', test_accuracy)
            test_accuracies.append(test_accuracy)

        print('=================================================')
        print("Test, Loss %f, Accuracy %f" % (
            sum(test_losses) / len(test_losses), sum(test_accuracies) / len(test_accuracies)))
        print('=================================================')

        # save model checkpoint (overwrites the previous one)
        model.save(filepath='models/non-sacle-new-merge-cnn130214.h5')
print("done!")