#EEG_classify.py
import random
from datetime import datetime

import numpy as np
import scipy.io
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers

# Load the preprocessed data (expects variables 'X' and 'Y' in the .mat file).
data = scipy.io.loadmat(r'data_preprocessed.mat')
x_data = data['X']
y_data = data['Y']

# Split into train / dev / test sets (200 / 28 / 50 trials).
# FIX: the original used slices like x_data[0:tr-1], which silently dropped
# the last sample of every split (199/27/49 instead of 200/28/50) and left
# three samples unused. Python slices already exclude the stop index, so no
# "-1" is needed.
tr, dev, test = 200, 28, 50
dev += tr    # dev  -> 228: end index (exclusive) of the dev split
test += dev  # test -> 278: end index (exclusive) of the test split

x_train = x_data[:tr]
x_dev = x_data[tr:dev]
x_test = x_data[dev:test]

y_train = y_data[:tr]
y_dev = y_data[tr:dev]
y_test = y_data[dev:test]

# Time samples per trial; assumes X is shaped (trials, 64 channels, 300
# samples) -- TODO confirm against the preprocessing script.
sample = 300

# Hyperparameter search space, registered with TensorBoard's HParams plugin.
HP_FILTERS     = hp.HParam('filters', hp.IntInterval(16, 32))      # Conv1D filter count
HP_kernel_size = hp.HParam('kernel_size', hp.IntInterval(16, 32))  # Conv1D kernel width
HP_pool_size   = hp.HParam('pool_size', hp.IntInterval(2, 4))      # MaxPooling1D window
HP_NUM_UNITS   = hp.HParam('num_units', hp.IntInterval(16, 32))    # LSTM units
HP_DROPOUT     = hp.HParam('dropout', hp.RealInterval(0.2, 0.5))   # LSTM input dropout
#HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd']))

# Metric name tracked by the HParams dashboard for every trial.
METRIC_ACCURACY = 'accuracy'

# Register the search space and the tracked metric once, under the same log
# directory that each per-trial run() writes into.
with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
  hp.hparams_config(
    hparams=[HP_kernel_size,HP_FILTERS,HP_pool_size, HP_NUM_UNITS, HP_DROPOUT],
    metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
  )

def train_test_model(hparams):
  """Build, train, and evaluate one CNN-LSTM trial.

  Args:
    hparams: dict mapping the HP_* HParam objects to concrete values for
      this trial (filters, kernel_size, pool_size, num_units, dropout).

  Returns:
    Test-set accuracy of the trained model (float).
  """
  model = tf.keras.models.Sequential([
      # Each trial: 64 EEG channels x `sample` time steps (channels_first).
      keras.Input(shape=(64, sample), dtype='float32'),
      layers.Conv1D(filters=hparams[HP_FILTERS], data_format='channels_first',
                    kernel_size=(hparams[HP_kernel_size],), padding="causal"),
      layers.BatchNormalization(axis=-1),
      layers.Activation('relu'),
      layers.MaxPooling1D(pool_size=(hparams[HP_pool_size],), padding='valid',
                          data_format='channels_first'),
      layers.LSTM(units=hparams[HP_NUM_UNITS], dropout=hparams[HP_DROPOUT],
                  kernel_regularizer=regularizers.l2(1e-3)),
      layers.Dense(2, activation="softmax"),
  ])
  model.summary()
  model.compile(
      loss="categorical_crossentropy",
      optimizer="adam",
      # FIX: labels are one-hot 2-class vectors with a softmax head, so
      # CategoricalAccuracy is the correct metric. BinaryAccuracy scores each
      # output element independently against a 0.5 threshold, which is not the
      # per-example classification accuracy. Named to match METRIC_ACCURACY so
      # the HParams dashboard picks it up consistently.
      metrics=[keras.metrics.CategoricalAccuracy(name=METRIC_ACCURACY)],
  )

  # One timestamped scalar log dir per trial so runs do not overwrite each other.
  logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
  tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

  model.fit(x_train, y_train, epochs=100, batch_size=2,
            validation_data=(x_dev, y_dev),
            # KerasCallback records this trial's hyperparameter values.
            callbacks=[tensorboard_callback, hp.KerasCallback(logdir, hparams)]
  )
  # evaluate() returns [loss, accuracy]; only the accuracy is reported.
  _, accuracy = model.evaluate(x_test, y_test)
  return accuracy

def run(run_dir, hparams):
  """Run one hparam trial and log its hyperparameters and test accuracy.

  Args:
    run_dir: TensorBoard log directory for this trial (one directory per run).
    hparams: dict mapping HP_* HParam objects to this trial's values.
  """
  with tf.summary.create_file_writer(run_dir).as_default():
    hp.hparams(hparams)  # record the values used in this trial
    accuracy = train_test_model(hparams)
    # step=1: a single post-training accuracy point per trial.
    tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)


# Hyperparameter sweep.
# FIX: the original loop ran four trials with identical hard-coded values, so
# the "search" explored nothing. Sample each trial's value uniformly from the
# domain declared on its HParam (the commented-out min_value/max_value line
# shows this was the intent).
session_num = 0

for _ in range(4):
  hparams = {
      HP_FILTERS: random.randint(HP_FILTERS.domain.min_value,
                                 HP_FILTERS.domain.max_value),
      HP_kernel_size: random.randint(HP_kernel_size.domain.min_value,
                                     HP_kernel_size.domain.max_value),
      HP_pool_size: random.randint(HP_pool_size.domain.min_value,
                                   HP_pool_size.domain.max_value),
      HP_NUM_UNITS: random.randint(HP_NUM_UNITS.domain.min_value,
                                   HP_NUM_UNITS.domain.max_value),
      HP_DROPOUT: random.uniform(HP_DROPOUT.domain.min_value,
                                 HP_DROPOUT.domain.max_value),
  }
  run_name = "run-%d" % session_num
  print('--- Starting trial: %s' % run_name)
  print({h.name: hparams[h] for h in hparams})
  run('logs/hparam_tuning/' + run_name, hparams)
  session_num += 1

# CNN-LSTM hybrid model (single-trial reference implementation).
# NOTE(review): dead code -- this is a bare triple-quoted string used to
# disable an earlier, non-hparam version of the model above; it is never
# executed. Kept for reference.
"""

model = keras.Sequential(
    [
        keras.Input(shape=(64,sample),dtype='float32'),
        layers.Conv1D(filters=16, data_format='channels_first',kernel_size = 24,padding="causal"),
        layers.BatchNormalization(axis=-1),
        layers.Activation('relu'),
        layers.MaxPooling1D(pool_size=4, padding='valid', data_format='channels_first'),
        layers.LSTM(16,dropout=0.2,kernel_regularizer=regularizers.l2(1e-3)),
        layers.Dense(2, activation="softmax"),
    ]
)
model.summary()
## 损失函数为交叉熵损失函数
model.compile(loss="categorical_crossentropy", optimizer="adam",metrics=[keras.metrics.BinaryAccuracy()])

logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
#训练
model.fit(x_train, y_train, epochs=100, batch_size=2,
                    validation_data=(x_test, y_test),
    callbacks=[tensorboard_callback],)

#测试
loss, acc = model.evaluate(x_test, y_test,batch_size=2)
print("loss: %.2f" % loss)
print("accuracy: %.2f" % acc)

"""