import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

print(f"TensorFlow version = {tf.__version__}\n")

# Fix both RNG seeds so repeated runs produce the same shuffles and the
# same weight initialisations.
SEED = 1337
np.random.seed(SEED)
tf.random.set_seed(SEED)

# Gesture labels; one CSV of recordings is expected per entry
# (loaded from "../<gesture>.csv" below).
GESTURES = ["zuoxie", "youxie", "hen", "shu", "(", ")"]

# Number of sensor samples captured for a single gesture recording.
SAMPLES_PER_GESTURE = 50

NUM_GESTURES = len(GESTURES)

# Row i of the identity matrix is the one-hot target for gesture i.
ONE_HOT_ENCODED_GESTURES = np.eye(NUM_GESTURES)

# Flattened feature vectors and their one-hot labels, filled in below.
inputs = []
outputs = []

# read each csv file and push an input and output
# Read each gesture's CSV and build one flattened feature vector per
# recording, paired with that gesture's one-hot label.
for gesture_index in range(NUM_GESTURES):
	gesture = GESTURES[gesture_index]
	print(f"Processing index {gesture_index} for gesture '{gesture}'.")

	output = ONE_HOT_ENCODED_GESTURES[gesture_index]

	df = pd.read_csv("../" + gesture + ".csv")

	# Each recording is a fixed-length window of SAMPLES_PER_GESTURE rows.
	num_recordings = int(df.shape[0] / SAMPLES_PER_GESTURE)

	print(f"\tThere are {num_recordings} recordings of the {gesture} gesture.")

	for i in range(num_recordings):
		tensor = []
		for j in range(SAMPLES_PER_GESTURE):
			index = i * SAMPLES_PER_GESTURE + j
			# Normalize every channel to [0, 1]:
			# - accelerometer range is -4 to +4
			# - gyroscope range is -2000 to +2000
			tensor += [
				(df['aX'][index] + 4) / 8,
				(df['aY'][index] + 4) / 8,
				(df['aZ'][index] + 4) / 8,
				(df['gX'][index] + 2000) / 4000,
				(df['gY'][index] + 2000) / 4000,
				(df['gZ'][index] + 2000) / 4000,
			]

		# BUG FIX: append inside the recordings loop so EVERY recording is
		# kept.  The original appended at gesture-loop level, which silently
		# discarded all but the last recording of each gesture.
		inputs.append(tensor)
		outputs.append(output)

# convert the list to numpy array
# Materialise the Python lists as numpy arrays so they can be fancy-indexed
# and split below.
inputs = np.array(inputs)
outputs = np.array(outputs)

print("Data set parsing and preparation complete.")

# Shuffle features and labels with the SAME random permutation so each
# feature vector stays paired with its one-hot label.
# https://stackoverflow.com/a/37710486/2020087
num_inputs = len(inputs)
randomize = np.random.permutation(num_inputs)
inputs = inputs[randomize]
outputs = outputs[randomize]

# Split the recordings 60% / 20% / 20% into training, testing, validation.
TRAIN_SPLIT = int(0.6 * num_inputs)
TEST_SPLIT = int(0.2 * num_inputs + TRAIN_SPLIT)

inputs_train, inputs_test, inputs_validate = np.split(inputs, [TRAIN_SPLIT, TEST_SPLIT])
outputs_train, outputs_test, outputs_validate = np.split(outputs, [TRAIN_SPLIT, TEST_SPLIT])

print("Data set randomization and splitting complete.")

import tensorflow as tf
from tensorflow.keras import layers

class AttentionLayer(layers.Layer):
	"""Attention pooling: score each position along axis 1, softmax the
	scores, and return the attention-weighted sum over that axis.

	NOTE(review): the bias is created with shape ``(input_shape[1],)`` while
	the matmul scores carry a trailing singleton axis, so the addition in
	``call`` depends entirely on broadcasting; typical implementations use
	``(input_shape[1], 1)`` here — confirm the intended score shape against
	the caller before reusing this layer.
	"""

	def __init__(self):
		super().__init__()

	def build(self, input_shape):
		# One scalar score per feature: W maps the last axis down to 1.
		self.W = self.add_weight(shape=(input_shape[-1], 1), initializer='random_normal', trainable=True)
		# Per-position bias added to the raw scores.
		self.b = self.add_weight(shape=(input_shape[1],), initializer='zeros', trainable=True)
		super().build(input_shape)

	def call(self, inputs):
		# Raw scores, then attention weights normalised along axis 1.
		scores = tf.matmul(inputs, self.W) + self.b
		weights = tf.nn.softmax(scores, axis=1)
		# Weight the inputs and collapse axis 1 to produce the pooled output.
		return tf.reduce_sum(inputs * weights, axis=1)

# Build the classifier.
#
# NOTE(review): the original pipeline ran Flatten -> AttentionLayer ->
# Reshape((1, -1)), which collapses every sample to a single scalar before
# the Dense layers, so the network could not distinguish gestures; it also
# trained the softmax output with an MSE loss and fed rank-2 data into a
# rank-3 input.  All three are fixed below; AttentionLayer is left defined
# but unused until its bias shape is confirmed (see the class docstring).
model = tf.keras.Sequential([
	layers.Input(shape=(SAMPLES_PER_GESTURE * 6, 1)),
	layers.Conv1D(32, 3, activation='relu'),
	layers.MaxPooling1D(2),
	layers.Flatten(),
	layers.Dense(64, activation='relu'),
	layers.Dense(NUM_GESTURES, activation='softmax'),
])

model.summary()

# Categorical cross-entropy is the appropriate loss for one-hot targets with
# a softmax output; track classification accuracy rather than MAE.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# Conv1D expects (batch, steps, channels); the parsed data is (batch, steps),
# so append a trailing channel axis before fitting.
history = model.fit(
	inputs_train[..., np.newaxis],
	outputs_train,
	epochs=600,
	batch_size=1,
	validation_data=(inputs_validate[..., np.newaxis], outputs_validate),
)

# increase the size of the graphs. The default size is (6,4).
# Enlarge all figures; the matplotlib default is (6, 4).
plt.rcParams["figure.figsize"] = (20, 10)

# Plot training vs. validation loss for every epoch recorded by fit().
training_loss = history.history['loss']
validation_loss = history.history['val_loss']
epoch_axis = range(1, len(training_loss) + 1)

plt.plot(epoch_axis, training_loss, 'g.', label='Training loss')
plt.plot(epoch_axis, validation_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

print(plt.rcParams["figure.figsize"])

######################################################################
# Train the model...
######################################################################

# Convert to a TensorFlow Lite model
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# tflite_model = converter.convert()

# Save as a TFLite model file
# with open('model.tflite', 'wb') as f:
#	 f.write(tflite_model)