# -*- coding: utf-8 -*-
# Import libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, models
from date_process import data_read_csv

# Suppress warnings
import warnings

warnings.filterwarnings("ignore")

# Load the dataset from the project-local reader
train_x, train_y = data_read_csv()
# One-hot encode the binary labels: (n,) -> (n, 2)
train_y = tf.keras.utils.to_categorical(train_y, num_classes=2)

# Input sequence length and number of output classes
int_sequence_len = train_x.shape[1]
out_len = train_y.shape[1]

# Hold out 20% of the data for evaluation (fixed seed for reproducibility)
x_train, x_test, y_train, y_test = train_test_split(
    np.array(train_x), np.array(train_y), test_size=0.2, random_state=1
)

# Add a trailing feature axis so each sample is (timesteps, features),
# as expected by the sequence model downstream.
x_train = x_train.reshape(len(x_train), int_sequence_len, -1)
x_test = x_test.reshape(len(x_test), int_sequence_len, -1)

print(f"x_train: {x_train.shape}, y_train: {y_train.shape}")
print(f"x_test: {x_test.shape}, y_test: {y_test.shape}")


# 创建Transformer模型
def create_transformer_model(input_shape, num_classes):
    """Build and compile a single-block Transformer encoder classifier.

    Args:
        input_shape: tuple ``(sequence_length, feature_dim)`` of one sample
            (batch axis excluded).
        num_classes: number of softmax output units.

    Returns:
        A compiled ``tf.keras`` Model mapping
        ``(batch, seq, feat)`` -> ``(batch, num_classes)``.
    """
    inputs = layers.Input(shape=input_shape)
    feature_dim = input_shape[-1]

    # --- Self-attention sub-layer (residual + layer norm) ---
    # MultiHeadAttention's output last dim matches the query's last dim by
    # default, so the residual addition with `inputs` is shape-safe.
    attn_out = layers.MultiHeadAttention(num_heads=4, key_dim=64)(inputs, inputs)
    x = layers.LayerNormalization()(attn_out + inputs)

    # --- Position-wise feed-forward sub-layer (residual + layer norm) ---
    # BUG FIX: the original computed `Dense(128)(x) + inputs`, which raises a
    # shape error (or silently broadcasts when feature_dim == 1) for any
    # feature_dim != 128.  The standard encoder projects the FFN output back
    # to the model dimension and adds the *post-attention* tensor as the
    # residual branch.
    ffn = layers.Dense(128, activation="relu")(x)
    ffn = layers.Dense(feature_dim)(ffn)
    x = layers.LayerNormalization()(ffn + x)

    # Pool over the time axis, then classify.
    x = layers.GlobalAveragePooling1D()(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)

    model = models.Model(inputs, outputs)
    # BUG FIX: one-hot labels with a softmax head call for
    # categorical_crossentropy; binary_crossentropy is for sigmoid outputs.
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["categorical_accuracy"])
    return model


# Build and train the model
# Instantiate the Transformer classifier; feature dim comes from the
# reshaped training tensor (batch, seq, features).
model = create_transformer_model(input_shape=(int_sequence_len, x_train.shape[2]), num_classes=2)

model.summary()
# NOTE(review): the held-out split doubles as validation data here, so the
# "test" metrics are seen during training — confirm this is intended.
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50, batch_size=32, shuffle=True)

# Persist the trained weights (extension-less path — presumably the
# TensorFlow checkpoint format; verify against how they are reloaded).
model.save_weights('transformer_model_weights')

# Visualize the training history: training vs. validation loss per epoch.
loss_curve = history.history['loss']
val_loss_curve = history.history['val_loss']
epochs_axis = range(1, len(loss_curve) + 1)

plt.plot(epochs_axis, loss_curve, 'r--')
plt.plot(epochs_axis, val_loss_curve, 'b-')
plt.legend(['Training Loss', 'Validation Loss'])
plt.title("Train and Validation Loss")
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()

# Predict on the held-out set and evaluate
y_pred = model.predict(x_test)          # softmax probabilities, shape (n, 2)
y_true = np.argmax(y_test, axis=1)      # decode one-hot labels back to class ids
y_pred_classes = np.argmax(y_pred, axis=1)

# Compute evaluation metrics via the project-local helper.
# NOTE(review): mid-file import — conventionally this belongs at the top of
# the file with the other imports.
from metra import acc_metra

acc_metra(y_true, y_pred_classes, label=['0', '1'])


