"""
Environment:    tensorflow              2.16.1
                keras                   3.2.1
                python                  3.12.2
Date:           2024/4/24 9:14  
"""

import keras
import numpy as np
import matplotlib.pyplot as plt
import time
import tensorflow as tf

# Record script start time for the runtime report printed at the end.
start_time = time.time()

"""
1. Processing the data:
    outputData.py
    获取到的CSV文件JSON格式有问题 键没有引号 用这个文件解析出训练集压力数据到data/Output.csv
    测试集到data/test.csv 
    
    0异常 1正常
"""

"""
2. Read data from CSV file:
    读取CSV数据 转换格式后拉伸并插值统一长度
    # 不要手动编辑CSV文件 Excel保存逗号分隔文件读取时会报错
"""
import csv
from scipy.interpolate import interp1d

"""
@brief: 拉伸时间序列到指定长度并做线性插值填充序列 
@param: curve_samples 二维列表[ [序列1] , [序列2] , ... , [序列n] ]
        target_length 拉伸到指定长度
@ret:   拉伸插值后的时间序列
"""
def stretch_and_interpolate(curve_samples, target_length):
    
    # 初始化一个数组来存储插值填充后的曲线
    interpolated_curves = np.zeros((len(curve_samples), target_length))
    
    # 对每个曲线进行插值填充
    for i, curve in enumerate(curve_samples):
        # 使用插值方法
        f = interp1d(np.linspace(0, 1, len(curve)), curve)
        # 计算插值填充后的曲线
        interpolated_curve = f(np.linspace(0, 1, target_length))
        interpolated_curve_rounded = np.around(interpolated_curve, decimals=1)
        # 将插值填充后的曲线存储到数组中
        interpolated_curves[i] = interpolated_curve_rounded
    
    return interpolated_curves

"""
@brief  读取指定路径文件，格式为
        [Label] [point0] [point1] [point2] ... [pointN]
        [Label] [point0] [point1] [point2] ... [pointN]
        ...
        [Label] [point0] [point1] [point2] ... [pointN]
        @Label 1为异常类 0为正常类 可以多分类 例如每种吸液量分一类
@param  file_path 指定文件路径
@ref    样本数据data_x
        标签数据data_y
"""
def read_and_convert(file_path):
    data = []
    with open(file_path, newline='') as csvfile:
        # 创建一个 CSV 读取器对象
        csv_reader = csv.reader(csvfile)
        # 逐行读取 CSV 文件内容并打印出来
        for row in csv_reader:
            result = ",".join(row)
            data.append(result)

    string_list = []
    for item in data:
        string_list.append(item)
        
    data_X = []
    data_y = []    
    for string in string_list:
        # 分割字符串并将每个数字字符串转换为整数
        number_list_X = [int(num) for num in string[2:].split(',')]
        number_list_Y = [int(num) for num in string[0]]
        data_X.append(number_list_X)
        data_y.append(number_list_Y)
    return data_X, data_y

"""
@brief  z-score标准化数据预处理 
@param  data np.array二维数组
@ret    归一化后数据
"""
def z_score_standardization(data):
    # 计算均值和标准差
    mean = np.mean(data)
    std = np.std(data)
    
    # 对数据进行Z-score标准化
    z_score_data = (data - mean) / std
    
    return z_score_data

"""
@train_path:    训练集文件路径
@test_path:     测试集文件路径
@target_length: 填充序列长度 取决于训练集数据最大长度
"""
train_path = 'data/Output.csv'
test_path = 'data/test.csv'
target_length = 300

"""
@epochs     控制迭代周期
@batch_size 控制分批处理大小
"""
epochs = 50
batch_size = 128

train_data_x, train_data_y = read_and_convert(train_path)
interpolated_train_x = stretch_and_interpolate(train_data_x, target_length)
x_train = np.array(interpolated_train_x)
x_train = z_score_standardization(x_train)
y_train = np.array(train_data_y)

test_data_x, test_data_y = read_and_convert(test_path)
interpolated_test_x = stretch_and_interpolate(test_data_x, target_length)
x_test = np.array(interpolated_test_x)
x_test = z_score_standardization(x_test)
y_test = np.array(test_data_y)

classes = np.unique(np.concatenate((y_train, y_test), axis=0))

x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))

num_classes = len(np.unique(y_train))

idx = np.random.permutation(len(x_train))
x_train = x_train[idx]
y_train = y_train[idx]

"""
以下为模型构建训练部分
@ref: https://keras.io/examples/timeseries/timeseries_classification_from_scratch/#timeseries-classification-from-scratch
"""
# def make_model(input_shape):
#     input_layer = keras.layers.Input(input_shape)

#     conv1 = keras.layers.Conv1D(filters=32, kernel_size=6, padding="same")(input_layer)
#     conv1 = keras.layers.BatchNormalization()(conv1)
#     conv1 = keras.layers.ReLU()(conv1)

#     conv2 = keras.layers.Conv1D(filters=64, kernel_size=6, padding="same")(conv1)
#     conv2 = keras.layers.BatchNormalization()(conv2)
#     conv2 = keras.layers.ReLU()(conv2)

#     conv3 = keras.layers.Conv1D(filters=128, kernel_size=6, padding="same")(conv2)
#     conv3 = keras.layers.BatchNormalization()(conv3)
#     conv3 = keras.layers.ReLU()(conv3)

#     gap = keras.layers.GlobalAveragePooling1D()(conv3)

#     output_layer = keras.layers.Dense(num_classes, activation="softmax")(gap)

#     return keras.models.Model(inputs=input_layer, outputs=output_layer)

from keras.models import Sequential # 导入序贯模型 
from keras import layers # 导入所有类型的层 
from keras.optimizers import Adam # 导入优化器 

# Training callbacks: checkpoint the best model by validation loss, halve
# the learning rate after 20 stagnant epochs (floored at 1e-4), and stop
# training after 50 epochs without val_loss improvement.
callbacks = [
    keras.callbacks.ModelCheckpoint(
        "best_model.keras", save_best_only=True, monitor="val_loss"
    ),
    keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss", factor=0.5, patience=20, min_lr=0.0001
    ),
    keras.callbacks.EarlyStopping(monitor="val_loss", patience=50, verbose=1),
]

# Resume from a previous checkpoint if one exists; otherwise build a fresh
# CNN + GRU binary classifier.
try:
    model = keras.models.load_model("best_model.keras")
    print("Loaded model from disk")
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate instead of silently starting a new model.
    print("No existing model found, creating new model...")
    model = Sequential()
    # Explicit Input layer: Keras 3 deprecates passing input_shape to the
    # first layer of a Sequential model.
    model.add(layers.Input(shape=x_train.shape[1:]))
    model.add(layers.Conv1D(32, kernel_size=10, strides=4))  # 1D conv feature extractor
    model.add(layers.MaxPooling1D(pool_size=4, strides=2))   # downsample in time
    model.add(layers.GRU(256, return_sequences=True))        # recurrent layer over the sequence
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.4))                           # regularization
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(1, activation='sigmoid'))         # binary classification head
    # NOTE(review): the original passed decay=0.01, but Keras 3 removed the
    # `decay` argument from optimizers (it raises "Argument(s) not
    # recognized"). Use a LearningRateSchedule or `weight_decay` if
    # learning-rate decay is still wanted.
    opt = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999)
    model.compile(optimizer=opt,
                  loss='binary_crossentropy',  # binary cross-entropy for the sigmoid output
                  metrics=['acc'])

# Train with a 20% validation split; callbacks handle checkpointing,
# LR reduction and early stopping.
history = model.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    callbacks=callbacks,
    validation_split=0.2,
    verbose=1,
)

# # Step 1: Save the Keras model
# model.export('my_model')

# # Step 2: Convert to TensorFlow Lite format with additional options
# converter = tf.lite.TFLiteConverter.from_saved_model("my_model")
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
# converter._experimental_lower_tensor_list_ops = False  # Disable lowering tensor list ops
# converter.experimental_enable_resource_variables = True  # Enable experimental resource variables
# tflite_model = converter.convert()

# # Step 3: Save the TensorFlow Lite model
# with open("converted_model.tflite", "wb") as f:
#     f.write(tflite_model)


# Reload the best checkpoint saved during training and evaluate on the test set.
model = keras.models.load_model("best_model.keras")

test_loss, test_acc = model.evaluate(x_test, y_test)

print("Test accuracy", test_acc)
print("Test loss", test_loss)

# Record end time.
end_time = time.time()

# Total wall-clock runtime of the script (message prints in Chinese:
# "program execution time ... seconds").
execution_time = end_time - start_time
print("程序执行时间为：", execution_time, "秒")
# Plot training vs. validation accuracy per epoch.
metric = "acc"
plt.figure()
plt.plot(history.history[metric])
plt.plot(history.history["val_" + metric])
plt.title("model " + metric)
plt.ylabel(metric, fontsize="large")
plt.xlabel("epoch", fontsize="large")
plt.legend(["train", "val"], loc="best")
plt.show()
plt.close()

import seaborn as sns
from sklearn.metrics import classification_report  # per-class precision/recall/F1
from sklearn.metrics import confusion_matrix

# Predict probabilities on the test set and threshold at 0.5.
y_prob = model.predict(x_test)
y_pred = np.where(y_prob > 0.5, 1, 0)
print(y_pred)
# sklearn's signature is (y_true, y_pred); the original calls had the
# arguments swapped, which transposes the confusion matrix and exchanges
# precision/recall in the report. ravel() flattens the (n, 1) column
# vectors to the 1-D arrays sklearn expects.
cm = confusion_matrix(y_test.ravel(), y_pred.ravel())
print('Confusion matrix:\n', cm, '\n')
print(classification_report(y_test.ravel(), y_pred.ravel()))
plt.title("CNN+RNN Confusion Matrix")
sns.heatmap(cm, annot=True, cmap="cool", fmt="d", cbar=False)  # heatmap of the confusion matrix
plt.show()
plt.close()
