"""
Environment:    tensorflow              2.16.1
                keras                   3.2.1
                python                  3.12.2
Date:           2024/4/24 9:14  
"""

import keras
import numpy as np
import matplotlib.pyplot as plt

"""
1. Processing the data:
    outputData.py
    获取到的CSV文件JSON格式有问题 键没有引号 用这个文件解析出训练集压力数据到data/Output.csv
    测试集到data/test.csv 
    
    1异常 0正常
"""

"""
2. Read data from CSV file:
    读取CSV数据 转换格式后拉伸并插值统一长度
    # 不要手动编辑CSV文件 Excel保存逗号分隔文件读取时会报错
"""
import csv
from scipy.interpolate import interp1d

"""
@brief: 拉伸时间序列到指定长度并做线性插值填充序列 
@param: curve_samples 二维列表[ [序列1] , [序列2] , ... , [序列n] ]
        target_length 拉伸到指定长度
@ret:   拉伸插值后的时间序列
"""
def stretch_and_interpolate(curve_samples, target_length):
    """Stretch every time series to ``target_length`` points via linear interpolation.

    Each curve is resampled on a uniform grid over [0, 1] and the resampled
    values are rounded to one decimal place.

    @param curve_samples: 2-D list [[series1], [series2], ..., [seriesN]]
    @param target_length: length every series is stretched to
    @ret:  np.ndarray of shape (len(curve_samples), target_length)
    """
    # Pre-allocate the output array for the interpolated curves.
    interpolated_curves = np.zeros((len(curve_samples), target_length))

    for i, curve in enumerate(curve_samples):
        if len(curve) == 1:
            # interp1d requires at least 2 points; a single-point curve is
            # simply a constant (the original code raised ValueError here).
            resampled = np.full(target_length, float(curve[0]))
        else:
            # Map the original samples onto [0, 1] and interpolate linearly.
            f = interp1d(np.linspace(0, 1, len(curve)), curve)
            resampled = f(np.linspace(0, 1, target_length))
        # Round to one decimal place, as the downstream code expects.
        interpolated_curves[i] = np.around(resampled, decimals=1)

    return interpolated_curves

"""
@brief  读取指定路径文件，格式为
        [Label] [point0] [point1] [point2] ... [pointN]
        [Label] [point0] [point1] [point2] ... [pointN]
        ...
        [Label] [point0] [point1] [point2] ... [pointN]
        @Label 1为异常类 0为正常类 可以多分类 例如每种吸液量分一类
@param  file_path 指定文件路径
@ref    样本数据data_x
        标签数据data_y
"""
def read_and_convert(file_path):
    """Read a labelled time-series CSV and split it into samples and labels.

    Expected format, one sample per line:
        [Label],[point0],[point1],...,[pointN]
    Label 1 = abnormal, 0 = normal (multi-class labels also work).

    @param file_path: path of the CSV file to read
    @ret:  (data_X, data_y) where data_X is a list of int lists (the points)
           and data_y is a list of one-element int lists (the labels)
    """
    data_X = []
    data_y = []
    with open(file_path, newline='') as csvfile:
        csv_reader = csv.reader(csvfile)
        for row in csv_reader:
            # Excel-saved files often end with blank lines; skip them instead
            # of crashing on int('') as the old join-and-resplit code did.
            if not row:
                continue
            # Everything after the first column is the time series.
            data_X.append([int(num) for num in row[1:]])
            # Keep the label wrapped in a list so np.array(data_y) has shape
            # (n, 1), matching the original return format.
            data_y.append([int(row[0])])
    return data_X, data_y

"""
@brief  z-score标准化数据预处理 
@param  data np.array二维数组
@ret    归一化后数据
"""
def z_score_standardization(data):
    """Z-score standardize ``data``: subtract the global mean, divide by the
    global standard deviation.

    @param data: np.array (any shape; statistics are taken over all elements)
    @ret:  standardized array of the same shape
    """
    mean = np.mean(data)
    std = np.std(data)

    # Constant input has zero spread; the original divided by zero here and
    # produced NaN/inf. Return all zeros instead (already centred).
    if std == 0:
        return np.zeros_like(data, dtype=float)

    return (data - mean) / std

"""
@train_path:    训练集文件路径
@test_path:     测试集文件路径
@target_length: 填充序列长度 取决于训练集数据最大长度
"""
train_path = 'data/Output.csv'
test_path = 'data/test.csv'
target_length = 300

train_data_x, train_data_y = read_and_convert(train_path)
interpolated_train_x = stretch_and_interpolate(train_data_x, target_length)
x_train = np.array(interpolated_train_x)
x_train = z_score_standardization(x_train)
y_train = np.array(train_data_y)

test_data_x, test_data_y = read_and_convert(test_path)
interpolated_test_x = stretch_and_interpolate(test_data_x, target_length)
x_test = np.array(interpolated_test_x)
x_test = z_score_standardization(x_test)
y_test = np.array(test_data_y)

classes = np.unique(np.concatenate((y_train, y_test), axis=0))

x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))

num_classes = len(np.unique(y_train))

idx = np.random.permutation(len(x_train))
x_train = x_train[idx]
y_train = y_train[idx]

# Load the already-trained model for evaluation.
model = keras.models.load_model("best_model.keras")
# model.summary()
# test_loss, test_acc = model.evaluate(x_test, y_test)

# print("Test accuracy", test_acc)
# print("Test loss", test_loss)

y_prob=model.predict(x_test) # predict on the test set
print(y_prob)
y_pred=np.where(np.array(y_prob).squeeze() > 0.5, 1, 0) # threshold the probabilities into hard 0/1 labels
# Indices of the samples the model misclassified.
incorrect_indices = np.where(y_pred != y_test.flatten())
# Print the misclassified samples.
# print(incorrect_indices[0])
print("Incorrect predictions:")
for idx in incorrect_indices[0]:
    print("Idx:", idx+1,"Predicted:", y_pred[idx], "Actual:", y_test.flatten()[idx])
    # Other information (e.g. the input x_test[idx]) can be printed here as needed.
np.set_printoptions(threshold=np.inf)
# print(incorrect_indices[0])
row_group = incorrect_indices[0]
# row_group = [1212]
# Re-read the raw CSV and plot every misclassified curve.
with open(test_path, newline='') as csvfile:
    reader = csv.reader(csvfile)
    for i, row in enumerate(reader):
        for target_row in row_group:
            # NOTE(review): incorrect_indices is 0-based and enumerate(i) is
            # 0-based, yet this compares against target_row-1, so the plotted
            # CSV row is one BEFORE the misclassified sample — confirm whether
            # the -1 is intentional (the "Idx: idx+1" print suggests 1-based
            # bookkeeping elsewhere).
            if i == target_row-1:
                row = row[1:]
                x = [[int(num) for num in row]]
                padded_x = stretch_and_interpolate(x, target_length)[0]
                plt.plot(np.linspace(0, len(padded_x), len(padded_x)), padded_x)
# print(x)
# x = [2236,2237,2232,2226,2217,2204,2191,2173,2154,2132,2108,2082,2055,2030,2007,1992,1999,1996,2000,2008,2012,2012,2011,2005,2000,1995,1993,1988,1986,1985,1982,1978,1977,1975,1974,1973,1972,1971,1970,1969,1968,1967,1966,1964,1966,1964,1960,1960,1960,1960,1951,1958,2026,2090,2062,2042,2024,2009,1996,1989,1985,1986,1988,1992,1999,2006,2013,2023,2033,2044,2054,2065,2073,2084,2094,2103,2114,2123,2130,2138,2145]

plt.show()
plt.close()
"""

"""
import seaborn as sns
from sklearn.metrics import classification_report # per-class precision/recall/F1
from sklearn.metrics import confusion_matrix

# Predict on the test set and threshold probabilities into hard 0/1 labels.
y_prob=model.predict(x_test)
y_pred=np.where(y_prob > 0.5, 1, 0)
print(y_pred)
# sklearn's metric signature is (y_true, y_pred); the original call passed
# (y_pred, y_test), which transposes the confusion matrix and swaps
# precision with recall in the report.
cm=confusion_matrix(y_test, y_pred)
print('Confusion matrix:\n', cm, '\n')
print(classification_report(y_test, y_pred))
plt.title("CNN+RNN Confusion Matrix") # title
sns.heatmap(cm, annot=True, cmap="cool", fmt="d", cbar=False) # annotated heatmap settings
plt.show() # display the confusion matrix