'''
Finished 5/9 — the first program capable of training the CNN model.
data_set refers to the dataset files used to train this model.
'''

import os
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.layers import Input, Dense, Concatenate, Conv1D, Flatten, MaxPooling1D

# Collect every dataset file in the working directory. Dataset file names
# are expected to contain the marker 'dataset_from'. Sorting makes the
# training-data order deterministic across runs (os.listdir() order is
# arbitrary and platform-dependent).
data_file_lis = sorted(item for item in os.listdir() if 'dataset_from' in item)

# Input features and output labels, accumulated from every dataset file.
X = []
Y = []

print(data_file_lis)

for data_file in data_file_lis:
    with open(data_file, "r", encoding="utf-8") as data_set_file:
        data_set_file_info = data_set_file.read()

    # SECURITY NOTE: exec() runs arbitrary code contained in the dataset
    # file. Only use this loader with trusted, locally generated files.
    global_namespace_1 = {}
    exec(data_set_file_info, global_namespace_1)

    # Each dataset file is expected to define a list named `res_list`.
    # Fail with a clear message instead of crashing on `None` below.
    dataset_lis = global_namespace_1.get("res_list")
    if dataset_lis is None:
        raise ValueError(f"dataset file {data_file!r} does not define 'res_list'")

    for item in dataset_lis:
        X.append(item[0])                         # input features (88,) per original note
        Y.append(np.hstack([item[2], item[3]]))   # label = item[2] concatenated with item[3]

X = np.array(X)  # convert accumulated lists to NumPy arrays
Y = np.array(Y)

# Guard against an empty dataset before indexing (the unguarded Y[0]
# raised IndexError when no dataset files were present).
if len(Y):
    print(Y[0])

# Print feature/label shapes as a sanity check.
print(f'Input shape: {X.shape}')
print(f'Output shape 1: {Y.shape}')

# Split into training and test sets (10% held out, fixed seed).
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=42)

# Input layer: one spectrum of 88 values treated as a 1-D sequence (88, 1).
input_shape = (88, 1)
inputs = Input(shape=input_shape)

# A flattened, tanh-activated copy of the raw input; later concatenated
# with the convolutional features as a skip connection.
flattened_input = Flatten()(inputs)
act_input = Dense(128, activation='tanh')(flattened_input)

# Stage 1: seven parallel Conv1D branches with different receptive fields.
# (kernel_size, filter count) pairs — wider kernels get fewer filters.
# Hoisted out of the loop: these lists are loop-invariant.
kernel_sizes = [5, 9, 15, 21, 33, 44, 66]
kernel_num = [16, 16, 16, 8, 8, 4, 4]
conv_1 = [
    Conv1D(filters=filters, kernel_size=size, activation='relu')(inputs)
    for size, filters in zip(kernel_sizes, kernel_num)
]

# Stage 2: per-branch Conv1D followed by max pooling.
conv_2 = []
for layer in conv_1:
    conv_layer = Conv1D(8, kernel_size=5, activation='relu')(layer)
    pool_layer = MaxPooling1D(pool_size=2, strides=2, padding='valid')(conv_layer)
    conv_2.append(pool_layer)

# Stage 3: final Conv1D + pooling per branch, then flatten each branch.
conv_outputs = []
for layer in conv_2:
    conv_layer = Conv1D(8, kernel_size=3, activation='relu')(layer)
    pool_layer = MaxPooling1D(pool_size=2, strides=2, padding='valid')(conv_layer)
    conv_outputs.append(Flatten()(pool_layer))

# Concatenate the activated input with all convolutional branch outputs.
# (The original also built a Concatenate of conv_outputs alone that was
# never connected to the model output; that dead layer has been removed.)
merged_of_conv_2 = Concatenate()([act_input] + conv_outputs)

# 后续的全连接层
# Fully connected head on top of the merged features: 256 -> 144 units.
hidden = Dense(256, activation='relu')(merged_of_conv_2)
hidden = Dense(144, activation='relu')(hidden)

# Output layer: 96 sigmoid units (one independent probability per label).
outputs = Dense(96, activation='sigmoid')(hidden)

# Assemble the functional model from input to output.
model = Model(inputs=inputs, outputs=outputs)

# Uncomment to inspect the architecture:
#model.summary()

# Compile: MSE loss with per-element binary accuracy as the metric.
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['binary_accuracy'])

# Add a trailing channel axis so the inputs match the model's expected
# shape (batch_size, 88, 1). np.newaxis derives the middle dimension from
# the data itself instead of hard-coding 88 a second time.
X_train = X_train[..., np.newaxis]
X_test = X_test[..., np.newaxis]

# Train for 8 epochs; the 10% hold-out split doubles as validation data.
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=8, batch_size=256)

# Save the trained model in the native Keras format.
model.save('note_pre.keras')

# Final evaluation on the hold-out set.
loss, accuracy = model.evaluate(X_test, Y_test)
print(f'Test loss: {loss}, Test accuracy: {accuracy}')