import numpy as np
import tensorflow as tf
import time
from function_class import function
from tensorflow import keras
from matplotlib import pyplot as plt
from function_class import UNet_model
from sklearn.metrics import confusion_matrix

'''
This script compares the following setups with respect to: run-time speed,
network convergence speed, prediction speed, and accuracy.
    (1) classification directly on fixed-length raw data;
    (2) classification on peak-found, sliced data;
    (3) classification on peak-found, sliced, and normalized data;
    (4) classification on filtered versions of the two data types above.
'''

# Root directory of the raw dataset (.npy files).
path = r'F:\ultralytics-main\data_set\20250417'
# Peak-finding parameters — presumably the minimum peak height and the minimum
# inter-peak distance (in samples) forwarded to the peak finder inside
# function.ReadAndShuffleNpy; TODO confirm against that implementation.
minpeak = 0.4
distance = 64
bs=100  # batch size used by every fit() below
bins = 2048  # length of the un-sliced ("original") spectra

x_train,x_valid,y_train,y_valid= function.ReadAndShuffleNpy(path,per_data=20000,data_len=bins,Min_Peak=minpeak,distance=distance)

# Fixed sample counts per split — presumably determined by the upstream
# shuffle/split inside ReadAndShuffleNpy; TODO confirm they always hold.
_N_TRAIN = 6000
_N_VALID = 1500


def _prep(idx, length):
    """Return (x_train, x_valid, y_train, y_valid) for split *idx*.

    Inputs are reshaped to (N, length, 1) 1-D channels-last tensors and
    labels to (N, 3) one-hot vectors, exactly as the four hand-written
    reshape stanzas did before.
    """
    return (
        np.array(x_train[idx]).reshape([_N_TRAIN, length, 1]),
        np.array(x_valid[idx]).reshape([_N_VALID, length, 1]),
        np.array(y_train[idx]).reshape([_N_TRAIN, 3]),
        np.array(y_valid[idx]).reshape([_N_VALID, 3]),
    )


# Split indices: 0 = raw piece, 1 = normalized piece, 2 = raw original,
# 3 = normalized original. Piece data is 2*distance long, originals are bins long.
x_train_rp, x_valid_rp, y_train_rp, y_valid_rp = _prep(0, 2 * distance)
x_train_np, x_valid_np, y_train_np, y_valid_np = _prep(1, 2 * distance)
x_train_ro, x_valid_ro, y_train_ro, y_valid_ro = _prep(2, bins)
x_train_no, x_valid_no, y_train_no, y_valid_no = _prep(3, bins)

# Constant learning rate handed to Adam (a plain float, not a schedule object).
lr_schedule = 0.00001
early_stopping = keras.callbacks.EarlyStopping(
    monitor='val_accuracy',  # watch validation accuracy (could also be 'val_loss', etc.)
    patience=10,            # stop after 10 epochs without improvement (tunable)
    restore_best_weights=True  # roll back to the best weights once training stops
)
# NOTE(review): this single Adam instance is reused by every model.compile()
# below. Keras optimizers hold per-variable state, so sharing one instance
# across independently trained models is unsafe — prefer one optimizer per model.
adam  = keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999)

# --- (1) raw peak-sliced data -------------------------------------------------
model_rp = UNet_model.RebuildUnetChange02(kse=3, kern_reg=None, kern_int_e='he_normal')
model_rp.compile(
    loss='categorical_crossentropy',
    # Fresh optimizer per model: a Keras optimizer builds per-variable state
    # for the first model it trains and must not be shared across models.
    optimizer=keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999),
    metrics=['accuracy'],
)
history_rp = model_rp.fit(
    x_train_rp, y_train_rp,
    batch_size=bs, epochs=200,
    validation_data=(x_valid_rp, y_valid_rp),
    callbacks=[early_stopping],
)

# Export as a TF SavedModel directory (not the Keras-native .keras/.h5 format);
# kept as-is because downstream loaders may expect a SavedModel.
tf.saved_model.save(model_rp, r'D:\PycharmProjects\festival\model_save')

# Pause between runs — presumably to let GPU memory settle; TODO confirm it is needed.
time.sleep(10)

# --- (2) normalized peak-sliced data ------------------------------------------
model_np = UNet_model.RebuildUnetChange02(kse=3, kern_reg=None, kern_int_e='he_normal')
model_np.compile(
    loss='categorical_crossentropy',
    # Fresh optimizer per model (Keras optimizers must not be shared across models).
    optimizer=keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999),
    metrics=['accuracy'],
)
history_np = model_np.fit(
    x_train_np, y_train_np,
    batch_size=bs, epochs=200,
    validation_data=(x_valid_np, y_valid_np),
    callbacks=[early_stopping],
)
# NOTE(review): this inspects model_rp with a *normalized* validation sample,
# right after training model_np — looks like a copy-paste slip (model_np was
# probably intended); confirm before changing.
function.PrintModelYEDS(model_rp,x_valid_np[8])

time.sleep(10)

# --- (3) raw original-length data ---------------------------------------------
model_ro = UNet_model.RebuildUnetChange02(kse=3, kern_reg=None, kern_int_e='he_normal')
model_ro.compile(
    loss='categorical_crossentropy',
    # Fresh optimizer per model (Keras optimizers must not be shared across models).
    optimizer=keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999),
    metrics=['accuracy'],
)
history_ro = model_ro.fit(
    x_train_ro, y_train_ro,
    batch_size=bs, epochs=200,
    validation_data=(x_valid_ro, y_valid_ro),
    callbacks=[early_stopping],
)

# --- (4) normalized original-length data --------------------------------------
model_no = UNet_model.RebuildUnetChange02(kse=3, kern_reg=None, kern_int_e='he_normal')
model_no.compile(
    loss='categorical_crossentropy',
    # Fresh optimizer per model (Keras optimizers must not be shared across models).
    optimizer=keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999),
    metrics=['accuracy'],
)
history_no = model_no.fit(
    x_train_no, y_train_no,
    batch_size=bs, epochs=200,
    validation_data=(x_valid_no, y_valid_no),
    callbacks=[early_stopping],
)

# Pull the validation curves out of each training run; epochs are 1-based.
_runs = {'rp': history_rp, 'np': history_np, 'ro': history_ro, 'no': history_no}
_loss = {k: h.history['val_loss'] for k, h in _runs.items()}
_acc = {k: h.history['val_accuracy'] for k, h in _runs.items()}

val_loss_rp, val_acc_rp = _loss['rp'], _acc['rp']
val_loss_np, val_acc_np = _loss['np'], _acc['np']
val_loss_ro, val_acc_ro = _loss['ro'], _acc['ro']
val_loss_no, val_acc_no = _loss['no'], _acc['no']

# Each run may stop at a different epoch (early stopping), so each curve
# gets its own x-axis range.
epochs_rp_loss = range(1, 1 + len(val_loss_rp))
epochs_np_loss = range(1, 1 + len(val_loss_np))
epochs_ro_loss = range(1, 1 + len(val_loss_ro))
epochs_no_loss = range(1, 1 + len(val_loss_no))

# One figure, two panels: validation loss (left) and validation accuracy (right).
plt.figure(figsize=(12, 5))

# Curve specs: (x-range, loss curve, accuracy curve, matplotlib style, legend label).
_curves = [
    (epochs_rp_loss, val_loss_rp, val_acc_rp, 'g.-', 'raw_piece'),
    # Fixed: the label previously carried a stray trailing space ('norm_piece ').
    (epochs_np_loss, val_loss_np, val_acc_np, 'r.-', 'norm_piece'),
    (epochs_ro_loss, val_loss_ro, val_acc_ro, 'b.-', 'raw_original'),
    (epochs_no_loss, val_loss_no, val_acc_no, 'y.-', 'norm_original'),
]

# Validation loss panel
plt.subplot(1, 2, 1)
for _epochs, _loss_curve, _acc_curve, _style, _label in _curves:
    plt.plot(_epochs, _loss_curve, _style, label=_label)
plt.title('Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

# Validation accuracy panel
plt.subplot(1, 2, 2)
for _epochs, _loss_curve, _acc_curve, _style, _label in _curves:
    plt.plot(_epochs, _acc_curve, _style, label=_label)
plt.title('Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.tight_layout()
plt.show()