#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：model_study
@File    ：my_model_one.py
@Author  ：qsy
@Date    ：2024/10/15 14:23
'''

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import matplotlib.pyplot as plt
import numpy as np
import joblib
from mpl_toolkits.mplot3d import Axes3D

# Load the constant-speed dataset from Excel.
data = pd.read_excel('../data/恒转速数据.xlsx')
print(data.head())  # preview the first rows to sanity-check the columns

# Feature columns: rotational speed and power.
features = data[['Speed', 'Power']]
print(features.head())

# Target column: flow rate.
target = data[['Flow']]
print(target.head())

# Convert both to plain numpy arrays for scikit-learn / Keras.
X = features.to_numpy()
Y = target.to_numpy()

# Hold out a disjoint 20% test split.
# NOTE(fix): the original code trained on ALL samples and then drew the
# "test" set from inside that same training data — every test sample had
# been seen during training, so evaluation metrics were optimistically
# biased (data leakage). train_test_split (already imported above) gives a
# proper disjoint split, and a fixed random_state makes runs reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42
)

# Print dataset sizes.
print(f'训练集大小: {X_train.shape}, 测试集大小: {X_test.shape}')

# Standardize features: fit statistics on the training set only, then
# apply the same transform to the test set (fit + transform is equivalent
# to fit_transform for StandardScaler).
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

# Standardize the target the same way with a separate scaler.
scaler2 = StandardScaler().fit(y_train)
y_train = scaler2.transform(y_train)
y_test = scaler2.transform(y_test)

# Persist both scalers so inference code can reuse the exact statistics.
joblib.dump(scaler, 'scaler_X.pkl')
joblib.dump(scaler2, 'scaler_y.pkl')

# Build the DNN: 2 inputs -> six 32-unit ReLU hidden layers -> 1 linear output.
model = Sequential()
model.add(Dense(32, input_dim=2, activation='relu'))  # input layer
for _ in range(5):
    model.add(Dense(32, activation='relu'))           # hidden layers
model.add(Dense(1, activation='linear'))              # single flow prediction

# Print the model architecture.
model.summary()

# Compile: MSE loss with Adam, tracking MAE as a metric.
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])

# Train, reserving 20% of the training data for validation each epoch.
history = model.fit(X_train, y_train, epochs=500, batch_size=3, validation_split=0.2)

# Plot training vs. validation loss curves.
for series, tag in (('loss', 'train'), ('val_loss', 'test')):
    plt.plot(history.history[series], label=tag)
plt.legend()
plt.show()

# Evaluate on the held-out test set (still in standardized units here).
loss, mae = model.evaluate(X_test, y_test)
print(f'模型在测试集上的损失: {loss}, 平均绝对误差: {mae}')

# Predict on the test set for the comparison plots below.
y_pred = model.predict(X_test)

# Scatter plot of predictions against ground truth (standardized units).
plt.scatter(y_test, y_pred, label='pred vs real')
plt.xlabel('真实流量')
plt.ylabel('预测流量')
plt.legend()
plt.show()

# Undo the standardization to get back to physical units.
x_test = scaler.inverse_transform(X_test)
y_test = scaler2.inverse_transform(y_test)
y_pred = scaler2.inverse_transform(y_pred)

# Show real vs. predicted flow side by side.
result = np.hstack((y_test, y_pred))
print(f'预测值和真实值合成的矩阵: \n{result}')

# Show the de-standardized test inputs.
print(f'测试集输入数据: \n{x_test}')

print('模型训练完成！')

# Single-sample prediction: speed=1050, power=205.
sample = np.array([[1050, 205]])
# Scale with the feature scaler, predict, then map the flow back to
# physical units with the target scaler.
sample_scaled = scaler.transform(sample)
output_data = scaler2.inverse_transform(model.predict(sample_scaled))
print(f'单个值预测结果: {output_data}')

# Save the trained model.
model.save('my_model_one.keras')

# Build a 100x100 (speed, power) grid spanning the observed data range
# for the 3D prediction surface.
min_speed, max_speed = X[:, 0].min(), X[:, 0].max()
min_power, max_power = X[:, 1].min(), X[:, 1].max()

speed_grid, power_grid = np.meshgrid(
    np.linspace(min_speed, max_speed, 100),
    np.linspace(min_power, max_power, 100),
)

# Flatten the grid into (N, 2) rows and standardize with the feature scaler
# so the model sees inputs on the same scale it was trained on.
input_grid = np.column_stack((speed_grid.ravel(), power_grid.ravel()))
input_grid_scaled = scaler.transform(input_grid)


# Predict flow over the grid.
# NOTE(fix): the model outputs *standardized* flow (it was trained on
# scaler2-transformed targets), so invert the target scaler here —
# otherwise the surface below would be in z-score units while the red
# scatter points (y_test, inverse-transformed earlier) are in real flow
# units, putting the two on incompatible scales in the same plot.
predicted_flow = model.predict(input_grid_scaled)
predicted_flow = scaler2.inverse_transform(predicted_flow)

# Reshape the flat predictions back to the 100x100 grid shape.
predicted_flow_grid = predicted_flow.reshape(speed_grid.shape)

# Draw the 3D prediction surface.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(speed_grid, power_grid, predicted_flow_grid, cmap='viridis', alpha=0.7)

# Overlay the de-standardized test samples as red points.
ax.scatter(x_test[:, 0], x_test[:, 1], y_test.flatten(), color='red', label='真实流量', s=10)

# Axis titles and legend.
ax.set(title='Flow Prediction Surface with Real Values',
       xlabel='Speed', ylabel='Power')
ax.set_zlabel('Predicted Flow')
ax.legend()

plt.show()
