import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# 9. 可视化训练过程（可选）
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn.preprocessing import MinMaxScaler
import time

WINDOW_SIZE = 65  # sliding-window length: number of past rows fed to the LSTM per sample
EPOCHS = 50  # number of training epochs
Y = 2  # target-column index counted from the END (column -Y, i.e. second-to-last, is the target)

#  Read the data and preview it.
df = pd.read_excel(
    "./TS-272无归一化.xlsx", parse_dates=["时间"], index_col=[0],skiprows=[0]
)  # skip the first spreadsheet row; parse the "时间" column as datetimes and use the first column as the index

# Data overview
print("展示前3行")
print(df.head(3))  # show the first 3 rows

# --------------------------------------- Data split -----------------------------------
# Chronological 80/20 split: shuffling is disabled so time order stays intact.
train_data, test_data = train_test_split(df, test_size=0.2, shuffle=False)
print("训练集和测试集数据量")
for part in (train_data, test_data):
    print(part.shape)
print()
# --------------------------------------- Data split -----------------------------------

# --------------------------------------- Normalization -----------------------------------
# Fit a dedicated scaler on the target column (column -Y) of the TRAINING set only,
# so predictions can later be inverse-transformed back to the original scale.
# Fix: the original called fit_transform/transform and threw the results away;
# only the fit is needed here, and the dead test-set transform was removed.
last_col_scaler = MinMaxScaler(feature_range=(0, 1))
last_col_scaler.fit(np.array(train_data.iloc[:, -Y]).reshape(-1, 1))

scaler = MinMaxScaler(feature_range=(0, 1))  # scale every feature into [0, 1]
train_data_scaled = scaler.fit_transform(train_data)
# Transform the test set with training-set statistics only (no data leakage).
test_data_scaled = scaler.transform(test_data)
# --------------------------------------- Normalization -----------------------------------


# -------------------------------------- Dataset construction -----------------------------------
def createXY(dataset, window_size, target_index=None):
    """Build sliding-window supervised samples from a 2-D array.

    Args:
        dataset: 2-D array of shape (n_samples, n_features).
        window_size: number of past rows used as input for each sample.
        target_index: column index of the target variable. Defaults to the
            module-level setting ``-Y`` (the Y-th column from the end), which
            preserves the original behavior. (The original comment claimed the
            *last* column was the target, which was wrong for Y != 1.)

    Returns:
        Tuple ``(dataX, dataY)`` where ``dataX`` has shape
        (n_samples - window_size, window_size, n_features) and ``dataY`` has
        shape (n_samples - window_size,).

    Raises:
        ValueError: if the dataset is not longer than window_size.
    """
    if target_index is None:
        target_index = -Y  # fall back to the script-wide target column

    if len(dataset) <= window_size:
        raise ValueError("Dataset length should be greater than window_size.")

    dataX = []
    dataY = []
    for i in range(window_size, len(dataset)):
        # All feature columns of the previous `window_size` rows form the input...
        dataX.append(dataset[i - window_size : i, :])
        # ...and the target column of the current row is the label.
        dataY.append(dataset[i, target_index])

    return np.array(dataX), np.array(dataY)


# Generate the windowed training and test tensors and report their shapes.
trainX_scaled, trainY_scaled = createXY(train_data_scaled, WINDOW_SIZE)
for label, arr in (("trainX", trainX_scaled), ("trainY", trainY_scaled)):
    print(f"{label}.shape:", arr.shape)
testX_scaled, testY_scaled = createXY(test_data_scaled, WINDOW_SIZE)
for label, arr in (("testX", testX_scaled), ("testY", testY_scaled)):
    print(f"{label}.shape:", arr.shape)
# -------------------------------------- Dataset construction -----------------------------------


# --------------------------------------- Model definition -----------------------------------
# Stacked LSTM regressor: a sequence-returning LSTM feeding a second LSTM,
# then dropout and a single-unit linear head for the target value.
model = Sequential(
    [
        LSTM(
            128,
            return_sequences=True,
            input_shape=(trainX_scaled.shape[1], trainX_scaled.shape[2]),
        ),
        LSTM(100),
        Dropout(0.2),  # 0.2 is a common choice in shallow networks to curb overfitting
        Dense(1),  # one output neuron: the predicted target value
    ]
)
model.compile(loss="mse", optimizer="adam")  # mean squared error (L2) loss

# Print the model summary
print(model.summary())
print()
# --------------------------------------- Training -----------------------------------
# Fix: removed the redundant `import time` — it is already imported at the top of the file.
t1 = time.perf_counter()
print("开始训练...")
history = model.fit(
    trainX_scaled,
    trainY_scaled,
    epochs=EPOCHS,
    # NOTE(review): batch size is tied to the window size here — confirm this is intentional.
    batch_size=WINDOW_SIZE,
    # The test set doubles as the validation set, so val_loss tracks test performance.
    validation_data=(testX_scaled, testY_scaled),
    verbose=1,
)
t2 = time.perf_counter()
total_time = t2 - t1
print("训练时间：", total_time, "s")
# --------------------------------------- Training done -----------------------------------

# -------------------------------------- Predict and inverse-transform -----------------------------------
predictions_scaled = model.predict(testX_scaled)
print("predictions_scaled.shape", predictions_scaled.shape)
# Map predictions back to the original (un-normalized) target scale using the
# scaler fitted on the training target column.
predictions = last_col_scaler.inverse_transform(predictions_scaled)
print("predictions.shape", predictions.shape)
print("testY_scaled.shape", testY_scaled.shape)
# ---------------------------------------- Error metrics -----------------------------------
testY = last_col_scaler.inverse_transform(testY_scaled.reshape(-1, 1))
# Fix: sqrt(mean((pred - true)^2)) is the ROOT mean squared error (RMSE);
# the label previously said 均方误差 (MSE), which mislabeled the printed value.
print(
    "均方根误差：",
    np.sqrt(np.mean((predictions - testY) ** 2)),
)

from sklearn.metrics import r2_score

r2 = r2_score(testY, predictions)
print(f"R^2=: {r2}")

model.save("my_model.keras")  # persist the trained model
plt.rcParams["font.sans-serif"] = ["SimHei"]  # CJK-capable font so Chinese labels render
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with CJK fonts
plt.plot(history.history["loss"], label="loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.title("LSTM Loss")
plt.grid(True)
plt.legend()  # fix: the original called plt.legend() twice; one call suffices
# Save the chart to a local PNG file
plt.savefig("loss.png")
plt.show()

# Align dates with predictions.
# The first WINDOW_SIZE rows of the test set are consumed purely as model input,
# so each prediction corresponds to test row WINDOW_SIZE, WINDOW_SIZE+1, ...
# Fix: the original used dates[:len(predictions)], which shifted the x-axis
# back by WINDOW_SIZE timestamps relative to the values being plotted.
dates = test_data.index  # presumably the "时间" datetime index — verify against the loader
pred_dates = dates[WINDOW_SIZE:]  # exactly len(predictions) entries

# Compare predictions against ground truth
plt.plot(
    pred_dates,
    testY,
    label="真实值",
)
plt.plot(
    pred_dates,
    predictions,
    label="预测值",
    linestyle="--",
)
plt.title("真实值与预测值对比")  # chart title
plt.xticks(
    pred_dates[::10], rotation=45
)  # show every 10th date, rotated so labels don't overlap
plt.legend()  # show the legend
plt.grid(True)  # show grid lines
plt.savefig("真实值与预测值对比.png")
plt.show()
