# -*- coding: utf-8 -*-
"""
Created on Thu Mar 20 22:00:42 2025

@author: LZM
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Configure a CJK-capable font so Chinese labels render ('SimHei' ships with
# Windows; on Mac/Linux the font must be installed first)
plt.rcParams['font.family'] = ['SimHei']  # SimHei ("black" sans-serif)
# Without this, minus signs render as empty boxes when a CJK font is active
plt.rcParams['axes.unicode_minus'] = False

# Activation function
def sigmoid(x):
    """Numerically stable logistic sigmoid.

    Computed from exp(-|x|), which never overflows; the original
    1 / (1 + exp(-x)) emits overflow warnings for large negative x.
    For x >= 0 the result is bit-identical to the original formula.
    """
    z = np.exp(-np.abs(x))
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
# Mean squared error loss
def mse_loss(y_true, y_pred):
    """Return the mean of the squared element-wise differences."""
    diff = y_true - y_pred
    return np.mean(diff * diff)
# Initialise LSTM parameters
def init_lstm_params(input_size, hidden_size, output_size):
    """Build the LSTM weight dictionary with a fixed seed (42).

    Each gate weight W_{f,i,c,o} has shape (hidden, hidden + input) and acts
    on the concatenation [h_prev; x_t]; biases start at zero. W_y/b_y form
    the linear output head. The RNG call order matches the original so the
    generated values are identical.
    """
    np.random.seed(42)
    concat_size = hidden_size + input_size
    params = {}
    for gate in ('f', 'i', 'c', 'o'):
        params[f'W_{gate}'] = 0.01 * np.random.randn(hidden_size, concat_size)
        params[f'b_{gate}'] = np.zeros((hidden_size, 1))
    # Output projection
    params['W_y'] = 0.01 * np.random.randn(output_size, hidden_size)
    params['b_y'] = np.zeros((output_size, 1))
    return params
# Single-timestep forward pass
def lstm_cell_forward(x_t, h_prev, C_prev, params):
    """Run one LSTM cell step.

    x_t: (input, 1) column; h_prev/C_prev: (hidden, 1) previous states.
    Returns (h_t, C_t, cache) where cache holds every intermediate needed
    by the backward pass, in the order it expects.
    """
    z = np.vstack((h_prev, x_t))  # stacked input [h_prev; x_t]
    f_t = sigmoid(params['W_f'] @ z + params['b_f'])      # forget gate
    i_t = sigmoid(params['W_i'] @ z + params['b_i'])      # input gate
    C_tilde = np.tanh(params['W_c'] @ z + params['b_c'])  # candidate cell
    o_t = sigmoid(params['W_o'] @ z + params['b_o'])      # output gate
    C_t = f_t * C_prev + i_t * C_tilde                    # new cell state
    h_t = o_t * np.tanh(C_t)                              # new hidden state
    cache = (h_t, h_prev, C_prev, f_t, i_t, C_tilde, C_t, o_t, x_t, z)
    return h_t, C_t, cache
# LSTM forward pass over a whole sequence
def lstm_forward(x_seq, params):
    """Run the LSTM over x_seq and project the final hidden state.

    x_seq: iterable of per-timestep inputs (each reshaped to a column).
    Returns (y_pred, caches) with y_pred = W_y @ h_T + b_y.

    Bug fix: the original never reassigned h_prev/C_prev inside the loop,
    so every timestep saw a zero previous state and the recurrence was
    effectively disabled. State is now carried forward. An empty sequence
    no longer raises NameError — it yields the projection of the zero state.
    """
    hidden_size = params['W_f'].shape[0]
    h_t = np.zeros((hidden_size, 1))
    C_t = np.zeros((hidden_size, 1))
    caches = []
    for x_t in x_seq:
        # carry (h_t, C_t) into the next step — this is the recurrence
        h_t, C_t, cache = lstm_cell_forward(x_t.reshape(-1, 1), h_t, C_t, params)
        caches.append(cache)
    y_pred = np.dot(params['W_y'], h_t) + params['b_y']
    return y_pred, caches

# LSTM backward pass (BPTT over the cached timesteps)
def lstm_backward(y_true, y_pred, caches, params, learning_rate=0.0001):
    """Backpropagate the squared error through the LSTM and apply an
    in-place SGD step to `params` (nothing is returned).

    y_true, y_pred: (output_size, 1) arrays for a single sample.
    caches: per-timestep tuples produced by lstm_cell_forward.
    learning_rate: SGD step size applied directly inside this function.

    NOTE(review): the output-layer gradient is injected only at the last
    timestep; earlier steps receive gradient solely through the recurrent
    terms below — confirm this matches the intended training scheme.
    """
    grads = {key: np.zeros_like(value) for key, value in params.items()}
    dy = 2 * (y_pred - y_true)  # dL/dy of the squared error (no mean factor)
    h_t = caches[-1][0]  # final hidden state fed the output layer
    dW_y = np.dot(dy, h_t.T)  # output-layer weight gradient
    db_y = dy  # output-layer bias gradient
    grads['W_y'] += dW_y
    grads['b_y'] += db_y
    dh_next = np.dot(params['W_y'].T, dy)  # gradient flowing into last h_t
    dC_next = np.zeros_like(caches[0][2])  # no cell gradient beyond last step
    for cache in reversed(caches):
        h_t, h_prev, C_prev, f_t, i_t, C_tilde, C_t, o_t, x_t, concat = cache
        # Per-gate pre-activation gradients (each includes its activation derivative)
        do = dh_next * np.tanh(C_t) * o_t * (1 - o_t)
        dC = dh_next * o_t * (1 - np.tanh(C_t) ** 2) + dC_next
        dC_tilde = dC * i_t * (1 - C_tilde ** 2)
        di = dC * C_tilde * i_t * (1 - i_t)
        df = dC * C_prev * f_t * (1 - f_t)
        # Accumulate weight/bias gradients against the concat input [h_prev; x_t]
        grads['W_o'] += np.dot(do, concat.T)
        grads['b_o'] += do
        grads['W_c'] += np.dot(dC_tilde, concat.T)
        grads['b_c'] += dC_tilde
        grads['W_i'] += np.dot(di, concat.T)
        grads['b_i'] += di
        grads['W_f'] += np.dot(df, concat.T)
        grads['b_f'] += df
        # Propagate to h_prev through the hidden-state columns of each gate
        # (columns [:hidden_size] multiply h_prev in the forward concat)
        dh_next = np.dot(params['W_f'][:, :h_prev.shape[0]].T, df) + \
                  np.dot(params['W_i'][:, :h_prev.shape[0]].T, di) + \
                  np.dot(params['W_c'][:, :h_prev.shape[0]].T, dC_tilde) + \
                  np.dot(params['W_o'][:, :h_prev.shape[0]].T, do)
        dC_next = dC * f_t  # cell state passes straight through the forget gate
    # In-place SGD update
    for key in params.keys():
        params[key] -= learning_rate * grads[key]

# Train the LSTM
def train_lstm(x_train, y_train, params, learning_rate=0.0001, epochs=100, batch_size=64):
    """Per-sample SGD training loop over mini-batch slices.

    Fix: the original ran lstm_forward twice per sample (once to log the
    loss, once again for the backward pass). A single forward pass now
    serves both, halving the per-sample compute. The per-sample update
    order is unchanged, so training dynamics are identical; only the
    logged loss is now measured at the current (just-updated) parameters.

    NOTE(review): samples beyond the last full batch are never used
    (num_samples % batch_size leftovers are dropped) — confirm intended.
    """
    num_samples = len(x_train)
    num_batches = num_samples // batch_size
    for epoch in range(epochs):
        total_loss = 0.0
        for batch in range(num_batches):
            start_idx = batch * batch_size
            end_idx = start_idx + batch_size
            x_batch = x_train[start_idx:end_idx]
            y_batch = y_train[start_idx:end_idx]
            for x_seq, y_true in zip(x_batch, y_batch):
                # One forward pass reused for both the loss and the gradients
                y_pred, caches = lstm_forward(x_seq, params)
                total_loss += mse_loss(y_true, y_pred)
                lstm_backward(y_true, y_pred, caches, params, learning_rate)
        if (epoch + 1) % 10 == 0:
            print(f"Epoch {epoch + 1}/{epochs}, Loss: {total_loss / num_samples}")

# Load the real wind-speed training data
def load_real_wind_data(filename):
    """Read column index 8 of the CSV (4 header rows skipped), keep the
    first 10000 values, min-max normalise, and build sliding windows of
    length 10 with the following value as the target.

    Returns (x_train, y_train, wind_speeds_min, wind_speeds_max) where
    x_train has shape (N, 10, 1) and y_train shape (N, 1); the extrema are
    returned so test data can be normalised on the same scale.
    """
    df = pd.read_csv(filename, skiprows=4)
    wind_speeds = df.iloc[:, 8].to_numpy()[:10000]
    wind_speeds_min = np.min(wind_speeds)
    wind_speeds_max = np.max(wind_speeds)
    # Reuse the stored extrema (the original recomputed np.min/np.max here)
    wind_speeds = (wind_speeds - wind_speeds_min) / (wind_speeds_max - wind_speeds_min)
    window = 10
    x_train = []
    y_train = []
    for i in range(len(wind_speeds) - window):
        x_train.append(wind_speeds[i:i + window].reshape(-1, 1))
        y_train.append(wind_speeds[i + window])
    return np.array(x_train), np.array(y_train).reshape(-1, 1), wind_speeds_min, wind_speeds_max

# --- Train the LSTM on the real wind-speed data ---
input_size = 1    # one feature per timestep (the wind speed itself)
hidden_size = 128
output_size = 1   # single value predicted per sequence
params = init_lstm_params(input_size, hidden_size, output_size)
# Load the real data (the loader also returns the normalisation extrema)
x_train, y_train, wind_speeds_min, wind_speeds_max = load_real_wind_data('Ada1lzy.csv')
# Run training with the default hyper-parameters (lr=0.0001, 100 epochs, batch 64)
train_lstm(x_train, y_train, params)

# Predict with the trained LSTM
def lstm_predict(x_test, params):
    """Return a 1-D array with one scalar prediction per test sequence."""
    # .item() unwraps the (1, 1) network output to a Python scalar
    return np.array([lstm_forward(seq, params)[0].item() for seq in x_test])

# Load the held-out test data
def load_test_wind_data(filename, wind_speeds_min, wind_speeds_max):
    """Read 200 wind speeds starting after row 10004, normalise them with
    the training extrema, and build sliding windows of length 10.

    Returns (x_test, y_test, wind_compare_true) where wind_compare_true is
    the un-normalised ground truth aligned with y_test.

    Bug fix: the ground-truth slice was [9:199], but y_test[i] targets raw
    index i + 10, i.e. indices 10..199 — the comparison was off by one.
    """
    dfa = pd.read_csv(filename, skiprows=10004)
    wind_speeds = dfa.iloc[:, 8].to_numpy()[:200]
    # Targets are raw indices 10..199, so the truth slice is [10:200]
    wind_compare_true = dfa.iloc[:, 8].to_numpy()[10:200]
    wind_speeds = (wind_speeds - wind_speeds_min) / (wind_speeds_max - wind_speeds_min)
    x_test = []
    y_test = []
    for i in range(len(wind_speeds) - 10):
        x_test.append(wind_speeds[i:i + 10].reshape(-1, 1))
        y_test.append(wind_speeds[i + 10])
    return np.array(x_test), np.array(y_test).reshape(-1, 1), wind_compare_true

# Load the test split (the same file, rows after the training window)
x_test, y_test, wind_compare_true = load_test_wind_data('Ada1lzy.csv', wind_speeds_min, wind_speeds_max)
# Produce normalised predictions for every test sequence
y_pred = lstm_predict(x_test, params)

# Invert the min-max normalisation back to physical wind speeds
def denormalize(data, min_val, max_val):
    """Map values from [0, 1] back onto [min_val, max_val]."""
    span = max_val - min_val
    return min_val + data * span

# Map predictions back to the original wind-speed scale
y_pred_den = denormalize(y_pred, wind_speeds_min, wind_speeds_max)

# Error metrics computed on the denormalised scale
mse = mse_loss(wind_compare_true, y_pred_den)
print(f"测试集 MSE: {mse:.4f}")
mae = np.mean(np.abs(wind_compare_true - y_pred_den))
print(f"测试集 MAE: {mae:.4f}")

# Plot true vs. predicted wind speed
plt.figure(figsize=(10, 5))
plt.plot(wind_compare_true, label="真实风速", linestyle="-", marker="o")
plt.plot(y_pred_den, label="预测风速", linestyle="--", marker="x")
plt.xlabel("时间步")
# NOTE(review): the ylabel says "normalised" but the plotted values are
# denormalised — confirm and relabel if so
plt.ylabel("风速（归一化）")
plt.title("LSTM 预测 vs. 真实风速")
plt.legend()
plt.grid()
plt.show()