import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader


# Select the GPU if one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用的设备: {device}")
print("开始训练...")

# Load the raw process data from the Excel workbook.
# NOTE(review): hard-coded absolute Windows path — this breaks on any other
# machine; consider making it a CLI argument or config entry.
file_path = 'C:\\Users\\严如梦\\Desktop\\文献2023\\康缘\\其他\\300701\\副本T3007A.xlsx'
sheet_name = 'Sheet2'
data = pd.read_excel(file_path, sheet_name=sheet_name)

# Feature columns: from the third column up to (but excluding) the last one.
# NOTE(review): feature_columns is never used below — preprocess_batch
# re-derives the same slice positionally via iloc[:, 2:-1].
feature_columns = data.columns[2:-1]
# Select the rows of each production batch by the batch ID in column 1,
# then keep a fixed number of leading rows per batch.
# iloc uses a half-open interval, so iloc[0:382] keeps positions 0..381.
batch1 = data[(data.iloc[:, 1] == 'Z241028-1')].iloc[0:382]
batch2 = data[(data.iloc[:, 1] == 'Z241029-1')].iloc[0:392]

# 定义 preprocess_batch 函数将数据转换为适合模型输入的格式
def preprocess_batch(batch):
    """Split a batch DataFrame into a feature matrix X and target column Y.

    Features are the columns from index 2 up to (but excluding) the last;
    the target is the final column. Non-numeric cells are coerced to NaN.
    Returns (X, Y) as numpy arrays with Y shaped (n_rows, 1).
    """
    feature_frame = batch.iloc[:, 2:-1].apply(pd.to_numeric, errors='coerce')
    target_series = batch.iloc[:, -1].apply(pd.to_numeric, errors='coerce')
    return feature_frame.values, target_series.values.reshape(-1, 1)


# Convert each batch into numeric (X, Y) numpy arrays.
X_batch1, Y_batch1 = preprocess_batch(batch1)
X_batch2, Y_batch2 = preprocess_batch(batch2)


# Keep the batch data and their human-readable IDs in parallel lists
# (index i of batches corresponds to index i of batch_names).
batches = [(X_batch1, Y_batch1), (X_batch2, Y_batch2)]
batch_names = ['Z241028-1', 'Z241029-1']


# Build a sliding-window dataset for sequence models.
def create_sliding_window_dataset(X, Y, window_size):
    """Turn (X, Y) time series into overlapping windows and next-step targets.

    Each sample is X[i : i + window_size] paired with the target
    Y[i + window_size], i.e. the model predicts the step that follows
    the window. Returns numpy arrays of shape
    (n_windows, window_size, n_features) and (n_windows, 1).
    """
    starts = range(len(X) - window_size)
    windows = [X[s:s + window_size] for s in starts]
    targets = [Y[s + window_size] for s in starts]
    return np.array(windows), np.array(targets)


window_size = 4  # number of past time steps fed to the model per sample
# Re-window every batch in place: X becomes
# (n_windows, window_size, n_features) and Y becomes (n_windows, 1).
for i, (X, Y) in enumerate(batches):
    X, Y = create_sliding_window_dataset(X, Y, window_size)
    batches[i] = (X, Y)  # overwrite the raw arrays with the windowed ones
    print(f"批号 {batch_names[i]} 数据集大小: {X.shape}")
    print(f"批号 {batch_names[i]} 数据集大小: {Y.shape}")


# LSTM regression network: encodes a window of features and maps the hidden
# state of the last time step to a single scalar prediction.
class LSTMModel(nn.Module):
    def __init__(self, input_size, hidden_units, num_layers):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_units, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_units, 1)

    def forward(self, x):
        # sequence_out: (batch, seq_len, hidden_units) since batch_first=True
        sequence_out, _ = self.lstm(x)
        final_step = sequence_out[:, -1, :]  # hidden state at the last time step
        return self.fc(final_step)


# Train an LSTM model on one batch's windowed data.
def train_lstm_model(hidden_units, num_layers, X, Y, batch_size=32, epochs=500):
    """Train an LSTMModel and record per-epoch training/validation losses.

    Args:
        hidden_units: LSTM hidden state size.
        num_layers: number of stacked LSTM layers.
        X: array of shape (n_samples, window_size, n_features).
        Y: array of shape (n_samples, 1).
        batch_size: mini-batch size for both loaders.
        epochs: number of full passes over the training split.

    Returns:
        (model, train_losses, val_losses) — losses are lists of per-epoch
        mean MSE values, index-aligned with each other.
    """
    # Resolve the compute device locally so the function does not depend on a
    # module-level global (same expression as the top-of-file setup).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    X = torch.tensor(X, dtype=torch.float32).to(device)
    Y = torch.tensor(Y, dtype=torch.float32).to(device)

    # Chronological 80/20 train/validation split (no shuffling across the
    # split boundary, appropriate for time-series data).
    train_size = int(0.8 * len(X))
    X_train, X_val = X[:train_size], X[train_size:]
    Y_train, Y_val = Y[:train_size], Y[train_size:]

    # DataLoaders manage mini-batching; only the training set is shuffled.
    train_dataset = TensorDataset(X_train, Y_train)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_dataset = TensorDataset(X_val, Y_val)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    model = LSTMModel(input_size=X.shape[2], hidden_units=hidden_units, num_layers=num_layers).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    train_losses = []
    val_losses = []

    for epoch in range(epochs):
        model.train()
        epoch_train_loss = 0.0
        for batch_X, batch_Y in train_loader:
            optimizer.zero_grad()
            outputs = model(batch_X)
            loss = criterion(outputs, batch_Y)
            loss.backward()
            optimizer.step()
            epoch_train_loss += loss.item()
        train_losses.append(epoch_train_loss / len(train_loader))

        # BUGFIX: validation loss used to be computed only every 10th epoch,
        # so val_losses was ~10x shorter than train_losses and the two curves
        # were misaligned when plotted on a shared epoch axis. Evaluate every
        # epoch so the lists stay index-aligned.
        model.eval()
        epoch_val_loss = 0.0
        with torch.no_grad():
            for batch_X, batch_Y in val_loader:
                outputs = model(batch_X)
                loss = criterion(outputs, batch_Y)
                epoch_val_loss += loss.item()
        if len(val_loader) > 0:  # guard: tiny inputs can leave the split empty
            val_losses.append(epoch_val_loss / len(val_loader))

    return model, train_losses, val_losses


# Main program: train one model per production batch, plot its loss curves,
# then evaluate it and plot predicted vs. actual values.
results = []
for i, (X, Y) in enumerate(batches):
    print(f"X2 shape: {X.shape}")
    print(f"Y2 shape: {Y.shape}")

    # Fixed hyperparameters: 32 hidden units, 2 stacked LSTM layers.
    model, train_losses, val_losses = train_lstm_model(32, 2, X, Y)

    plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # avoid minus sign rendering as a box

    plt.close('all')  # discard figures left over from a previous iteration
    plt.figure()
    # NOTE(review): if train_lstm_model records validation loss less often
    # than training loss (e.g. every 10th epoch), these two curves are
    # misaligned on the shared x-axis — confirm the lists are index-aligned.
    plt.plot(train_losses, label='训练损失')
    plt.plot(val_losses, label='验证损失')
    plt.legend()
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title(f'批号 {batch_names[i]} 损失曲线')
    plt.show()


    model.eval()
    with torch.no_grad():
        # Predict over ALL windows of this batch (training + validation
        # portions alike), so the metrics below are in-sample.
        Y_pred = model(torch.tensor(X, dtype=torch.float32).to(device))
        Y_pred = Y_pred.cpu().numpy()  # back to numpy for sklearn metrics
        # Y is still a numpy array here (it was never converted to a tensor),
        # so no .cpu() call is needed on it.
        mse = mean_squared_error(Y, Y_pred)
        mae = mean_absolute_error(Y, Y_pred)
        # R^2 as 1 - MSE/Var(Y); with population variance this equals
        # sklearn's r2_score definition.
        r2 = 1 - mse / np.var(Y)
        results.append((mse, mae, r2))
        print(f"批号 {batch_names[i]}：MSE={mse:.4f}, MAE={mae:.4f}, R2={r2:.4f}")

    # Overlay actual vs. predicted target values for visual inspection.
    plt.figure()
    plt.plot(Y, label='实际值', color='red')
    plt.plot(Y_pred, label='预测值', color='blue')
    plt.legend()
    plt.title(f'批号 {batch_names[i]}：实际值 vs 预测值')
    plt.show()


# Summary: report the batch whose model achieved the lowest (in-sample) MSE.
best_index = np.argmin([r[0] for r in results])  # r[0] is the MSE entry
print(f"最佳模型: 批号 {batch_names[best_index]}")