import torch
import torch.optim as optim
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from lstmmodel import LSTMModel
from data import load_data
from pso import pso_optimize


def train_lstm_model(hidden_units, num_layers, X, Y, device):
    """Train an LSTM model and return the validation-set mean squared error.

    :param hidden_units: number of hidden units per LSTM layer
    :param num_layers: number of stacked LSTM layers
    :param X: input data (array-like)
    :param Y: target data (array-like)
    :param device: torch device (GPU or CPU)
    :return: MSE on the 20% held-out validation split
    """
    # Convert to float32 tensors, add a trailing feature dimension, move to device.
    inputs = torch.tensor(X, dtype=torch.float32).unsqueeze(-1).to(device)
    targets = torch.tensor(Y, dtype=torch.float32).unsqueeze(-1).to(device)
    X_train, X_val, Y_train, Y_val = train_test_split(
        inputs, targets, test_size=0.2, random_state=42
    )

    model = LSTMModel(
        input_size=inputs.shape[2], hidden_units=hidden_units, num_layers=num_layers
    ).to(device)
    criterion = torch.nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Full-batch training for a fixed number of epochs.
    for _ in range(300):
        model.train()
        optimizer.zero_grad()
        predictions = model(X_train)
        training_loss = criterion(predictions, Y_train)
        training_loss.backward()
        optimizer.step()

    # Score on the validation split without tracking gradients.
    model.eval()
    with torch.no_grad():
        Y_pred = model(X_val)
        return mean_squared_error(Y_val.cpu().numpy(), Y_pred.cpu().numpy())


def evaluate_model(best_units, best_layers, X, Y, device):
    """Train with the best hyper-parameters and evaluate the resulting model.

    :param best_units: best number of hidden units found by the search
    :param best_layers: best number of LSTM layers found by the search
    :param X: input data (array-like)
    :param Y: target data (array-like)
    :param device: torch device (GPU or CPU)
    :return: (mse, mae, r2, Y_train, Y_train_pred, Y_test, Y_test_pred),
        where the trailing arrays are CPU numpy copies for plotting
    """
    inputs = torch.tensor(X, dtype=torch.float32).unsqueeze(-1).to(device)
    targets = torch.tensor(Y, dtype=torch.float32).unsqueeze(-1).to(device)
    X_train, X_test, Y_train, Y_test = train_test_split(
        inputs, targets, test_size=0.3, random_state=42
    )

    model = LSTMModel(
        input_size=inputs.shape[2], hidden_units=best_units, num_layers=best_layers
    ).to(device)
    criterion = torch.nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Full-batch training for a fixed number of epochs.
    for _ in range(500):
        model.train()
        optimizer.zero_grad()
        loss = criterion(model(X_train), Y_train)
        loss.backward()
        optimizer.step()

    # Predictions with gradients disabled; flatten to 1-D for metrics/plots.
    model.eval()
    with torch.no_grad():
        Y_train_pred = model(X_train).flatten()
        Y_test_pred = model(X_test).flatten()

    y_true = Y_test.cpu().numpy()
    y_pred = Y_test_pred.cpu().numpy()
    mse = mean_squared_error(y_true, y_pred)
    mae = mean_absolute_error(y_true, y_pred)
    # Equivalent to sklearn's r2_score: 1 - SS_res/SS_tot with population variance.
    r2 = 1 - mse / np.var(y_true)

    return (
        mse,
        mae,
        r2,
        Y_train.cpu().numpy(),
        Y_train_pred.cpu().numpy(),
        y_true,
        y_pred,
    )


def _plot_actual_vs_predicted(actual, predicted, actual_label, predicted_label, title):
    """Draw a single actual-vs-predicted comparison figure and display it."""
    plt.figure()
    plt.plot(actual, label=actual_label, color='red')
    plt.plot(predicted, label=predicted_label, color='blue')
    plt.legend()
    plt.title(title)
    plt.show()


def plot_results(batch_name, Y_train, Y_train_pred, Y_test, Y_test_pred):
    """Plot actual vs. predicted values for the training and test sets.

    :param batch_name: batch identifier used in the figure titles
    :param Y_train: training-set actual values
    :param Y_train_pred: training-set predicted values
    :param Y_test: test-set actual values
    :param Y_test_pred: test-set predicted values
    """
    plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can render Chinese labels
    plt.rcParams['axes.unicode_minus'] = False  # avoid the minus sign rendering as a box

    # The two figures share identical structure; draw each via the helper.
    _plot_actual_vs_predicted(
        Y_train, Y_train_pred, '训练集实际值', '训练集预测值',
        f'批号 {batch_name} 训练集：实际值 vs 预测值'
    )
    _plot_actual_vs_predicted(
        Y_test, Y_test_pred, '测试集实际值', '测试集预测值',
        f'批号 {batch_name} 测试集：实际值 vs 预测值'
    )


def main():
    """Entry point: load per-batch data and train an LSTM on each batch.

    NOTE(review): the PSO hyper-parameter search, evaluation and plotting
    pipeline below is currently commented out; only a fixed-parameter
    training run (10 hidden units, 1 layer) executes per batch.
    """
    # Check whether a CUDA-capable GPU is available; otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用的设备: {device}")

    # Load the data.
    # NOTE(review): hard-coded absolute Windows path — consider making it configurable.
    file_path = 'C:\\Users\\严如梦\\Desktop\\文献2023\\康缘\\其他\\300701\\副本T3007A.xlsx'
    sheet_name = 'Sheet2'
    batches, batch_names = load_data(file_path, sheet_name)

    results = []  # NOTE(review): only populated by the disabled pipeline below.
    for i, (X, Y) in enumerate(batches):
        # Train with fixed hyper-parameters and print the validation MSE.
        print(train_lstm_model(10, 1, X, Y, device))
        # def objective(params):
        #     hidden_units, num_layers = params
        #     return train_lstm_model(int(hidden_units), int(num_layers), X, Y, device)

    #     gbest, _ = pso_optimize(
    #         objective,
    #         n_particles=10,
    #         max_iter=5,
    #         velocity_limit=(-1, 1),
    #         search_range=(5, 50),
    #         c1=1.8,
    #         c2=1.8,
    #         w=0.7
    #     )
    #
    #     best_units, best_layers = map(int, gbest)
    #     print(f"批号 {batch_names[i]}: 最佳隐藏单元数: {best_units}, 最佳层数: {best_layers}")
    #
    #     mse, mae, r2, Y_train, Y_train_pred, Y_test, Y_test_pred = evaluate_model(best_units, best_layers, X, Y, device)
    #     results.append((mse, mae, r2))
    #
    #     plot_results(batch_names[i], Y_train, Y_train_pred, Y_test, Y_test_pred)
    #
    # # 打印结果
    # best_index = np.argmin([r[0] for r in results])
    # print(f"最佳模型: 批号 {batch_names[best_index]}")


if __name__ == "__main__":
    # Run the per-batch training pipeline when executed as a script.
    main()
