import numpy as np
import out
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt


# Select the GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用的设备: {device}")


# Load the raw data from one sheet of the Excel workbook.
# NOTE(review): hard-coded absolute path — the script only runs on this machine.
file_path = 'C:\\Users\\严如梦\\Desktop\\文献2023\\康缘\\其他\\300701\\副本T3007A.xlsx'
sheet_name = 'Sheet2'
data = pd.read_excel(file_path, sheet_name=sheet_name)


# Feature columns: everything from the third column up to (but excluding) the
# last column; the last column is used as the regression target downstream.
feature_columns = data.columns[2:-1]
# Split rows into the two production batches, keyed on the batch-id column
# (column index 1).
batch1 = data[data.iloc[:, 1] == 'Z241028-1']
batch2 = data[data.iloc[:, 1] == 'Z241029-1']


def preprocess_batch(batch):
    """Split one batch DataFrame into a numeric feature matrix and target vector.

    Columns 2..-2 become the features X and the final column becomes Y;
    any non-numeric cell is coerced to NaN.
    """
    features = batch.iloc[:, 2:-1]
    target = batch.iloc[:, -1]
    X = features.apply(pd.to_numeric, errors='coerce').to_numpy()
    Y = target.apply(pd.to_numeric, errors='coerce').to_numpy()
    return X, Y


# Materialise the (features, target) arrays for each batch.
X_batch1, Y_batch1 = preprocess_batch(batch1)
X_batch2, Y_batch2 = preprocess_batch(batch2)


# Pair each batch's arrays with its human-readable batch id for the main loop.
batches = [(X_batch1, Y_batch1), (X_batch2, Y_batch2)]
batch_names = ['Z241028-1', 'Z241029-1']


# NOTE(review): exact duplicate of the preprocess_batch defined above — this
# redefinition is redundant (copy/paste artifact) and should be removed.
def preprocess_batch(batch):
    """Split a batch DataFrame into numeric features (cols 2..-2) and target (last col)."""
    X = batch.iloc[:, 2:-1].apply(pd.to_numeric, errors='coerce').values
    Y = batch.iloc[:, -1].apply(pd.to_numeric, errors='coerce').values
    return X, Y


# NOTE(review): these six lines exactly repeat the block above — a copy/paste
# artifact; the recomputation is redundant and should be removed.
X_batch1, Y_batch1 = preprocess_batch(batch1)
X_batch2, Y_batch2 = preprocess_batch(batch2)


batches = [(X_batch1, Y_batch1), (X_batch2, Y_batch2)]
batch_names = ['Z241028-1', 'Z241029-1']


# LSTM regression model
class LSTMModel(nn.Module):
    """LSTM regressor: stacked LSTM layers followed by a single-output linear head."""

    def __init__(self, input_size, hidden_units, num_layers):
        """
        Build the network.
        :param input_size: number of input features per time step
        :param hidden_units: LSTM hidden-state width (also the FC input width)
        :param num_layers: number of stacked LSTM layers
        """
        super().__init__()
        hidden_units = int(hidden_units)
        # batch_first=True -> tensors are laid out (batch, seq_len, features)
        self.lstm = nn.LSTM(input_size, hidden_units, int(num_layers), batch_first=True)
        self.fc = nn.Linear(hidden_units, 1)

    def forward(self, x):
        """Run the sequence through the LSTM and regress on its final time step."""
        sequence_out, _ = self.lstm(x)
        last_step = sequence_out[:, -1, :]
        return self.fc(last_step)

# Particle swarm optimisation (PSO) over a 2-D integer search space.
def pso_optimize(obj_func, n_particles, max_iter, velocity_limit, search_range, c1, c2, w):
    """Minimise obj_func(a, b) with a particle swarm; lower score is better.

    :param obj_func: callable(a, b) -> score to minimise
    :param n_particles: swarm size
    :param max_iter: number of update iterations
    :param velocity_limit: (low, high) range for the initial random velocities
    :param search_range: (low, high) position bounds. Initial sampling uses
        np.random.randint, where `high` is exclusive, but np.clip later lets
        positions reach `high` exactly — a pre-existing asymmetry kept as-is.
    :param c1: cognitive (personal-best) acceleration coefficient
    :param c2: social (global-best) acceleration coefficient
    :param w: inertia weight
    :return: (best_position ndarray of 2 ints, best_score)
    """
    positions = np.random.randint(search_range[0], search_range[1], (n_particles, 2))
    velocities = np.random.uniform(velocity_limit[0], velocity_limit[1], (n_particles, 2))
    pbest = positions.copy()
    pbest_scores = np.array([obj_func(pos[0], pos[1]) for pos in positions])
    # BUG FIX: indexing a row yields a *view* into the live array, so the
    # recorded global best was silently mutated by later in-place updates of
    # positions/pbest. Take explicit copies for every gbest snapshot.
    gbest = positions[np.argmin(pbest_scores)].copy()
    gbest_score = pbest_scores.min()

    for _ in range(max_iter):
        for i in range(n_particles):
            r1, r2 = np.random.rand(2)
            velocities[i] = (
                w * velocities[i]
                + c1 * r1 * (pbest[i] - positions[i])
                + c2 * r2 * (gbest - positions[i])
            )
            # positions has an integer dtype, so assigning the clipped float
            # update truncates it back onto the integer grid.
            positions[i] = np.clip(positions[i] + velocities[i], *search_range)
            score = obj_func(int(positions[i][0]), int(positions[i][1]))
            if score < pbest_scores[i]:
                pbest[i], pbest_scores[i] = positions[i], score
                if score < gbest_score:
                    gbest, gbest_score = pbest[i].copy(), score
    return gbest, gbest_score


# Objective function for PSO: validation MSE of an LSTM with given hyper-parameters.
def train_lstm_model(hidden_units, num_layers, X, Y):
    """Train an LSTM on (X, Y) and return its validation-set MSE.

    Each feature row is turned into a sequence of scalars: the trailing
    unsqueeze makes X (samples, n_features, 1), so seq_len = n_features and
    the per-step input size is 1.
    """
    # Tensor conversion plus an 80/20 train/validation split.
    X_t = torch.tensor(X, dtype=torch.float32).unsqueeze(-1).to(device)
    Y_t = torch.tensor(Y, dtype=torch.float32).unsqueeze(-1).to(device)
    X_train, X_val, Y_train, Y_val = train_test_split(X_t, Y_t, test_size=0.2, random_state=42)

    model = LSTMModel(input_size=X_t.shape[2], hidden_units=hidden_units, num_layers=num_layers).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Full-batch training for a fixed 300 epochs.
    for _ in range(300):
        model.train()
        optimizer.zero_grad()
        loss = criterion(model(X_train), Y_train)
        loss.backward()
        optimizer.step()

    # Score on the held-out split.
    model.eval()
    with torch.no_grad():
        predictions = model(X_val)
    return mean_squared_error(Y_val.cpu().numpy(), predictions.cpu().numpy())


# Main driver: train and evaluate one LSTM per batch.
results = []
for i, (X, Y) in enumerate(batches):
    # NOTE(review): this objective is prepared for pso_optimize, but the PSO
    # search is never invoked anywhere — the model below uses fixed
    # hyper-parameters (hidden_units=32, num_layers=2) instead.
    def objective(hidden_units, num_layers):
        return train_lstm_model(hidden_units, num_layers, X, Y)

    # Each feature row becomes a sequence of scalars: X is (samples, n_features, 1).
    X = torch.tensor(X, dtype=torch.float32).unsqueeze(-1).to(device)
    Y = torch.tensor(Y, dtype=torch.float32).unsqueeze(-1).to(device)
    # 70/30 train/test split (the PSO objective above uses 80/20 instead).
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)


    model = LSTMModel(input_size=X.shape[2], hidden_units=32, num_layers=2).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)


    # Full-batch training for a fixed 500 epochs.
    for epoch in range(500):
        model.train()
        optimizer.zero_grad()
        outputs = model(X_train)
        loss = criterion(outputs, Y_train)
        loss.backward()
        optimizer.step()


    # Predict on both splits for the metrics and plots below.
    model.eval()
    with torch.no_grad():
        Y_train_pred = model(X_train).flatten()
        Y_test_pred = model(X_test).flatten()


    # Test-set metrics; R^2 is computed as 1 - MSE / Var(y_true).
    mse = mean_squared_error(Y_test.cpu().numpy(), Y_test_pred.cpu().numpy())
    mae = mean_absolute_error(Y_test.cpu().numpy(), Y_test_pred.cpu().numpy())
    r2 = 1 - mse / np.var(Y_test.cpu().numpy())
    results.append((mse, mae, r2))


    # Use a CJK-capable font so the Chinese plot labels render correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False  # keep '-' from rendering as a box


    # Training set: actual vs predicted.
    plt.figure()
    plt.plot(Y_train.cpu().numpy(), label='训练集实际值', color='red')
    plt.plot(Y_train_pred.cpu().numpy(), label='训练集预测值', color='blue')
    plt.legend()
    plt.title(f'批号 {batch_names[i]} 训练集：实际值 vs 预测值')
    plt.show()


    # Test set: actual vs predicted.
    plt.figure()
    plt.plot(Y_test.cpu().numpy(), label='测试集实际值', color='red')
    plt.plot(Y_test_pred.cpu().numpy(), label='测试集预测值', color='blue')
    plt.legend()
    plt.title(f'批号 {batch_names[i]} 测试集：实际值 vs 预测值')
    plt.show()


# Report the batch whose model achieved the lowest test-set MSE.
best_index = np.argmin([r[0] for r in results])
print(f"最佳模型: 批号 {batch_names[best_index]}")

# Debug report of the tensor shapes flowing through the last trained model.
# BUG FIX: the original printed `out.shape`, but `out` here is the module
# imported at the top of the file (`import out`), not the LSTM activation —
# that attribute access would crash. Recompute the intermediate tensors by
# pushing X (the last batch's tensor) through the model's layers instead.
with torch.no_grad():
    lstm_out, _ = model.lstm(X)       # (batch, seq_len, hidden_units)
    fc_in = lstm_out[:, -1, :]        # last time step, fed to the FC head
    fc_out = model.fc(fc_in)          # (batch, 1)
print(f"LSTM input shape: {X.shape}")
print(f"LSTM output shape: {lstm_out.shape}")
print(f"FC input shape: {fc_in.shape}")
print(f"FC output shape: {fc_out.shape}")