import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat

# Prepare the data: load the raw ECG signal from disk.
# NOTE(review): assumes ECG.mat contains a numeric array under the key "ECG"
# with at least 20000 samples — confirm against the data file.
data = loadmat("ECG.mat")["ECG"]

# Flatten to a 1-D series regardless of the stored orientation (row/column vector).
data = data.reshape(-1)
# First 10k samples for training, the next 10k held out for testing.
train_data = data[:10000]
test_data = data[10000:20000]

# Preprocessing: slice the series into sliding windows.
# Each sample is `Windows` consecutive points; the target is the point that
# immediately follows the window (one-step-ahead forecasting).
Windows = 100

# sliding_window_view builds every (Windows + 1)-long window in one vectorized
# pass instead of a Python append loop: the first `Windows` columns are the
# model input, the final column is the regression target.
# .copy() is required because the view is read-only and torch.from_numpy
# needs a writable array.
_train_windows = np.lib.stride_tricks.sliding_window_view(train_data, Windows + 1)
train_x = torch.from_numpy(_train_windows[:, :Windows].copy()).float()  # (N, Windows)
train_y = torch.from_numpy(_train_windows[:, Windows:].copy()).float()  # (N, 1)

# Same vectorized sliding-window construction for the held-out test series:
# each row of the view holds Windows input points plus the next point as target.
# .copy() makes the read-only view writable for torch.from_numpy.
_test_windows = np.lib.stride_tricks.sliding_window_view(test_data, Windows + 1)
test_x = torch.from_numpy(_test_windows[:, :Windows].copy()).float()  # (N, Windows)
test_y = torch.from_numpy(_test_windows[:, Windows:].copy()).float()  # (N, 1)

# Define the model.
class LSTM(nn.Module):
    """Single-layer LSTM regressor.

    Maps a (batch, seq_len, 1) window of signal values to a single
    (batch, 1) one-step-ahead prediction.
    """

    def __init__(self):
        super().__init__()
        # 64 hidden units; batch_first so inputs are (batch, seq, feature).
        self.lstm = nn.LSTM(input_size=1, hidden_size=64, num_layers=1,
                            batch_first=True)
        self.fc = nn.Linear(64, 1)

    def forward(self, x):
        """Run the LSTM and regress from the final time step's hidden state."""
        hidden_seq, _state = self.lstm(x)
        last_step = hidden_seq[:, -1, :]  # (batch, hidden_size)
        return self.fc(last_step)
    
# Instantiate the network, its loss, and its optimizer.
model = LSTM()
criterion = nn.MSELoss()  # mean-squared error: standard for regression
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# Train with full-batch gradient descent (the entire training set each step).
num_epochs = 100
model.train()  # explicit training mode, mirroring model.eval() at test time
for epoch in range(num_epochs):
    # Add a trailing feature dimension: (N, Windows) -> (N, Windows, 1).
    outputs = model(train_x.unsqueeze(2))
    loss = criterion(outputs, train_y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

# Evaluate on the held-out data.
model.eval()
with torch.no_grad():  # skip autograd graph construction at inference time
    predict = model(test_x.unsqueeze(2))
predict = predict.numpy()  # no .detach() needed inside no_grad

# Convert the target tensor explicitly before handing it to matplotlib.
plt.plot(test_y.numpy(), label="True")
plt.plot(predict, label="Predict")
plt.legend()
plt.show()


