import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import torch
import torch.nn as nn

# Load the dataset and index it by trading date.
data = pd.read_csv('./002851.csv')
data['Date'] = pd.to_datetime(data['date'])
data.set_index('Date', inplace=True)

# Plot the closing price over time.
# Fix: the title previously said "Google Stock Price", but the data file is
# ticker 002851 — label the chart accurately.
plt.figure(figsize=(10, 6))
plt.plot(data['close'])
plt.title('002851 Stock Price')
plt.xlabel('Date')
plt.ylabel('Closing Price')
plt.show()


from sklearn.preprocessing import MinMaxScaler
# Fix: removed a duplicate `import numpy as np` — numpy is already imported
# at the top of the file.

# Scale closing prices into [0, 1]; LSTMs train more stably on normalized
# inputs, and the fitted scaler is reused later to invert predictions.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data['close'].values.reshape(-1, 1))

# Chronological 80/20 train/test split — no shuffling, this is a time series.
train_size = int(len(scaled_data) * 0.8)
train_data = scaled_data[:train_size]
test_data = scaled_data[train_size:]

def create_dataset(data, time_step=1):
    """Build supervised sliding-window samples from a scaled series.

    Args:
        data: 2-D array of shape (n, 1) holding the scaled series.
        time_step: window length; each sample is `time_step` consecutive
            values and the target is the value immediately after the window.

    Returns:
        (X, y): X of shape (n - time_step - 1, time_step), y of shape
        (n - time_step - 1,).
    """
    # NOTE(review): the `- 1` drops one otherwise-usable sample; the plot
    # offsets later in this file depend on this count, so it is kept as-is.
    n_samples = len(data) - time_step - 1
    windows = [data[start:start + time_step, 0] for start in range(n_samples)]
    targets = [data[start + time_step, 0] for start in range(n_samples)]
    return np.array(windows), np.array(targets)

# Window length: each sample looks back 7 steps to predict the next value.
time_step = 7
X_train, y_train = create_dataset(train_data, time_step)
X_test, y_test = create_dataset(test_data, time_step)

# The LSTM expects inputs shaped [batch, seq_len, features] and column-vector
# targets, so add a trailing feature axis before converting to tensors.
X_train = torch.tensor(X_train[..., np.newaxis], dtype=torch.float32)
X_test = torch.tensor(X_test[..., np.newaxis], dtype=torch.float32)
y_train = torch.tensor(y_train[:, np.newaxis], dtype=torch.float32)
y_test = torch.tensor(y_test[:, np.newaxis], dtype=torch.float32)

# Batched loaders: shuffle training windows each epoch, keep test order fixed.
train_dataset = TensorDataset(X_train, y_train)
test_dataset = TensorDataset(X_test, y_test)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)


class LSTMModel(nn.Module):
    """Two-layer stacked LSTM regressor.

    Maps a window of shape (batch, seq_len, input_size) to one predicted
    value per sample, regressing from the hidden state at the final step.
    """

    def __init__(self, input_size=1, hidden_layer_size=50, output_size=1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size

        # batch_first keeps tensors in (batch, seq, feature) layout throughout.
        self.lstm1 = nn.LSTM(input_size, hidden_layer_size, batch_first=True)
        self.dropout1 = nn.Dropout(0.2)
        self.lstm2 = nn.LSTM(hidden_layer_size, hidden_layer_size, batch_first=True)
        self.dropout2 = nn.Dropout(0.2)
        self.linear = nn.Linear(hidden_layer_size, output_size)

    def forward(self, input_seq):
        """Return predictions of shape (batch, output_size) for `input_seq`."""
        hidden, _ = self.lstm1(input_seq)
        hidden = self.dropout1(hidden)
        hidden, _ = self.lstm2(hidden)
        hidden = self.dropout2(hidden)
        # Sequence-to-one: only the last time step feeds the linear head.
        return self.linear(hidden[:, -1, :])

# Instantiate the model.
model = LSTMModel()

# Loss and optimizer — defined exactly once (the original file created both
# twice in a row; the duplicates were redundant and have been removed).
loss_function = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training with per-epoch validation. `val_losses` was previously declared
# but never populated; a proper no-grad validation pass now fills it.
num_epochs = 50
train_losses = []
val_losses = []

for epoch in range(num_epochs):
    # --- training pass ---
    model.train()
    train_loss = 0.0
    for batch_X, batch_y in train_loader:
        optimizer.zero_grad()
        y_pred = model(batch_X)
        loss = loss_function(y_pred, batch_y)
        loss.backward()
        optimizer.step()
        # Weight by batch size so the epoch figure is a per-sample average
        # even when the last batch is smaller.
        train_loss += loss.item() * batch_X.size(0)

    train_loss = train_loss / len(train_loader.dataset)
    print(f'train_loss:{train_loss}')
    train_losses.append(train_loss)

    # --- validation pass: dropout off, no gradients ---
    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for batch_X, batch_y in test_loader:
            y_pred = model(batch_X)
            val_loss += loss_function(y_pred, batch_y).item() * batch_X.size(0)
    val_loss = val_loss / len(test_loader.dataset)
    val_losses.append(val_loss)

# Predict over the training and test windows.
# Fix: X_train / X_test are already float32 tensors (converted above), so the
# previous `torch.tensor(X_train, ...)` re-wrapping — which raises a
# UserWarning and copies the data for nothing — has been removed.
model.eval()
with torch.no_grad():
    train_predict = model(X_train).numpy()
    test_predict = model(X_test).numpy()

# Undo the MinMax scaling so predictions and targets are back in price units.
train_predict = scaler.inverse_transform(train_predict)
test_predict = scaler.inverse_transform(test_predict)
actual_train = scaler.inverse_transform(y_train.numpy().reshape(-1, 1))
actual_test = scaler.inverse_transform(y_test.numpy().reshape(-1, 1))

# Lay the predictions onto a full-length NaN canvas so they line up with the
# original date index when plotted.
train_predict_plot = np.empty_like(scaled_data)
train_predict_plot[:, :] = np.nan
train_predict_plot[time_step:len(train_predict) + time_step, :] = train_predict

test_predict_plot = np.empty_like(scaled_data)
test_predict_plot[:, :] = np.nan
# The offset mirrors create_dataset's windowing: a time_step lead-in on each
# split plus the extra sample its `- 1` drops.
test_predict_plot[len(train_predict) + (time_step * 2) + 1:len(scaled_data) - 1, :] = test_predict

# Plot actual vs predicted prices.
plt.figure(figsize=(12, 6))
plt.plot(data.index, scaler.inverse_transform(scaled_data), label='Actual Price')
plt.plot(data.index, train_predict_plot, label='Predicted Train Price')
plt.plot(data.index, test_predict_plot, label='Predicted Test Price')
plt.xlabel('Date')  # fix: was the typo 'data'
plt.ylabel('Stock Price')
plt.legend()
plt.show()





# Seed the forecast with the last `time_step` scaled closes, shaped for the
# model as [1, time_step, 1].
last_sequence = scaled_data[-time_step:].reshape(1, time_step, 1)
last_sequence = torch.tensor(last_sequence, dtype=torch.float32)

# Recursive 7-day forecast: each prediction is fed back in as the newest
# element of the window while the oldest is dropped.
future_predictions = []
print(scaler.inverse_transform(scaled_data[-1].reshape(-1, 1)))
model.eval()
with torch.no_grad():
    window = last_sequence
    for _ in range(7):
        step_pred = model(window).numpy().flatten()[0]
        future_predictions.append(step_pred)

        # Slide the window: drop the oldest value, append the new prediction.
        shifted = np.append(window.numpy()[0, 1:, :], [[step_pred]], axis=0)
        window = torch.tensor(shifted.reshape(1, time_step, 1), dtype=torch.float32)

# Map scaled predictions back into price units.
future_predictions = scaler.inverse_transform(np.array(future_predictions).reshape(-1, 1))

# Print the 7-day forecast.
print("未来 7 天的预测股价：")
for i, price in enumerate(future_predictions, start=1):
    print(f"第 {i} 天: {price[0]:.2f}")

# Plot the history together with the forecast.
plt.figure(figsize=(12, 6))
plt.plot(data.index, scaler.inverse_transform(scaled_data), label='Actual Price', color='blue')

# NOTE(review): pd.date_range yields calendar days (weekends included); for
# trading data a business-day frequency may be intended — confirm.
future_dates = pd.date_range(start=data.index[-1], periods=8)[1:]
plt.plot(future_dates, future_predictions, label='Future Predictions', color='red', linestyle='--')

plt.xlabel('Date')
plt.ylabel('Stock Price')
plt.title('Future Stock Price Prediction')
plt.legend()
plt.show()