import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from data_process import prepare_stock_data
from model import LSTMModel
from torch.utils.data import DataLoader


def train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs, device):
    """Train *model* for *num_epochs*, evaluating on the test set each epoch.

    Args:
        model: the network to optimize (moved to *device* by the caller).
        train_loader: DataLoader yielding (X, y) training batches.
        test_loader: DataLoader yielding (X, y) evaluation batches.
        criterion: loss function applied to (prediction, target).
        optimizer: optimizer stepping over model.parameters().
        num_epochs: number of full passes over the training data.
        device: torch.device the batches are moved to.

    Returns:
        (train_losses, test_losses): per-epoch average batch losses.
    """
    train_losses, test_losses = [], []

    for epoch in range(num_epochs):
        # --- training pass ---
        model.train()
        running = 0.0
        for features, targets in train_loader:
            features, targets = features.to(device), targets.to(device)

            optimizer.zero_grad()
            loss = criterion(model(features), targets)
            loss.backward()
            optimizer.step()

            running += loss.item()

        # Average of per-batch losses over the epoch.
        train_loss = running / len(train_loader)
        train_losses.append(train_loss)

        # --- evaluation pass (no gradients) ---
        model.eval()
        running = 0.0
        with torch.no_grad():
            for features, targets in test_loader:
                features, targets = features.to(device), targets.to(device)
                running += criterion(model(features), targets).item()

        test_loss = running / len(test_loader)
        test_losses.append(test_loss)

        print(f'Epoch {epoch+1}/{num_epochs}')
        print(f'Training Loss: {train_loss:.4f}')
        print(f'Test Loss: {test_loss:.4f}')
        print('-' * 60)

    return train_losses, test_losses


def main():
    """Entry point: train an LSTM on stock data, save the weights, plot losses."""
    # Hyperparameters
    LOOKBACK = 60       # number of past days fed to the model per sample
    BATCH_SIZE = 32
    LEARNING_RATE = 0.001
    NUM_EPOCHS = 50
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Prepare the data. `scaler` is the fitted normalizer returned for later
    # inverse-transforms; it is unused in this script but kept for clarity.
    train_dataset, test_dataset, scaler = prepare_stock_data(
        './resources/google_stock_train.csv',
        lookback=LOOKBACK
    )

    train_loader = DataLoader(
        train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

    # Build model, loss, and optimizer
    model = LSTMModel().to(DEVICE)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    # Train
    train_losses, test_losses = train_model(
        model, train_loader, test_loader,
        criterion, optimizer, NUM_EPOCHS, DEVICE
    )

    # Save the trained weights BEFORE showing the plot: plt.show() blocks
    # until the window is closed, and if the process is killed there the
    # model would otherwise never be persisted.
    torch.save(model.state_dict(), 'stock_lstm_model.pth')

    # Plot the loss curves
    plt.figure(figsize=(10, 6))
    plt.plot(train_losses, label='Training Loss')
    plt.plot(test_losses, label='Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Test Loss')
    plt.legend()
    plt.show()


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
