"""
LSTM模型训练脚本
功能：读取特征数据，构建LSTM时间序列模型，训练模型并保存
"""

import pandas as pd
import pickle
import torch
from config import *

def inputdata(path):
    """
    Load a comma-separated data file into a DataFrame.

    Args:
        path (str): Location of the CSV file on disk.

    Returns:
        pd.DataFrame: Parsed contents; the first row is used as the header.
    """
    return pd.read_csv(path, header=0, sep=",", encoding="utf-8")


# Load the feature dataset used throughout the rest of the script.
# NOTE(review): FEATURE_DATA_PATH comes from `from config import *` — presumably
# a CSV with at least "StockCode", "Date" and "Close" columns (all are read
# below); confirm against the feature-generation step.
feature = inputdata(FEATURE_DATA_PATH)


def process_data(npdf, stp=32):
    """
    Slice a time series into supervised (sequence, label) training pairs.

    A sliding window of length ``stp`` is moved over the rows of ``npdf``;
    each window becomes an input sequence and the row immediately after it
    becomes the label.

    Args:
        npdf (np.array): 2-D array of time-ordered feature rows.
        stp (int): Window length in rows (defaults to 32).

    Returns:
        list: (sequence, label) tuples, where sequence is a ``(stp, n_cols)``
        FloatTensor and label is a flattened FloatTensor of the next row.
    """
    return [
        (
            torch.FloatTensor(npdf[start : start + stp]),
            torch.FloatTensor(npdf[start + stp]).view(-1),
        )
        for start in range(npdf.shape[0] - stp)
    ]


# Column names and the list of distinct stock codes.
column_names = feature.columns.tolist()
stockcodes = feature["StockCode"].drop_duplicates().tolist()

# Build training samples per stock so no sliding window spans two stocks.
train_data = []
for stockcode in stockcodes:
    # Rows belonging to this single stock, as a raw array.
    # NOTE(review): .values keeps every column (including StockCode and Date),
    # so the tensors built by process_data presumably rely on those columns
    # being numeric-compatible — confirm against the feature file's schema.
    stock_data = feature[feature["StockCode"] == stockcode].values

    # Skip stocks with fewer rows than one full window plus its label.
    if len(stock_data) < 32:
        continue

    # Sliding-window samples for this stock.
    train_data += process_data(stock_data, stp=32)

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader

# Model hyper-parameters and training settings come from the shared config module.
lstm_config = MODEL_PARAMS["LSTM"]
training_config = TRAINING_PARAMS

# NOTE(review): input_size is never read below — train_model derives the input
# width from the data itself; output_size feeds the final linear layer.
input_size = 1
hidden_size = lstm_config["hidden_size"]
num_layers = lstm_config["num_layers"]
output_size = 1
dropout = lstm_config["dropout"]

# Device selection: "auto" prefers CUDA when available, otherwise CPU;
# any other configured value is passed to torch.device verbatim.
if training_config["device"] == "auto":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
    device = torch.device(training_config["device"])


# 定义LSTM模型类
class LSTMModel(nn.Module):
    """
    LSTM time-series prediction model.

    Feeds the input sequence through a (possibly multi-layer) LSTM and maps
    the last time step's hidden output through a linear layer to produce a
    single prediction per sequence.

    Args:
        input_size (int): Number of features per time step.
        hidden_size (int): LSTM hidden dimension.
        num_layers (int): Number of stacked LSTM layers.
        output_size (int): Dimension of the final prediction.
        dropout (float): Dropout between stacked LSTM layers.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # LSTM layer (batch_first: input is (batch, seq, features)).
        self.lstm = nn.LSTM(
            input_size, hidden_size, num_layers, batch_first=True, dropout=dropout
        )
        # Fully connected output layer.
        self.fc = nn.Linear(hidden_size, output_size)
        # Recurrent state (h, c). Callers may assign explicit tensors before
        # forward(); left as None, nn.LSTM falls back to zero-initialized
        # states. Initializing here fixes two bugs in the previous version:
        # forward() raised AttributeError when hidden_cell was never set, and
        # __del__ tried to delete the possibly-missing attribute.
        self.hidden_cell = None

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): Input batch of shape (batch, seq_len, input_size).

        Returns:
            torch.Tensor: Predictions of shape (batch, output_size).
        """
        # Run the LSTM, keeping the final (h, c) state on the module as the
        # original implementation did.
        out, self.hidden_cell = self.lstm(x, self.hidden_cell)
        # Predict from the last time step's output only.
        out = self.fc(out[:, -1, :])
        return out


def train_model(train_data, i, num_epochs=None):
    """
    Train an LSTM model on pre-built (sequence, label) pairs.

    Args:
        train_data (list): (sequence, label) tensor tuples from process_data.
        i (int): Index of the target column inside each label vector.
        num_epochs (int): Epoch count; falls back to the config value if None.

    Returns:
        LSTMModel: The trained model (a fresh, untrained one if no data).
    """
    epochs = training_config["num_epochs"] if num_epochs is None else num_epochs

    # Nothing to fit — hand back a freshly initialized model.
    if not train_data:
        return LSTMModel(1, hidden_size, num_layers, output_size, dropout).to(device)

    # Move every sample onto the training device.
    pairs = [(seq.to(device), lab.to(device)) for seq, lab in train_data]

    # Stack sequences and pull the single target column out of each label.
    features = torch.stack([seq for seq, _ in pairs])
    targets = torch.stack([lab[i] for _, lab in pairs])

    loader = DataLoader(
        TensorDataset(features, targets),
        batch_size=training_config["batch_size"],
        shuffle=True,
    )

    # Input width is inferred from the first sequence's feature dimension.
    model = LSTMModel(
        len(pairs[0][0][0]), hidden_size, num_layers, output_size, dropout
    ).to(device)

    # Loss, optimizer and a step-decay learning-rate schedule.
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=training_config["learning_rate"])
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.7)

    for epoch in range(epochs):
        epoch_loss = 0.0
        for batch_seq, batch_target in loader:
            optimizer.zero_grad()

            # Re-zero the recurrent state for every batch.
            model.hidden_cell = (
                torch.zeros(model.num_layers, batch_seq.size(0), model.hidden_size).to(device),
                torch.zeros(model.num_layers, batch_seq.size(0), model.hidden_size).to(device),
            )

            predictions = model(batch_seq)
            loss = criterion(predictions, batch_target.unsqueeze(1))
            epoch_loss += loss.item()

            loss.backward()
            optimizer.step()

        scheduler.step()
        print("Epoch [{}/{}], Loss: {:.4f}".format(epoch + 1, epochs, epoch_loss))

    return model


# Train the model that predicts the "Close" column.
colname2index = {x: i for i, x in enumerate(column_names)}
# NOTE(review): the "+ 2" offset presumably compensates for extra leading
# columns in the label vector relative to the DataFrame column order —
# confirm against the feature layout before reusing this index elsewhere.
model_i = train_model(train_data, colname2index["Close"] + 2)

# Persist the trained model. The previous version passed an un-managed
# open(...) handle to pickle.dump, leaking the file descriptor; the context
# manager guarantees the file is flushed and closed.
model_name = f"{MODEL_DIR}/model_Close.bin"
with open(model_name, "wb") as model_file:
    pickle.dump(model_i, model_file)
