# core/models.py
import logging
import math

import torch
import torch.nn as nn
# ---------------- Base ----------------
class BaseTimeSeriesModel(nn.Module):
    """Shared behaviour for the time-series models below."""

    def reset_weights(self):
        # self.modules() walks the module tree recursively; self.children()
        # would miss layers nested inside containers such as nn.Sequential.
        for layer in self.modules():
            if hasattr(layer, "reset_parameters"):
                layer.reset_parameters()

    def _prepare_input(self, x):
        """Unwrap (x, y)-style batches and coerce array-likes to a float tensor."""
        if isinstance(x, (tuple, list)):
            logging.debug("forward received tuple/list of length %d", len(x))
            x = x[0]
        if not isinstance(x, torch.Tensor):
            x = torch.as_tensor(
                x, dtype=torch.float32, device=next(self.parameters()).device
            )
        return x
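# Usage sketch (illustrative, not part of the original module): re-initialising
# one model between cross-validation folds instead of re-constructing it.
# `folds` and `train` below are hypothetical placeholders.
#
#   model = LSTMModel(input_size=8, hidden_size=32, num_layers=2, output_size=1)
#   for fold in folds:
#       model.reset_weights()   # fresh parameters for each fold
#       train(model, fold)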
# ---------------- LSTM ----------------
class LSTMModel(BaseTimeSeriesModel):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            # nn.LSTM only applies dropout between stacked layers.
            dropout=dropout if num_layers > 1 else 0.0,
        )
        self.fc = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [batch, seq_len, input_size]
        x = self._prepare_input(x)
        batch_size = x.size(0)
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_size, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        out = self.dropout(out[:, -1, :])  # last timestep only
        return self.fc(out)
# ---------------- GRU ----------------
class GRUModel(BaseTimeSeriesModel):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            # nn.GRU only applies dropout between stacked layers.
            dropout=dropout if num_layers > 1 else 0.0,
        )
        self.fc = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [batch, seq_len, input_size]
        x = self._prepare_input(x)
        batch_size = x.size(0)
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size, device=x.device)
        out, _ = self.gru(x, h0)
        out = self.dropout(out[:, -1, :])  # last timestep only
        return self.fc(out)
# ---------------- CNN ----------------
class CNNModel(BaseTimeSeriesModel):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super().__init__()
        # num_layers is accepted for a uniform constructor signature but unused here.
        self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self._prepare_input(x)
        x = x.transpose(1, 2)  # [batch, seq_len, features] -> [batch, features, seq_len]
        out = self.relu(self.conv1(x))
        out = out.mean(dim=2)  # global average pooling over the sequence
        out = self.dropout(out)
        return self.fc(out)
# ---------------- MLP ----------------
class MLPModel(BaseTimeSeriesModel):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super().__init__()
        layers = []
        in_features = input_size
        for _ in range(num_layers):
            layers.append(nn.Linear(in_features, hidden_size))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(dropout))
            in_features = hidden_size
        # Using in_features (not hidden_size) keeps this correct even when num_layers == 0.
        layers.append(nn.Linear(in_features, output_size))
        self.mlp = nn.Sequential(*layers)

    def forward(self, x):
        x = self._prepare_input(x)
        # Feed only the features of the last timestep; this selects one step
        # rather than flattening the whole window.
        return self.mlp(x[:, -1, :])
# ---------------- Hybrid CNN-GRU ----------------
class HybridCNNGRUModel(BaseTimeSeriesModel):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super().__init__()
        self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size=3, padding=1)
        # Note: unlike the other recurrent models, no inter-layer dropout is
        # passed to the GRU here; dropout is applied only after the last step.
        self.gru = nn.GRU(hidden_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self._prepare_input(x)
        x = x.transpose(1, 2)                # [batch, features, seq_len] for Conv1d
        out = self.conv1(x).transpose(1, 2)  # back to [batch, seq_len, hidden]
        out, _ = self.gru(out)
        out = self.dropout(out[:, -1, :])
        return self.fc(out)
# ---------------- Transformer ----------------
class TransformerModel(BaseTimeSeriesModel):
    def __init__(
        self, input_size, hidden_size, num_layers, output_size, dropout=0.2, nhead=4
    ):
        super().__init__()
        # hidden_size must be divisible by nhead.
        self.embedding = nn.Linear(input_size, hidden_size)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=hidden_size, nhead=nhead, dropout=dropout
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self._prepare_input(x)
        x = self.embedding(x)
        # The encoder layer defaults to seq-first [seq_len, batch, hidden],
        # hence the transpose. No positional encoding is applied here; see
        # the sketch below this class.
        out = self.transformer(x.transpose(0, 1))
        out = out[-1, :, :]  # representation of the last timestep
        return self.fc(out)
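# Note: the encoder above has no positional encoding, so it is permutation-
# invariant over timesteps. A minimal sinusoidal sketch (an assumption, not
# part of the original file; assumes an even d_model) that could be applied
# to the output of self.embedding:
class SinusoidalPositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len=5000):
        super().__init__()
        position = torch.arange(max_len).unsqueeze(1)  # [max_len, 1]
        div_term = torch.exp(
            torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model)
        )
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)   # even indices
        pe[:, 1::2] = torch.cos(position * div_term)   # odd indices
        self.register_buffer("pe", pe)                 # moves with the module, not a parameter

    def forward(self, x):
        # x: [batch, seq_len, d_model]; broadcast the first seq_len rows of pe.
        return x + self.pe[: x.size(1)]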
# ---------------- BiLSTM ----------------
class BiLSTMModel(BaseTimeSeriesModel):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0.0,
            bidirectional=True,
        )
        # Both directions are concatenated, hence hidden_size * 2.
        self.fc = nn.Linear(hidden_size * 2, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self._prepare_input(x)
        batch_size = x.size(0)
        # 2 * num_layers initial states: one set per direction.
        h0 = torch.zeros(self.num_layers * 2, batch_size, self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers * 2, batch_size, self.hidden_size, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        out = self.dropout(out[:, -1, :])
        return self.fc(out)
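# ---------------- Smoke test ----------------
# A minimal sanity check (illustrative; the real entry point presumably lives
# in app.py): every model maps [batch, seq_len, input_size] to [batch, output_size].
if __name__ == "__main__":
    batch, seq_len, n_features, n_out = 4, 16, 8, 1
    x = torch.randn(batch, seq_len, n_features)
    for cls in (LSTMModel, GRUModel, CNNModel, MLPModel,
                HybridCNNGRUModel, TransformerModel, BiLSTMModel):
        model = cls(input_size=n_features, hidden_size=32, num_layers=2, output_size=n_out)
        y = model(x)
        assert y.shape == (batch, n_out), f"{cls.__name__}: got {tuple(y.shape)}"
        print(f"{cls.__name__}: ok, output {tuple(y.shape)}")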