import torch
import torch.nn as nn

class TransformerModel(nn.Module):
    """Transformer-encoder classifier for sequences.

    Expects input of shape (batch, seq_len, input_dim). Each timestep is
    projected to the 32-dim model width, run through a 2-layer Transformer
    encoder, mean-pooled over the sequence, and classified into
    ``num_classes`` classes.
    """

    def __init__(self, input_dim=1, num_classes=2):
        super().__init__()
        # Bug fix: `input_dim` was previously ignored, so the model only
        # accepted inputs whose feature dim happened to equal d_model (32)
        # and the documented default (input_dim=1) could never work.
        # Project the input features to d_model; Identity preserves the old
        # behavior exactly when input_dim already matches.
        self.input_proj = nn.Linear(input_dim, 32) if input_dim != 32 else nn.Identity()
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=32, nhead=4)
        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=2)
        self.fc = nn.Linear(32, num_classes)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes).

        Args:
            x: float tensor of shape (batch, seq_len, input_dim).
        """
        x = self.input_proj(x)
        # TransformerEncoder here is not batch_first: (batch, seq, d_model)
        # must become (seq, batch, d_model).
        x = x.permute(1, 0, 2)
        out = self.encoder(x)
        out = out.mean(dim=0)  # mean-pool over the sequence dimension
        return self.fc(out)

    def train(self, X=None, y=None, epochs=10, batch_size=32):
        """Fit the model on (X, y) — or toggle training mode.

        This method shadows ``nn.Module.train(mode=True)``, which PyTorch
        itself calls (``self.eval()`` invokes ``self.train(False)``). To stay
        compatible, a missing or boolean first argument delegates to the
        mode switch instead of training.

        Args:
            X: array-like of shape (n_samples, seq_len, input_dim), or
               None/bool to delegate to nn.Module.train(mode).
            y: array-like of integer class labels, shape (n_samples,).
            epochs: number of passes over the data.
            batch_size: minibatch size for the DataLoader.
        """
        # Bug fix: delegate mode-switch calls (self.train(), self.eval())
        # to nn.Module instead of crashing with a TypeError.
        if X is None or isinstance(X, bool):
            return super().train(True if X is None else X)
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        dataset = torch.utils.data.TensorDataset(
            torch.tensor(X, dtype=torch.float32),
            torch.tensor(y, dtype=torch.long),
        )
        loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
        # Bug fix: the original `self.train()` re-entered this method with
        # X/y missing and raised before the loop ever ran.
        super().train()
        for _ in range(epochs):
            for xb, yb in loader:
                optimizer.zero_grad()
                logits = self(xb)
                loss = criterion(logits, yb)
                loss.backward()
                optimizer.step()

    def predict(self, X):
        """Return predicted class indices for X as a 1-D numpy array.

        Args:
            X: array-like of shape (n_samples, seq_len, input_dim).
        """
        self.eval()  # safe now: train() delegates bool mode to nn.Module
        with torch.no_grad():
            logits = self(torch.tensor(X, dtype=torch.float32))
            return torch.argmax(logits, dim=1).numpy()

