import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np



# Model definition
class TransformerClassifier(nn.Module):
    """Transformer encoder that classifies a 1-D sequence of scalars.

    Each input of shape (batch, input_length) is projected to d_model
    features per position, combined with a learned positional encoding,
    passed through a Transformer encoder, mean-pooled over the sequence
    dimension, and mapped to a probability in [0, 1].
    """

    def __init__(self, input_length, d_model = 128, num_layers = 4, num_heads = 4, dropout_rate = 0.2):
        """
        Args:
            input_length: number of positions in each input sequence.
            d_model: embedding dimension (must be divisible by num_heads).
            num_layers: number of stacked encoder layers.
            num_heads: number of attention heads per layer.
            dropout_rate: dropout probability inside each encoder layer.
        """
        super(TransformerClassifier, self).__init__()
        # Project each scalar time step to a d_model-dimensional embedding.
        self.input_layer = nn.Linear(1, d_model)
        # Learned positional encoding; broadcasts over the batch dimension.
        self.pos_encoding = nn.Parameter(torch.zeros(input_length, d_model))
        # BUG FIX: forward() feeds (batch, seq, feature) tensors, but
        # TransformerEncoderLayer defaults to batch_first=False
        # (seq, batch, feature), so attention silently mixed the wrong
        # dimension. batch_first=True matches the actual input layout.
        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model, num_heads, d_model * 4, dropout_rate, batch_first=True
            ),
            num_layers,
        )
        # Per-sequence probability head.
        self.output = nn.Sequential(nn.Linear(d_model, 1),
                                    nn.Sigmoid())

    def forward(self, x):
        """Return per-sequence probabilities with shape (batch,)."""
        x = x.unsqueeze(-1)        # (batch, seq) -> (batch, seq, 1)
        x = self.input_layer(x)    # (batch, seq, d_model)
        x = x + self.pos_encoding  # broadcast add over the batch dimension
        x = self.transformer(x)    # (batch, seq, d_model)
        x = x.mean(dim=1)          # mean-pool over sequence positions
        x = self.output(x)         # (batch, 1) probabilities
        # BUG FIX: squeeze only the trailing dim so a batch of size 1
        # yields shape (1,) instead of a 0-d scalar (which breaks losses
        # expecting a (1,) target).
        return x.squeeze(-1)

if __name__ == "__main__":
    # Model hyperparameters
    input_length = 5
    d_model = 64
    num_layers = 2
    num_heads = 1
    dropout_rate = 0.2
    learning_rate = 0.001
    num_epochs = 10
    batch_size = 2

    # Load toy training data: six sequences of length 5 with binary labels.
    x_train = np.array([[0, 2.5, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8], [5, 6, 7, 8, 9], [5, 6, 7, 8, 9]])
    y_train = np.array([0, 0, 1, 1, 1, 0])
    x_train = torch.tensor(x_train, dtype=torch.float32)
    y_train = torch.tensor(y_train, dtype=torch.float32)

    # Initialize model and optimizer.
    # BUG FIX: the original call passed a stray leading argument (10), which
    # shifted every hyperparameter one position to the right and raised a
    # TypeError (6 positional args for 5 parameters).
    model = TransformerClassifier(input_length, d_model, num_layers, num_heads, dropout_rate)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    # BUG FIX: the model already ends in a Sigmoid, so BCEWithLogitsLoss would
    # apply a second sigmoid; BCELoss is the matching criterion. Constructed
    # once here instead of inside the inner loop.
    criterion = nn.BCELoss()

    # Train with shuffled mini-batches each epoch.
    for epoch in range(num_epochs):
        permutation = torch.randperm(x_train.size(0))
        for i in range(0, x_train.size(0), batch_size):
            indices = permutation[i:i + batch_size]
            batch_x, batch_y = x_train[indices], y_train[indices]
            optimizer.zero_grad()
            output = model(batch_x)
            loss = criterion(output, batch_y)
            loss.backward()
            optimizer.step()
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))

    # Predict on held-out sequences.
    x_test = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])
    x_test = torch.tensor(x_test, dtype=torch.float32)
    model.eval()  # disable dropout for deterministic inference
    with torch.no_grad():
        output = model(x_test)
        print(output)
        # BUG FIX: outputs are already probabilities; applying torch.sigmoid
        # again would compress them toward 0.5 and shift the decision
        # boundary. Threshold the probabilities directly.
        predictions = (output >= 0.5).long()
        print('Predictions:', predictions)