import torch
import torch.nn as nn

# Autoregressive model with an attention mechanism
from torch import optim


class AutoregressiveModelWithAttention(nn.Module):
    """Autoregressive sequence model: embedding -> causal self-attention -> linear head.

    Input is a LongTensor of token ids shaped (batch, seq_len); output is
    (batch, seq_len, output_size), one prediction per position.

    Args:
        input_size: vocabulary size for the embedding table.
        hidden_size: embedding / attention dimension.
        output_size: per-position output dimension (e.g. vocabulary logits).
        num_heads: number of attention heads (must divide hidden_size).
        num_layers: stored for interface compatibility, but NOTE(review):
            only a single attention layer is actually applied — confirm
            whether stacking was intended.
    """

    def __init__(self, input_size, hidden_size, output_size, num_heads, num_layers):
        super(AutoregressiveModelWithAttention, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.attention = nn.MultiheadAttention(hidden_size, num_heads)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map token ids (batch, seq_len) to (batch, seq_len, output_size)."""
        x = self.embedding(x)   # (batch, seq, hidden)
        # MultiheadAttention defaults to (seq, batch, hidden) layout.
        x = x.permute(1, 0, 2)

        # Causal mask: True above the diagonal forbids attending to future
        # positions. Without this the "autoregressive" model could see the
        # future, leaking targets during teacher-forced training.
        seq_len = x.size(0)
        causal_mask = torch.triu(
            torch.ones(seq_len, seq_len, dtype=torch.bool, device=x.device),
            diagonal=1,
        )

        # Masked self-attention (query = key = value = embedded sequence).
        attn_output, _ = self.attention(x, x, x, attn_mask=causal_mask)

        # Restore (batch, seq, hidden) and project each position independently.
        return self.fc(attn_output.permute(1, 0, 2))

