import torch
import torch.nn as nn

# 定义带有注意力机制的自回归模型
from torch import optim


class AutoregressiveModelWithAttention(nn.Module):
    """Autoregressive (causal) self-attention model over token indices.

    Fixes over the original version:
      * ``num_layers`` was stored but ignored — exactly one attention layer
        was built regardless of its value.  It now controls how many
        self-attention layers are stacked.
      * Attention was unmasked, so position i could attend to positions > i
        even though the training target is the *next* token.  A causal mask
        now restricts each position to current-and-earlier positions.

    Args:
        input_size: vocabulary size (number of embedding rows).
        hidden_size: embedding / attention width (must be divisible by num_heads).
        output_size: number of output classes predicted per position.
        num_heads: attention heads per layer.
        num_layers: number of stacked self-attention layers.
    """

    def __init__(self, input_size, hidden_size, output_size, num_heads, num_layers):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(input_size, hidden_size)
        # Same attribute name as before, but now holds num_layers layers.
        self.attention = nn.ModuleList(
            nn.MultiheadAttention(hidden_size, num_heads) for _ in range(num_layers)
        )
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map (batch, seq) token indices to (batch, seq, output_size) logits."""
        x = self.embedding(x)      # (batch, seq, hidden)
        x = x.permute(1, 0, 2)     # (seq, batch, hidden): MultiheadAttention's default layout
        seq_len = x.size(0)
        # Upper-triangular -inf mask: position i may not attend to j > i.
        causal_mask = torch.triu(
            torch.full((seq_len, seq_len), float('-inf'), device=x.device), diagonal=1
        )
        for layer in self.attention:
            x, _ = layer(x, x, x, attn_mask=causal_mask)
        # Restore (batch, seq, hidden) before the per-position classifier.
        return self.fc(x.permute(1, 0, 2))


# Vocabulary: maps each known word to a unique index.
vocab = {"This": 0, "is": 1, "a": 2, "test": 3, "sentence": 4, "for": 5, "the": 6, "autoregressive": 7, "model": 8,
         "with": 9, "attention": 10}

# Model hyperparameters.
# Bug fix: word_to_index() maps out-of-vocabulary words to len(vocab), so the
# embedding table and the classifier both need len(vocab) + 1 slots; with the
# original len(vocab) sizing an unknown word raised an index-out-of-range in
# nn.Embedding (and the loss target could exceed the class count).
input_size = len(vocab) + 1   # known words + one shared slot for unknown words
hidden_size = 256             # embedding / attention width
output_size = len(vocab) + 1  # predict over the same (vocab + UNK) index space

num_heads = 8   # attention heads
num_layers = 3  # attention layers
model_with_attention = AutoregressiveModelWithAttention(input_size, hidden_size, output_size, num_heads, num_layers)


# 单词转换为索引
def word_to_index(word):
    """Look up *word* in the vocabulary; unknown words map to len(vocab)."""
    try:
        return vocab[word]
    except KeyError:
        # All out-of-vocabulary words share a single index one past the table.
        return len(vocab)


# 索引转换为单词
def index_to_word(index):
    """Inverse of word_to_index: return the word stored at *index*, or "<UNK>".

    Uses ``==`` comparison (not a dict lookup) so 0-dim index tensors from
    torch.argmax still match plain-int vocabulary values.
    """
    return next((word for word, idx in vocab.items() if idx == index), "<UNK>")


# Quick sanity checks for word_to_index / index_to_word:
# print(word_to_index("test"))  # prints: 3
# print(index_to_word(7))  # prints: "autoregressive"


# Sample data: one training sentence plus a shorter phrase used for prediction.
input_sentence = "This is a test sentence for the autoregressive model with attention"

test_case = "test sentence"


# 假设有一个函数可以将输入句子转换为对应的张量形式，这里假设使用简单的单词索引表示
def sentence_to_tensor(sentence):
    """Encode a whitespace-separated sentence as a (1, seq_len) index tensor.

    Each token is mapped through word_to_index; a leading batch dimension is
    added so the result can be fed straight to the model.
    """
    indices = [word_to_index(token) for token in sentence.split()]
    return torch.tensor(indices).unsqueeze(0)


# Loss and optimizer for next-token prediction training.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model_with_attention.parameters(), lr=0.001)

# Encode the training sentence as a (1, seq_len) index tensor.
input_tensor = sentence_to_tensor(input_sentence)

# Training loop: teach the model to emit each token's successor.
num_epochs = 10
for epoch in range(num_epochs):
    optimizer.zero_grad()
    output = model_with_attention(input_tensor)

    # Target = the input shifted left by one position, with the final token
    # duplicated so the lengths still match.
    sequence = input_tensor.squeeze(0)
    target = torch.cat((sequence[1:], sequence[-1:]))

    loss = criterion(output.squeeze(0), target)
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

# Persist the trained weights.
torch.save(model_with_attention.state_dict(), 'autoregressive_model_with_attention.pth')

# Run the trained model on the test phrase without tracking gradients.
with torch.no_grad():
    output = model_with_attention(sentence_to_tensor(test_case))

# Greedy decoding: take the highest-scoring index at every position and map
# it back to a word (unknown indices decode to "<UNK>").
predicted_words = []
for position_scores in output[0]:
    predicted_words.append(index_to_word(torch.argmax(position_scores)))
predicted_sentence = ' '.join(predicted_words)
print("Input Sentence:", test_case)
print("Predicted Sentence:", predicted_sentence)

if __name__ == '__main__':
    # NOTE(review): all of the training/prediction code above runs at import
    # time; only this marker print is actually guarded. Consider moving the
    # script body into a main() called from here.
    print("==")
