#!/usr/bin/env python
import numpy as np
import torch
from torch import nn, optim
from torch.optim import optimizer

from model import AutoregressiveModelWithAttention
from tokenizer import MyTokenizer, text

# Target device for model and tensors.
# NOTE(review): "mps" only exists on Apple-Silicon builds of PyTorch — this
# crashes elsewhere; consider a torch.backends.mps.is_available() fallback.
device = "mps"

block_size = 12  # context window length (tokens per training example)
batch_size = 3   # number of sequences per mini-batch


def get_batch(source=None, target_device=None, block=None, batch=None):
    """Sample a random batch of contiguous token windows for next-token training.

    Fixes: the original re-encoded the full corpus with ``tokenizer.encode``
    on every call (the module already holds the encoded ``data``), and took
    an unnecessary detour through numpy when building the tensors.

    Args:
        source: 1-D sequence of token ids; defaults to the module-level
            pre-encoded corpus ``data``.
        target_device: device for the returned tensors; defaults to the
            module-level ``device``.
        block: window length; defaults to the module-level ``block_size``.
        batch: number of windows; defaults to the module-level ``batch_size``.

    Returns:
        (x, y): LongTensors of shape (batch, block); ``y`` is ``x`` shifted
        one position right (next-token targets).
    """
    if source is None:
        source = data  # encoded once at module level — no per-call re-encode
    if target_device is None:
        target_device = device
    if block is None:
        block = block_size
    if batch is None:
        batch = batch_size
    # Random window start offsets; upper bound keeps x and y fully in range.
    ix = torch.randint(len(source) - block, (batch,))
    x = torch.stack([torch.tensor(source[i:i + block]) for i in ix])
    y = torch.stack([torch.tensor(source[i + 1:i + 1 + block]) for i in ix])
    return x.to(target_device), y.to(target_device)


# Build the vocabulary from the training corpus, then encode it once.
tokenizer = MyTokenizer()
tokenizer.build_dict(text)
data = tokenizer.encode(text)
vocab = tokenizer.word_dict

# NOTE(review): input/output sizes are hard-coded to 12 (which happens to
# equal block_size); the commented-out lines suggest they were meant to be
# len(vocab) — confirm what AutoregressiveModelWithAttention expects.
input_size = 12
# input_size = len(vocab)  # set input feature dimension from vocabulary size
hidden_size = 256  # hidden layer size
output_size = 12
# output_size = len(vocab)  # output feature dimension matches the input dimension

num_heads = 8  # number of attention heads
num_layers = 3  # number of Transformer layers

model_with_attention = AutoregressiveModelWithAttention(input_size, hidden_size, output_size, num_heads, num_layers)
model_with_attention.to(device)
# Define the loss function and optimizer.
criterion = nn.CrossEntropyLoss()
# NOTE(review): this rebinding shadows the `optimizer` module imported via
# `from torch.optim import optimizer` at the top of the file; that import
# appears unused and could be removed.
optimizer = optim.Adam(model_with_attention.parameters(), lr=0.001)

# Model training: each "epoch" is a single optimization step on one
# randomly sampled mini-batch.
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    src, tgt = get_batch()
    # NOTE(review): the model is called with the source sequence only;
    # whether it shifts/masks internally depends on
    # AutoregressiveModelWithAttention — confirm in model.py.
    output = model_with_attention(src)
    # NOTE(review): squeeze(0) is a no-op when batch_size > 1, and
    # CrossEntropyLoss expects (N, C, ...) logits vs (N, ...) targets —
    # with output_size == block_size == 12 a shape mismatch may be hidden.
    # Verify the intended reshape (e.g. output.view(-1, C) vs tgt.view(-1)).
    loss = criterion(output.squeeze(0), tgt)
    loss.backward()
    optimizer.step()

    # Report the running loss every 10 steps.
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

# Save the trained weights.
torch.save(model_with_attention.state_dict(), 'autoregressive_model_with_attention.pth')
test_case = '女人更喜欢咖啡'

# Run the model on the test sentence (no gradients needed for inference).
# NOTE(review): model_with_attention.eval() is never called — if the model
# contains dropout/batch-norm, inference still runs in training mode.
with torch.no_grad():
    # output = model_with_attention(input_tensor)
    # output = model_with_attention(torch.tensor(tokenizer.encode(test_case)).to(device))
    # unsqueeze(0) adds the leading batch dimension the model was trained with.
    output = model_with_attention(torch.tensor(tokenizer.encode(test_case)).unsqueeze(0).to(device))

# Print the results.
print(output.shape)
# Greedy decoding: take the highest-scoring index at each position.
# NOTE(review): these entries are 0-dim tensors, not Python ints — confirm
# tokenizer.decode accepts tensors; otherwise call .item() on each.
predicted_words = [torch.argmax(word) for word in output[0]]
decode = tokenizer.decode(predicted_words)
predicted_sentence = ''.join(decode)
# # # print("Input Sentence:", input_sentence)
print("Predicted Sentence:", predicted_sentence)

if __name__ == '__main__':
    # NOTE(review): everything above already runs at import time, so this
    # guard protects nothing — consider moving the script body under it.
    print("==")
