import numpy as np
import tiktoken
import torch
from torch import optim
from torch.nn import CrossEntropyLoss

from config import Config
from core.models import Transformer
from data import test_data

# Training pairs: each item is (source_text, target_text) — presumably; verify against data.test_data.
src_data = test_data

# BPE tokenizer shared by encoder and decoder inputs.
encoding = tiktoken.get_encoding("cl100k_base")

batch_size = Config.batch_size

d_model = Config.model_dimension  # embedding size for each token
n_heads = Config.heads_number  # number of attention heads (e.g. 8)
vocab_size = encoding.n_vocab  # vocabulary size taken from the tokenizer
src_context_length = Config.src_context_length  # max source sequence length
tgt_context_length = Config.tgt_context_length  # max target sequence length
heads_number = Config.heads_number  # number of attention heads (duplicate of n_heads above)
n_layers = Config.n_layers  # number of encoder/decoder layers
d_k = Config.d_k  # per-head key dimension
d_v = Config.d_v  # per-head value dimension
d_ff = Config.d_ff  # feed-forward hidden dimension
dropout = Config.dropout  # dropout probability

model = Transformer(d_model, vocab_size, src_context_length, tgt_context_length, heads_number, n_layers, d_k, d_v, d_ff,
                    dropout)
criterion = CrossEntropyLoss(ignore_index=0)  # token id 0 is treated as padding and excluded from the loss
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.99)  # SGD+momentum; author notes Adam did not work well here

for epoch in range(Config.epoch):

    # Iterate over the training pairs in mini-batches.
    for index in range(0, len(src_data), batch_size):
        optimizer.zero_grad()
        # BUG FIX: `index` already advances in steps of batch_size, so the
        # slice is [index : index + batch_size].  The original
        # [index * batch_size : (index + 1) * batch_size] skipped most of the
        # data — only the first iteration produced a real batch, later ones
        # sliced at batch_size**2 and beyond (usually empty).
        batch_data = src_data[index:index + batch_size]
        # NOTE(review): stacking encodings into a LongTensor assumes every
        # sequence in the batch tokenizes to the same length — confirm the
        # data is fixed-length or pre-padded, otherwise this raises.
        enc_inputs = torch.LongTensor([encoding.encode(item[0]) for item in batch_data])
        dec_inputs = torch.LongTensor([encoding.encode(item[1]) for item in batch_data])
        # Teacher forcing: the target is the decoder input shifted left by one.
        target_batch = dec_inputs[:, 1:]
        outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_inputs, dec_inputs)
        # Cross-entropy between predicted logits and the flattened targets.
        # NOTE(review): assumes `outputs` is already flattened to
        # (N, vocab_size) with N == target_batch.numel() — verify against
        # Transformer.forward(); a full-length decoder output would be one
        # step longer than the shifted target.
        loss = criterion(outputs, target_batch.contiguous().view(-1))
        loss.backward()
        optimizer.step()
