import torch.optim

from dataset.skip_gram_loader import _generate_loader
from torch import nn


class SkipGramModel(nn.Module):
    """Skip-gram word2vec model: embed a center word, predict its context words.

    The output is flattened to (batch_size * seq_len, vocab_size) so the
    logits can be fed directly to ``nn.CrossEntropyLoss``.
    """

    def __init__(self, vocab_size, embedding_dims):
        """
        Args:
            vocab_size: number of distinct tokens in the vocabulary.
            embedding_dims: dimensionality of the word vectors
                (chosen by the user; typically small here, e.g. 3 or 4).
        """
        super().__init__()
        self.vocab_size = vocab_size
        self.embedding_dims = embedding_dims

        # Lookup table: token id -> dense word vector.
        self.embedding = nn.Embedding(vocab_size, embedding_dims)
        # Linear layer projecting each word vector back to vocabulary logits.
        self.fc = nn.Linear(embedding_dims, vocab_size)

    def forward(self, x):
        """Compute vocabulary logits for every token position.

        Args:
            x: LongTensor of token ids, shape (batch_size, seq_len).

        Returns:
            Logits of shape (batch_size * seq_len, vocab_size) — kept
            flattened because that is the layout CrossEntropyLoss expects.
        """
        x = self.embedding(x)  # (batch_size, seq_len, embedding_dims)
        x = x.reshape(-1, self.embedding_dims)  # (batch_size * seq_len, embedding_dims)
        return self.fc(x)  # (batch_size * seq_len, vocab_size)


# --- Training: learn skip-gram embeddings over the corpus from the loader ---
dataloader, vocab = _generate_loader()
model = SkipGramModel(len(vocab), 4)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

epochs = 1000
for epoch in range(epochs):
    loss_list = []
    for inputs, outputs in dataloader:
        optimizer.zero_grad()
        # The loader yields 1-D id tensors; add a seq_len dimension of 1 so
        # the model sees its expected (batch_size, seq_len) input shape.
        predicts = model(inputs.unsqueeze(-1))
        loss = criterion(predicts, outputs)
        loss.backward()
        # Fix: clip gradients BEFORE the optimizer step. The original code
        # clipped after step(), which has no effect on the update that was
        # already applied.
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.2)
        optimizer.step()

        loss_list.append(loss.item())

    print(f"epoch:{epoch + 1}/{epochs} -- loss:{sum(loss_list) / len(loss_list):.4f}")
