import torch
import torch.nn as nn

raw_sentence = "她是一名学生"
# Step 1: build the vocabulary index.
# Tokenize into single characters (join with spaces, then split).
word_list = " ".join(raw_sentence).split()
# Deduplicate with dict.fromkeys instead of set(): set iteration order depends
# on PYTHONHASHSEED, so the word-id assignment would differ between runs.
# fromkeys keeps first-occurrence order and makes the mapping reproducible.
word_list = list(dict.fromkeys(word_list))
word2id = {w: i for i, w in enumerate(word_list)}  # token -> integer id
id2word = {i: w for i, w in enumerate(word_list)}  # integer id -> token
n_class = len(word2id)  # vocabulary size (= number of output classes)
# ------------------------------------------CBOW---------------------------------------------------
# Step 1: sliding-window pair generator for CBOW training.
def CBOW(raw_text, window_size=2):
    """Build (context, target) pairs from a token sequence.

    For every position i that has a full window on both sides, the context is
    the window_size tokens before i plus the window_size tokens after i
    (the target itself excluded), and the target is the token at position i.

    The original hard-coded exactly four context offsets, which was only
    correct for window_size == 2 (for window_size == 1 the context even
    contained the target itself). Slicing generalizes to any window_size >= 1
    and produces identical output for the default window_size=2.
    """
    data = []
    for i in range(window_size, len(raw_text) - window_size):
        context = list(raw_text[i - window_size:i]) + list(raw_text[i + 1:i + window_size + 1])
        target = raw_text[i]
        data.append((context, target))
    return data
# Encode each (context, target) pair into vocabulary-id tensors for training.
raw_text = raw_sentence
# raw_text = "她正在踢毽子"
data = CBOW(raw_text)
input_batch = torch.LongTensor([[word2id[w] for w in ctx] for ctx, _ in data])
target_batch = torch.LongTensor([word2id[tgt] for _, tgt in data])
print(data)
print(input_batch)
print(target_batch)

# Aliases and hyperparameters used by the model below.
word_to_ix = word2id
vocab_size = n_class
embedding_dim = 2


# Step 2: the CBOW network — mean of context embeddings -> 2-layer MLP -> vocab logits.
class CBOW_Model(torch.nn.Module):
    def __init__(self, vocab_size, embedding_dim):
        """vocab_size: number of tokens; embedding_dim: width of each word vector."""
        super(CBOW_Model, self).__init__()
        # Submodules are created in the same order as before so that seeded
        # random initialization stays reproducible.
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear1 = nn.Linear(embedding_dim, 128)
        self.activation_function1 = nn.ReLU()
        self.linear2 = nn.Linear(128, vocab_size)

    def forward(self, inputs):
        """inputs: LongTensor of context-word ids, shape (batch, context_len).

        Returns unnormalized scores over the vocabulary, shape (batch, vocab_size).
        """
        # Average the context-word embeddings, then score with the MLP.
        hidden = self.embeddings(inputs).mean(dim=1)
        return self.linear2(self.activation_function1(self.linear1(hidden)))


model = CBOW_Model(vocab_size, embedding_dim)
# Step 3: loss function & optimizer.
# Cross-entropy over the vocabulary logits; plain SGD with a fixed learning rate.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(params=model.parameters(), lr=0.01)


def make_context_vector(context, word_to_ix):
    """Map a list of context tokens to a 1-D LongTensor of their ids."""
    ids = [word_to_ix[token] for token in context]
    return torch.tensor(ids, dtype=torch.long)


loss_function = criterion  # alias kept for readability below

# Step 4: run the training loop.
for epoch in range(1000):
    optimizer.zero_grad()
    output = model(input_batch)  # (batch, vocab_size) logits
    total_loss = loss_function(output, target_batch)
    # Report every 250 epochs. Use .item() to format the Python float rather
    # than the tensor object, and show the 1-based epoch number so it matches
    # the (epoch + 1) % 250 schedule (the original printed the 0-based index,
    # e.g. "249" at the 250th epoch).
    if (epoch + 1) % 250 == 0:
        print('Epoch:', '%d' % (epoch + 1), 'cost =', '{:.6f}'.format(total_loss.item()))

    total_loss.backward()
    optimizer.step()

# Test: greedy prediction of the center word for each context window.
# Use torch.no_grad() instead of the discouraged `.data` attribute so no
# autograd graph is built during inference; argmax(dim=1) yields a 1-D id
# tensor directly, removing the .squeeze() calls that would collapse a
# batch of one to 0-D and break the unpacking below.
with torch.no_grad():
    predict = model(input_batch).argmax(dim=1)
print([[id2word[n.item()], id2word[m.item()], id2word[j.item()], id2word[k.item()]] for n, m, j, k in
       input_batch])
print([id2word[n.item()] for n in target_batch])
print([id2word[n.item()] for n in predict])

# Embedding: dump the learned word vectors, one "<word> <dim values...>" line each.
embeddings = model.embeddings.weight.detach().numpy()
for i, word in id2word.items():
    embedding = ' '.join(map(str, embeddings[i]))
    print('{} {}\n'.format(word, embedding))