import torch
import torch.nn as nn

raw_sentence = "她是一名学生"
# Step 1: build the character-level vocabulary and id mappings.
# The original used list(set(...)), whose ordering depends on Python's hash
# randomization — word2id would change between runs, making training and any
# saved embeddings non-reproducible.  sorted() makes the id assignment
# deterministic while keeping the same tokenization (join/split drops any
# whitespace characters, matching the original behavior).
word_list = " ".join(raw_sentence).split()
word_list = sorted(set(word_list))
word2id = {w: i for i, w in enumerate(word_list)}  # char -> integer id
id2word = {i: w for i, w in enumerate(word_list)}  # integer id -> char
n_class = len(word2id)  # vocabulary size (output dimension of the model)
print("word2id:", word2id)
print("id2word:", id2word)

# 构造训练数据
def make_batch(raw_sentence, word2id, n_step):
    """Build (context, target) training pairs from a character sequence.

    A window of ``n_step`` consecutive characters is slid over
    ``raw_sentence``; each window (as a list of ids) becomes one input
    sample and the id of the character immediately after the window is
    its target.

    Args:
        raw_sentence: string to tokenize character by character.
        word2id: mapping from character to integer id.
        n_step: context window length.

    Returns:
        (input_batch, target_batch): a list of ``n_step``-long id lists
        and the parallel list of target ids.
    """
    ids = [word2id[ch] for ch in raw_sentence]
    positions = range(n_step, len(ids))
    input_batch = [ids[pos - n_step:pos] for pos in positions]
    target_batch = [ids[pos] for pos in positions]
    return input_batch, target_batch
# Build the training tensors from the raw sentence.
n_step = 2  # number of context characters per sample
input_batch, target_batch = make_batch(raw_sentence, word2id, n_step)
# nn.Embedding and CrossEntropyLoss both expect integer (Long) tensors.
input_batch = torch.LongTensor(input_batch)
target_batch = torch.LongTensor(target_batch)
print("input_batch:", input_batch)
print("target_batch:", target_batch)

# 第2步：构建模型
import torch
from torch import nn
import torch.optim as optim
import numpy


class NNLM(nn.Module):
    """Bengio-style neural probabilistic language model.

    Reads the module-level hyperparameters ``n_class``, ``m``, ``n_step``
    and ``n_hidden`` at construction/forward time.  Computes

        logits = b + X @ W + tanh(d + X @ H) @ U

    where X is the concatenation of the ``n_step`` context embeddings.
    """

    def __init__(self):
        super().__init__()
        # NOTE: submodule creation order is part of the random-init
        # sequence, so it is kept identical to the original.
        self.embeddings = nn.Embedding(n_class, m)  # lookup table, m = embedding dim
        self.hidden1 = nn.Linear(n_step * m, n_hidden, bias=False)  # H: input -> hidden
        self.ones = nn.Parameter(torch.ones(n_hidden))  # d: hidden-layer bias
        self.hidden2 = nn.Linear(n_hidden, n_class, bias=False)  # U: hidden -> output
        self.hidden3 = nn.Linear(n_step * m, n_class, bias=False)  # W: direct input -> output
        self.bias = nn.Parameter(torch.ones(n_class))  # b: output bias

    def forward(self, X):
        """Map a [batch, n_step] id tensor to [batch, n_class] raw logits."""
        flat = self.embeddings(X).view(-1, n_step * m)  # concat context embeddings
        hidden = torch.tanh(self.ones + self.hidden1(flat))
        logits = self.bias + self.hidden3(flat) + self.hidden2(hidden)
        return logits


# Step 3: define the loss function.  CrossEntropyLoss applies log-softmax
# internally, which is why NNLM.forward returns raw logits.
criterion = nn.CrossEntropyLoss()

# Step 4: training setup — these module-level hyperparameters are read as
# globals by NNLM.__init__ and NNLM.forward.
m = 5  # word-embedding dimension
n_hidden = 2  # hidden-layer width
n_step = 2  # context window size; must match the n_step used in make_batch

model = NNLM()
# Adam optimizer over all model parameters
optimizer = optim.Adam(model.parameters(), lr=0.001)
# 训练10次
# Train for 1000 epochs of full-batch gradient descent, logging every 250.
for epoch in range(1000):
    optimizer.zero_grad()  # reset gradients accumulated by the previous step
    output = model(input_batch)  # [batch_size, n_class] logits

    # output : [batch_size, n_class], target_batch : [batch_size]
    loss = criterion(output, target_batch)
    if (epoch + 1) % 250 == 0:
        # Bug fix: the original printed the 0-based `epoch` while the gate
        # tests `epoch + 1`, so the reported epoch was off by one.
        # loss.item() extracts the Python float from the 0-dim loss tensor.
        print('Epoch:', '%d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss.item()))
    loss.backward()  # backprop: compute each parameter's gradient
    optimizer.step()  # apply the Adam update

# Predict: greedy argmax over the logits for every training context.
predict = model(input_batch).data.max(1, keepdim=True)[1]
# Bug fix: the original `for n, m in input_batch.squeeze()` shadowed the
# module-level embedding dimension `m` with a tensor, and its two-variable
# unpacking only worked when n_step == 2.  Decoding row by row fixes both
# and works for any context length.
print([[id2word[idx.item()] for idx in row] for row in input_batch])
print([id2word[idx.item()] for idx in target_batch])
print([id2word[idx.item()] for idx in predict.squeeze()])

# Dump the learned embedding vector of every vocabulary character.
embeddings = model.embeddings.weight.detach().numpy()
for idx, word in id2word.items():
    vector = embeddings[idx]
    print('{} {}\n'.format(word, ' '.join(str(v) for v in vector)))