"""
谷歌2013年提出的Word2Vec是目前最常用的词嵌入模型之一。Word2Vec实际是一种浅层的神经网络模型，它有两种网络结构，分别是CBOW
（Continuous Bag of Words）连续词袋和Skip-gram。
Skip-gram是通过当前词来预测窗口中上下文词出现的概率模型，把当前词当做x，把窗口中其它词当做y，通过一个隐层接，但不激活（只做线性运算），
再接全连接输出，用Softmax激活函数来预测其它词的概率。
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Fix the graph-level RNG seed so the random initialisation of the model
# variables (and hence the training trajectory) is reproducible across
# runs.  NOTE(review): `tf.random.set_random_seed` is the TF1.x API.
tf.random.set_random_seed(1)

# 1 Build the training data for the Word2Vec skip-gram model.
# ① Dataset
# 1) Raw training corpus (a single short sentence).
CORPUS = "natural language processing and machine learning is fun and exciting"
words_in_sentence = CORPUS.split()
print('words_in_sentence', words_in_sentence)
word_dict = set(words_in_sentence)
len_dict = len(word_dict)
# Sort the vocabulary before assigning indices: iterating a raw set is
# hash-order dependent (it varies with PYTHONHASHSEED), so the original
# word <-> index mapping could change between runs despite the fixed TF
# seed.  Sorting makes the mapping deterministic.
idx2word = {i: w for i, w in enumerate(sorted(word_dict))}
word2idx = {w: i for i, w in idx2word.items()}
print('word_dict', word_dict)
print('idx2word', idx2word)
print('word2idx', word2idx)

# 2) Build (center, context) index pairs using a context window of
#    WINDOW_SIZE words on each side; EMBEDDING_DIM is the embedding width.
WINDOW_SIZE = 2
EMBEDDING_DIM = 5
idx_in_sentence = [word2idx[w] for w in words_in_sentence]
print('idx_in_sentence', idx_in_sentence)
len_sentence = len(idx_in_sentence)
x, y = [], []
for i in range(len_sentence):
    # j walks the window [i - WINDOW_SIZE, i + WINDOW_SIZE] clipped to the
    # sentence bounds; the center word itself (j == i) is skipped.
    for j in range(max(0, i - WINDOW_SIZE), min(len_sentence - 1, i + WINDOW_SIZE) + 1):
        if i == j:
            continue
        x.append(idx_in_sentence[i])  # center word index
        y.append(idx_in_sentence[j])  # one context word index
for xw, yw in zip(x, y):
    print(f'{idx2word[xw]} => {idx2word[yw]}')
# Idiomatic int64 array construction (np.int64(list) only works because the
# scalar-type constructor happens to return an ndarray for sequence input).
x = np.array(x, dtype=np.int64)
y = np.array(y, dtype=np.int64)
# One-hot encode each index by selecting rows of the identity matrix.
x_oh = np.eye(len_dict)[x]
y_oh = np.eye(len_dict)[y]

# ② Model definition (TF1 static graph)
# 1) Skip-gram network: one-hot input -> linear hidden layer (the embedding
#    lookup) -> linear output layer of vocabulary size.
ph_x = tf.placeholder(tf.float32, (None, len_dict), 'ph_x')  # one-hot center words
ph_y = tf.placeholder(tf.float32, (None, len_dict), 'ph_y')  # one-hot context words
# E is the input embedding matrix; multiplying a one-hot row by E selects
# that word's embedding row.  No bias and no activation, per Word2Vec.
E = tf.Variable(tf.random_normal((len_dict, EMBEDDING_DIM)), dtype=tf.float32, name='E')
em = tf.matmul(ph_x, E)
# E2 projects the embedding back up to vocabulary-sized logits.
E2 = tf.Variable(tf.random_normal((EMBEDDING_DIM, len_dict)), dtype=tf.float32, name='E2')
pred = tf.matmul(em, E2)

# 2) Loss, accuracy metric and optimizer
# Softmax cross-entropy between the predicted logits and the one-hot
# context word, averaged over the batch.
criterion = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=ph_y, logits=pred)
)
# Fraction of samples whose argmax prediction equals the context word.
accuracy = tf.reduce_mean(
    tf.cast(
        tf.equal(
            tf.argmax(ph_y, axis=1),
            tf.argmax(pred, axis=1),
        ),
        tf.float32,
    )
)
optim = tf.train.AdamOptimizer(learning_rate=0.001).minimize(criterion)

# ③ Training
# 1) Train on the full sample set for 1000 steps, printing the loss every
#    50 steps.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    loss_his = []
    iters, step = 1000, 50
    for i in range(iters):
        # Full-batch training: the loss/accuracy fetched here are the
        # values computed *before* this step's parameter update is applied.
        loss, _, acc = sess.run([criterion, optim, accuracy], feed_dict={ph_x: x_oh, ph_y: y_oh})
        loss_his.append(loss)
        if i % step == 0 or i == iters - 1:
            print(f'#{i + 1}: loss = {loss}, acc = {acc}')

    # 2) Plot the training loss curve (the window is shown at the end).
    plt.plot(loss_his)

    # 3) After training, dump the learned embedding matrix.
    print('打印词嵌入矩阵')  # "print the word embedding matrix"
    print(idx2word)
    E_mat = sess.run(E)
    print(E_mat)

    # 4) Print the embedding vector of the word "machine".
    idx = word2idx['machine']
    print('打印词“machine”的词向量')  # "embedding vector of the word 'machine'"
    the_vec = E_mat[idx]
    print(the_vec)

    # 5) Cosine similarity between "machine" and every vocabulary word.
    def cos(a, b):
        """Return the cosine similarity of 1-D vectors a and b.

        The 1e-20 term guards against division by zero if either vector
        is all zeros.
        """
        ab = np.sum(a * b)
        ra = np.linalg.norm(a)
        rb = np.linalg.norm(b)
        cos = ab / (ra * rb + 1e-20)
        return cos

    cos_arr = []
    for vec in E_mat:
        cos_v = cos(the_vec, vec)
        cos_arr.append(cos_v)
    print('各个词与“machine”的余弦相似度')  # similarity of each word vs "machine"
    print(cos_arr)

    # 6) Top-3 most similar words: sort descending and drop rank 0, which
    #    is expected to be "machine" itself (self-similarity ≈ 1, the
    #    maximum up to the epsilon in cos()).
    order = np.argsort(cos_arr)[::-1][1:4]
    print('与“machine”最相似的前3个词')  # top-3 words most similar to "machine"
    print([idx2word[idx] for idx in order])

    # finally
    print('Please check and close the plotting window to finish ...')
    plt.show()
