"""
1.	Word2vec 是 Google 于 2013 年开源推出的一个用于获取 word vector 的工具包，它简单、高效，因此引起了很多人的关注。
word2vec对词向量的训练有两种方式，一种是CBOW模型，即通过上下文来预测中心词；另一种skip-Gram模型，即通过中心词来预测上下文。
给定英文语料CORPUS = "natural language processing and machine learning is fun and exciting"，根据如下要求，
进行CBOW模型的训练（42分）

【Note: for Tensorflow 1.x】
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Seed both TensorFlow and NumPy RNGs so weight initialization and training
# are repeatable across runs.
tf.random.set_random_seed(777)
np.random.seed(777)

# ① Read the corpus; context window size is 2, embedding dimension is 5.
CORPUS = 'natural language processing and machine learning is fun and exciting'
WINDOW_SIZE = 2        # words taken on each side of the center word
N_WORDVEC_DEEP = 5     # word-vector (embedding) dimensionality
WORDS = CORPUS.split(' ')
print(WORDS)
WORDS_LEN = len(WORDS)
print('WORDS_LEN', WORDS_LEN)
DICT = set(WORDS)      # unique vocabulary
DICT_LEN = len(DICT)
# FIX: enumerate the vocabulary in sorted order. Iterating a raw set depends
# on Python's string-hash randomization, so the word<->index mappings (and
# therefore every downstream result) would differ between interpreter runs
# despite the RNG seeds set above.
DICT_LIST = sorted(DICT)
word2idx = {w: i for i, w in enumerate(DICT_LIST)}
idx2word = {i: w for i, w in enumerate(DICT_LIST)}
print('word2idx', word2idx)
print('idx2word', idx2word)

# ② Build the CBOW training set: every neighbour inside the window becomes
# an input (context word) whose target is the window's center word.
x_data = []
y_data = []
for center, center_word in enumerate(WORDS):
    center_idx = word2idx[center_word]
    lo = max(0, center - WINDOW_SIZE)
    hi = min(WORDS_LEN, center + WINDOW_SIZE + 1)
    for ctx in range(lo, hi):
        if ctx != center:
            x_data.append(word2idx[WORDS[ctx]])
            y_data.append(center_idx)
print('CBOW模型:')
for x_idx, y_idx in zip(x_data, y_data):
    print(f'{idx2word[x_idx]}, {idx2word[y_idx]}')

# Reshape both index lists into (m, 1) column vectors to match the
# placeholder shapes used by the model below.
x_data = np.expand_dims(x_data, axis=1)
y_data = np.expand_dims(y_data, axis=1)
print('x_data', x_data.shape)
print('y_data', y_data.shape)

# Model: a one-hidden-layer network equivalent to word2vec CBOW.
# one-hot input (m, dict_len) -> embedding matrix E -> hidden e (m, vec_deep)
# -> output weights E2 + bias b -> logits h (m, dict_len)
x = tf.placeholder(tf.int32, [None, 1], 'x')   # context-word indices
y = tf.placeholder(tf.int32, [None, 1], 'y')   # center-word indices

# FIX: flatten the (m, 1) index column to (m,) before one-hot encoding so
# x_oh really is the rank-2 (m, dict_len) matrix the matmul below expects;
# one_hot applied to the (m, 1) input yields a rank-3 (m, 1, dict_len)
# tensor. one_hot also accepts dtype directly, so the separate cast is gone.
x_oh = tf.one_hot(tf.reshape(x, [-1]), DICT_LEN, dtype=tf.float32, name='x_oh')  # (m, dict_len)
E = tf.Variable(tf.random.normal([DICT_LEN, N_WORDVEC_DEEP]), dtype=tf.float32, name='E')  # (dict_len, vec_deep)
e = tf.matmul(x_oh, E, name='e')  # (m, vec_deep) -- the embedding lookup
E2 = tf.Variable(tf.random.normal([N_WORDVEC_DEEP, DICT_LEN]), dtype=tf.float32, name='E2')  # (vec_deep, dict_len)
b = tf.Variable(tf.zeros([1, DICT_LEN]), dtype=tf.float32, name='b')  # (1, dict_len)
h = tf.add(tf.matmul(e, E2), b)  # logits, (m, dict_len)

# FIX: sparse_softmax_cross_entropy_with_logits requires labels of rank
# logits.rank - 1, i.e. shape (m,), so flatten the (m, 1) label column.
j = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(y, [-1]), logits=h),
    name='j'
)
optim = tf.train.AdamOptimizer(learning_rate=0.01).minimize(j)

# ③ Training parameters: 1000 iterations, report the loss every 50 iterations.
ITERS = 1000
GROUP = 50
with tf.Session() as session:
    session.run(tf.global_variables_initializer())

    loss_history = []
    for it in range(ITERS):
        _, loss = session.run([optim, j], feed_dict={x: x_data, y: y_data})
        loss_history.append(loss)
        # Print on iterations 0, 50, 100, ... and always on the final one.
        if it % GROUP == 0 or it == ITERS - 1:
            print(f'#{it + 1}: cost = {loss}')
    plt.title('cost in iterations')
    plt.plot(loss_history)

    # ④ After training finishes, print the learned embedding matrix.
    word2vecMat = session.run(E)
    print('模型训练完毕后，打印词嵌入矩阵')
    print(word2vecMat)

    # ⑤ Pick 'machine' and look up its embedding vector (row of the matrix).
    print('选择machine，计算其词嵌入向量')
    word = 'machine'
    word_idx = word2idx[word]
    print('word', word)
    print('word_idx', word_idx)
    word2vec = word2vecMat[word_idx]
    print(word2vec)

# ⑥ Compute the cosine similarity between 'machine' and every other word,
# then print the 3 most similar words with their similarity scores.
print('遍历其他词与该词的余弦相似度，打印前3个相似度最大的词和对应的相似度')
cos_and_idx_arr = []
for other_idx, other_vec in enumerate(word2vecMat):
    if other_idx == word_idx:
        continue  # skip the query word itself
    # cosine(u, v) = (u . v) / (|u| * |v|)
    dot = np.sum(other_vec * word2vec)
    norms = np.linalg.norm(other_vec) * np.linalg.norm(word2vec)
    cos_and_idx_arr.append([dot / norms, other_idx])
cos_and_idx_arr.sort(key=lambda pair: pair[0], reverse=True)
cos_and_idx_arr = cos_and_idx_arr[:3]
for rank, (cos, idx) in enumerate(cos_and_idx_arr, start=1):
    print(f'#{rank}: idx={idx}, word={idx2word[idx]}, cos={cos}')

# Show all figures created above (blocks until the plot window is closed).
plt.show()
