import numpy as np
import pandas as pd
import pickle
import os
# Chinese word-segmentation tool
import jieba
from tqdm import tqdm


# When nonzero, print intermediate corpus/vocabulary state for inspection.
debug = 1
# Vocabulary tables shared across the script; populated by get_params().
index_to_word = []      # index -> word
word_to_index = {}      # word -> index
word_to_one_hot = {}    # word -> (1, word_size) one-hot row vector
word_size = 0           # number of distinct words in the vocabulary


# Load the stop words used to filter segmented tokens.
def load_stop_words():
    """Return the list of stop words read from ./datas/stopwords.txt.

    Uses splitlines() instead of split('\\n') so CRLF line endings do not
    leave stray '\\r' characters on each word, and so no spurious empty
    entry is produced by a trailing newline.
    """
    with open('./datas/stopwords.txt', encoding='utf-8') as f:
        return f.read().splitlines()


# Word-segmentation function.
def cut_words():
    """Read the raw corpus CSV and return one token list per article.

    Returns:
        list[list[str]]: jieba-segmented tokens per sentence, with stop
        words removed.
    """
    # Build a set once so each membership test is O(1); testing against
    # the stop-word list directly is O(len(stop_words)) per token.
    stop_words = set(load_stop_words())
    sentences = pd.read_csv('./datas/数学原始数据.csv', encoding='gbk', names=["articles"])['articles']
    # Segment each sentence and drop stop words.
    return [[token for token in jieba.lcut(sentence) if token not in stop_words]
            for sentence in sentences]


def get_params(data):
    """Populate the global vocabulary tables from the segmented corpus.

    Fills index_to_word, word_to_index, word_to_one_hot and word_size.

    Args:
        data: iterable of token lists (output of cut_words()).
    """
    global index_to_word, word_size, word_to_one_hot, word_to_index
    # Track seen words in a set: `word not in index_to_word` is O(vocab)
    # per token and made vocabulary construction quadratic.
    seen = set(index_to_word)
    for sentence in data:
        for word in sentence:
            if word not in seen:
                seen.add(word)
                index_to_word.append(word)
                word_size += 1
    word_to_index = {word: index for index, word in enumerate(index_to_word)}
    # Precompute a (1, word_size) one-hot row vector for every word.
    for word, index in word_to_index.items():
        one_hot = np.zeros((1, word_size))
        one_hot[0, index] = 1
        word_to_one_hot[word] = one_hot
    return


def softmax(x):
    """Row-wise softmax of a 2-D array.

    Subtracts the per-row maximum before exponentiating so large logits
    do not overflow np.exp (the unshifted form returns inf/NaN once any
    entry exceeds ~709). The shift does not change the result.

    Args:
        x: array of shape (rows, cols).

    Returns:
        Array of the same shape whose rows each sum to 1.
    """
    shifted = x - np.max(x, axis=1, keepdims=True)
    ex = np.exp(shifted)
    return ex / np.sum(ex, axis=1, keepdims=True)


# --- Build the corpus and the vocabulary tables --------------------------
all_cutted_words = cut_words()
if debug:
    print(all_cutted_words)
get_params(all_cutted_words)
if debug:
    print(index_to_word)
    print(word_to_index)
    print(word_to_one_hot)
# Dimensionality of the learned word embeddings.
embedding_num = 107
# Learning rate
lr = 0.01
# Number of training epochs
epoch = 100
# Context window radius; should not be too large, otherwise the
# relatedness of genuinely nearby words gets diluted.
n_gram = 5

# NOTE(review): np.random.normal(-1, 1, ...) samples a Gaussian with
# mean -1 and std 1. If an init range of (-1, 1) was intended, this
# should be np.random.uniform(-1, 1, ...) — confirm before changing.
w1 = np.random.normal(-1, 1, size=(word_size, embedding_num))
w2 = np.random.normal(-1, 1, size=(embedding_num, word_size))
# Skip-gram training: for each center word, predict each word in its
# context window; one SGD step per (center, context) pair.
for e in range(epoch):
    for words in tqdm(all_cutted_words):
        for index, now_word in enumerate(words):
            now_word_one_hot = word_to_one_hot[now_word]
            # The upper slice bound is clamped automatically by Python;
            # the lower bound must be clamped by hand (a negative index
            # would wrap around to the end of the list).
            others_words = words[max(0, index - n_gram): index] + words[index + 1: index + 1 + n_gram]
            for others_word in others_words:
                # One-hot form of the context (target) word.
                other_word_one_hot = word_to_one_hot[others_word]

                # Forward pass: the one-hot input times w1 selects the
                # center word's embedding row as the hidden layer.
                hidden = now_word_one_hot @ w1
                p = hidden @ w2
                pre_ans = softmax(p)

                # Backprop rules for C = A @ B with upstream gradient G:
                # delta_C = G
                # delta_A = G @ B.T
                # delta_B = A.T @ G

                # delta_p is the gap from the ground truth — the combined
                # softmax + cross-entropy gradient (predicted - target).
                delta_p = pre_ans - other_word_one_hot
                delta_w2 = hidden.T @ delta_p
                delta_hidden = delta_p @ w2.T
                delta_w1 = now_word_one_hot.T @ delta_hidden

                w1 -= lr * delta_w1
                w2 -= lr * delta_w2

# Persist the embedding matrix and vocabulary tables for later lookup.
with open('./datas/word2vec.pkl', "wb") as f:
    pickle.dump([w1, word_to_index, index_to_word, word_size], f)
