#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   train.py
@Time    :   2021/11/08 16:39:31
@Author  :   Yaadon 
'''

# here put the import lib
from predict import get_similar_tokens
import paddle
from net.skip_gram import SkipGram
from data.loader import load_dataset, build_batch, load_text8, data_preprocess, build_dict, convert_corpus_to_id,subsampling,build_data
    

if __name__ == '__main__':

    # --- Training hyperparameters ---
    batch_size = 512        # (center, target, label) triples per mini-batch
    epoch_num = 3           # passes over the (sub-sampled) corpus
    embedding_size = 200    # dimensionality of the word vectors
    step = 0                # global mini-batch counter
    learning_rate = 0.001

    # --- Load and preprocess the text8 corpus ---
    corpus = load_text8()
    corpus = data_preprocess(corpus)

    # Build the vocabulary: word frequencies plus word<->id mappings.
    word2id_freq, word2id_dict, id2word_dict = build_dict(corpus)
    vocab_size = len(word2id_freq)

    # Replace words with their integer ids, then sub-sample frequent words
    # (the standard word2vec trick to damp the dominance of stop words).
    corpus = convert_corpus_to_id(corpus, word2id_dict)
    corpus = subsampling(corpus, word2id_freq)

    # Train on the first 20% of the corpus only, to keep training time short.
    corpus_light = corpus[:int(len(corpus) * 0.2)]
    dataset = build_data(corpus_light, word2id_dict, word2id_freq, vocab_size)

    # Train on GPU. NOTE(review): device is hard-coded; consider a CPU
    # fallback when no GPU is available — confirm deployment environment.
    paddle.set_device('gpu:0')

    # Build the Skip-gram network and the optimizer that trains it.
    skip_gram_model = SkipGram(vocab_size, embedding_size)
    adam = paddle.optimizer.Adam(
        learning_rate=learning_rate,
        parameters=skip_gram_model.parameters())

    # Iterate over mini-batches produced by build_batch and train the network.
    for center_words, target_words, label in build_batch(
            dataset, batch_size, epoch_num):
        # Convert the numpy arrays into Paddle tensors.
        center_words_var = paddle.to_tensor(center_words)
        target_words_var = paddle.to_tensor(target_words)
        label_var = paddle.to_tensor(label)

        # Forward pass: predictions and the training loss.
        pred, loss = skip_gram_model(
            center_words_var, target_words_var, label_var)

        # Backward pass, one optimizer step, then reset gradients for the
        # next mini-batch.
        loss.backward()
        adam.step()
        adam.clear_grad()

        step += 1
        # Every 1000 mini-batches, print the loss to check it is decreasing.
        # float(loss) handles both 0-d and 1-element tensors, whereas
        # loss.numpy()[0] raises IndexError on 0-d arrays in newer Paddle.
        if step % 1000 == 0:
            print("step %d, loss %.3f" % (step, float(loss)))

        # Every 10000 steps, print the 5 most similar words (by embedding
        # dot product) for a few probe words, and checkpoint the model.
        if step % 10000 == 0:
            for probe_word in ('movie', 'one', 'chip'):
                get_similar_tokens(probe_word, word2id_dict, id2word_dict, 5,
                                   skip_gram_model.embedding.weight)
            paddle.save(skip_gram_model.state_dict(),
                        'nlp_epoch{}'.format(step // 10000))
