import torch
from tensorflow.keras.preprocessing.text import Tokenizer
from torch.utils.tensorboard import SummaryWriter
import jieba
import torch.nn as nn


# Workaround for a known incompatibility where SummaryWriter.add_embedding
# fails when a full TensorFlow install shadows the tensorboard stub —
# uncomment these three lines if add_embedding raises an error:
# import tensorflow as tf
# import tensorboard as tb
# tf.io.gfile = tb.compat.tensorflow_stub.io.gfile

def dm01_nnembeding_show():
    """Demo of nn.Embedding on a jieba-segmented Chinese corpus.

    Builds a vocabulary with the Keras Tokenizer, creates a random
    embedding layer sized to that vocabulary, logs the embedding matrix
    to TensorBoard, and prints the vector looked up for every token id.
    """
    sentence1 = '传智教育是一家上市公司，旗下有黑马程序员品牌。我是在黑马这里学习人工智能'
    sentence2 = '我爱自然语言处理'
    sentences = [sentence1, sentence2]

    # Segment each sentence into a list of words with jieba.
    word_list = [jieba.lcut(text) for text in sentences]
    print('word_list--->', word_list)

    # Fit the tokenizer to assign an integer id to every distinct word.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(word_list)
    print('mytokenizer.index_word--->', tokenizer.index_word)
    print('mytokenizer.word_index--->', tokenizer.word_index)
    token_labels = tokenizer.index_word.values()
    print('mytoken_list--->', token_labels)

    # One 8-dim randomly initialized vector per vocabulary entry.
    embed = nn.Embedding(num_embeddings=len(tokenizer.index_word), embedding_dim=8)
    print('词嵌入层--->', embed)

    torch.set_printoptions(precision=4, sci_mode=False)
    print('词嵌入层的矩阵参数(每个单词的词向量) embed--->', embed.weight.data)

    # Log the embedding matrix (with word labels) for the TensorBoard projector.
    writer = SummaryWriter()
    writer.add_embedding(embed.weight.data, token_labels)
    writer.close()

    word0_vec = embed(torch.tensor(0))
    print('word0_vec--->', word0_vec)

    # Look up and print the vector for every token id in the vocabulary.
    for token_id in range(len(tokenizer.index_word)):
        vec = embed(torch.tensor(token_id))
        print('tmp_vec-->', vec.detach().numpy())

def dm02_nnembeding_show():
    """Demo: embed a 2-D batch of token indices with nn.Embedding.

    Shows that feeding a (2, 4) LongTensor of indices through an
    embedding layer yields a (2, 4, 8) tensor of word vectors.
    """
    mytensor = torch.tensor([
        [1, 2, 3, 4],
        [4, 5, 6, 20]
    ])
    mytensor_embed = nn.Embedding(num_embeddings=100, embedding_dim=8)
    print('mytensor-->', mytensor.shape, mytensor)
    # Bug fix: nn.Embedding has no .shape attribute — the layer must be
    # called on the index tensor to produce the embedded output.
    embedded = mytensor_embed(mytensor)
    print('mytensor_embed-->', embedded.shape, embedded)


# Script entry point: run the first embedding demo when executed directly.
if __name__ == '__main__':
    print('ok')
    dm01_nnembeding_show()
