import tensorflow as tf
import numpy as np


if __name__ == '__main__':

    train_sentences = [
        'I love my dog',
        'I love my cat'
    ]

    # Tokenizer capped at a 100-word vocabulary; any word not seen during
    # fitting is encoded as the "<oov>" (out-of-vocabulary) token.
    tok = tf.keras.preprocessing.text.Tokenizer(num_words=100, oov_token="<oov>")

    # Learn the word -> integer mapping from the training sentences.
    tok.fit_on_texts(train_sentences)
    word_index = tok.word_index
    print(word_index)    # {'<oov>': 1, 'i': 2, 'love': 3, 'my': 4, 'dog': 5, 'cat': 6}

    # Encode unseen sentences with the fitted mapping; unknown words
    # ('really' and each of the Chinese tokens) all map to the <oov> id 1.
    new_sentences = [
        'I really love my dog',
        'I love cat',
        '学 不会 了 属于是'
    ]
    encoded = tok.texts_to_sequences(new_sentences)
    print(encoded)    # [[2, 1, 3, 4, 5], [2, 3, 6], [1, 1, 1, 1]]

    # Normalize every sequence to length 4: padding='post' appends zeros
    # after short sequences; truncating='post' drops tokens past the end.
    padded = tf.keras.preprocessing.sequence.pad_sequences(
        encoded, maxlen=4, padding='post', truncating='post')
    print(padded)
    # [[2 1 3 4]
    #  [2 3 6 0]
    #  [1 1 1 1]]
    # (returned as a numpy ndarray)
