import tensorflow_datasets as tfds
import tensorflow as tf
import numpy as np
import io

def get_data():
    """Load the IMDB movie-review sentiment dataset via TensorFlow Datasets.

    Reviews come from IMDB; each one is labeled positive or negative
    (supervised `(text, label)` pairs).

    :return: ``((training_sentences, training_labels),
              (testing_sentences, testing_labels))`` where the sentences are
              decoded UTF-8 strings and the labels are numpy integer scalars.
    """
    training_sentences = []
    training_labels = []

    testing_sentences = []
    testing_labels = []

    imdb, info = tfds.load("imdb_reviews", with_info=True, as_supervised=True)

    train_data, test_data = imdb['train'], imdb['test']

    for s, l in train_data:
        # Bug fix: str(bytes) produces the literal "b'...'" representation,
        # polluting every review with a b' prefix and escape sequences.
        # Decode the raw bytes to get the actual review text.
        training_sentences.append(s.numpy().decode('utf-8'))
        training_labels.append(l.numpy())

    for s, l in test_data:
        testing_sentences.append(s.numpy().decode('utf-8'))
        testing_labels.append(l.numpy())

    return (training_sentences, training_labels), (testing_sentences, testing_labels)


if __name__ == '__main__':
    vocab_size = 10000          # vocabulary size kept by the tokenizer
    embedding_dim = 16          # dimensionality of each word-embedding vector
    max_length = 120            # every sequence is padded/truncated to this length
    padding = 'post'            # pad with zeros at the END of each sequence
    truncating = 'post'         # drop tokens from the END when longer than max_length
    oov_tok = "<oov>"           # placeholder token for out-of-vocabulary words

    (training_sentences, training_labels), (testing_sentences, testing_labels) = get_data()

    # Fit the tokenizer on the training text only, then encode both splits
    # so the test set never influences the vocabulary.
    tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=vocab_size, oov_token=oov_tok)
    tokenizer.fit_on_texts(training_sentences)
    word_index = tokenizer.word_index
    sequences = tokenizer.texts_to_sequences(training_sentences)
    padded = tf.keras.preprocessing.sequence.pad_sequences(
        sequences, maxlen=max_length, truncating=truncating, padding=padding)

    testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
    testing_padded = tf.keras.preprocessing.sequence.pad_sequences(
        testing_sequences, maxlen=max_length, truncating=truncating, padding=padding)

    training_labels_final = np.array(training_labels)
    testing_labels_final = np.array(testing_labels)

    # Simple classifier: embedding -> flatten -> dense ReLU -> sigmoid output.
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(6, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])

    # Print the model architecture summary.
    model.summary()

    # Binary cross-entropy loss, Adam optimizer, accuracy as the metric.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Train for 10 epochs; validation_data is the held-out test split.
    num_epochs = 10
    model.fit(padded, training_labels_final, epochs=num_epochs,
              validation_data=(testing_padded, testing_labels_final))

    # Print the embedding matrix shape: (vocab_size, embedding_dim).
    e = model.layers[0]
    weights = e.get_weights()[0]
    print(weights.shape)

    # Invert the word index so embedding-matrix rows map back to words.
    reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

    # Write the embedding vectors and their word metadata as TSV files.
    # Bug fix: the metadata filename was 'meta,tsv' (comma typo) — it must be
    # 'meta.tsv' for the embedding projector to accept it.
    # Context managers guarantee both files are closed even on error.
    with io.open('vecs.tsv', 'w', encoding='utf-8') as out_v, \
         io.open('meta.tsv', 'w', encoding='utf-8') as out_m:
        # Index 0 is reserved for padding and has no word, so start at 1.
        for word_num in range(1, vocab_size):
            word = reverse_word_index[word_num]
            embeddings = weights[word_num]
            out_m.write(word + "\n")
            out_v.write('\t'.join([str(x) for x in embeddings]) + '\n')

    # Upload vecs.tsv and meta.tsv at https://projector.tensorflow.org/
    # to visualize the learned embedding space.


