from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras

import numpy as np

# https://blog.csdn.net/qq_31456593/article/details/88412064
# Build a simple text classifier and train/evaluate it on the IMDB dataset.
# print(tf.__version__)
# https://www.cnblogs.com/nxf-rabbit75/p/10869402.html — an overview of text-similarity methods
# Download the IMDB dataset, keeping only the 10,000 most frequent words.
imdb = keras.datasets.imdb
(train_x, train_y), (test_x, text_y) = imdb.load_data(num_words=10000)
# # Inspect the data
# print("Training entries: {}, labels: {}".format(len(train_x), len(train_y)))
# print(train_x[0])
# print(test_x[0])
# print('len: ', len(train_x[0]), len(train_x[1]))

# Build lookup tables between words and integer ids.
word_index = imdb.get_word_index()
# print(word_index)

# Shift every raw id by 3 so ids 0-3 are free for the special tokens below
# (load_data reserves the same offsets via its default index_from=3).
word2id = {k: (v + 3) for k, v in word_index.items()}

word2id['<PAD>'] = 0
word2id['<START>'] = 1
word2id['<UNK>'] = 2
word2id['<UNUSED>'] = 3
# Debug only — dumps the entire ~88k-entry vocabulary to stdout; kept
# commented out like the script's other inspection prints.
# print(word2id)

# Reverse mapping: id -> word, used to decode reviews back into text.
id2word = {v: k for k, v in word2id.items()}
# print([id2word.get(i, '?') for i in train_x[0]])  # decode the first review id-by-id
def get_words(sent_ids):
    """Decode a sequence of token ids into a space-joined sentence.

    Ids absent from ``id2word`` are rendered as ``'?'``.
    """
    return ' '.join(id2word.get(token_id, '?') for token_id in sent_ids)

sent = get_words(train_x[0])
print(sent)
# Prepare the data: pad the end of every review so all share length 256.
def _pad_reviews(reviews):
    """Post-pad each id sequence with <PAD> (id 0) to a fixed length of 256."""
    return keras.preprocessing.sequence.pad_sequences(
        reviews,
        value=word2id['<PAD>'],
        padding='post',
        maxlen=256,
    )

train_x = _pad_reviews(train_x)
test_x = _pad_reviews(test_x)
print(train_x[0])
print(train_x[1])
print('len: ', len(train_x[0]), len(train_x[1]))


# Build the model: embedding -> average pooling -> dense -> sigmoid output.
import tensorflow.keras.layers as layers

vocab_size = 10000
model = keras.Sequential([
    layers.Embedding(vocab_size, 16),
    layers.GlobalAveragePooling1D(),
    layers.Dense(16, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
model.summary()
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Training and validation: hold out the first 10,000 reviews for validation.
_val_size = 10000
x_val, x_train = train_x[:_val_size], train_x[_val_size:]
y_val, y_train = train_y[:_val_size], train_y[_val_size:]

# epochs: total passes over the training data (no initial_epoch is set).
# batch_size: number of samples per gradient-descent step.
# validation_data: (X, y) tuple evaluated at the end of each epoch.
# verbose: 0 = silent, 1 = progress bar, 2 = one line per epoch.
history = model.fit(x_train, y_train,
                    epochs=40, batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)

# Evaluate on the held-out test set (text_y keeps its original name from the load).
result = model.evaluate(test_x, text_y)
print(result)

model.save_weights("save_train.h5")
# Plot the training/validation loss curves (uncomment to use)
# import matplotlib.pyplot as plt
# history_dict = history.history
# history_dict.keys()
# acc = history_dict['accuracy']
# val_acc = history_dict['val_accuracy']
# loss = history_dict['loss']
# val_loss = history_dict['val_loss']
# epochs = range(1, len(acc)+1)
#
# plt.plot(epochs, loss, 'bo', label='train loss')
# plt.plot(epochs, val_loss, 'b', label='val loss')
# plt.title('Train and val loss')
# plt.xlabel('Epochs')
# plt.ylabel('loss')
# plt.legend()
# plt.show()





