import json
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

def get_data():
    """Load the sarcasm headline dataset.

    Reads ./data/sarcasm.json — a JSON list of records with the keys
    'headline', 'is_sarcastic' and 'article_link' — and returns the
    headlines together with their sarcasm labels.

    :return: tuple (sentences, labels) where sentences is a list of
             headline strings and labels is a parallel list of 0/1
             integers (1 = sarcastic).
    """
    with open("./data/sarcasm.json", encoding="utf-8") as f:
        datastore = json.load(f)

    # The 'article_link' field exists in the data but is not needed for
    # training, so it is intentionally not collected.
    sentences = [item['headline'] for item in datastore]
    labels = [item['is_sarcastic'] for item in datastore]

    return sentences, labels

if __name__ == '__main__':

    vocab_size = 30000  # maximum vocabulary size kept by the tokenizer
    embedding_dim = 16  # dimensionality of the word-embedding vectors
    max_length = 32  # maximum (padded) sentence length
    padding = 'post'  # 'pre' pads with zeros before the sequence, 'post' after it
    truncating = 'post'  # sequences longer than max_length are cut at the end
    oov_tok = "<oov>"  # token substituted for out-of-vocabulary words

    # pad_sequences parameters, for reference:
    #   dtype: dtype of the returned numpy array
    #   padding: 'pre' or 'post' — pad at the start or at the end
    #   truncating: 'pre' or 'post' — truncate from the start or the end
    #   value: float used for padding instead of the default 0

    sentences, labels = get_data()

    # Train / test split (80% / 20%).
    training_size = int(len(sentences) * 0.8)
    training_sentences = sentences[0:training_size]
    testing_sentences = sentences[training_size:]
    training_labels = np.array(labels[0:training_size])
    testing_labels = np.array(labels[training_size:])

    print(len(training_sentences), len(testing_sentences))
    print(len(training_labels), len(testing_labels))

    # Fit the tokenizer on the training set only.
    # num_words=vocab_size caps the emitted indices so they stay inside the
    # Embedding layer's input range; without it the tokenizer is unbounded
    # and can produce indices >= vocab_size, breaking the Embedding layer.
    tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=vocab_size, oov_token=oov_tok)
    tokenizer.fit_on_texts(training_sentences)
    word_index = tokenizer.word_index

    # Encode sentences; the test set reuses the training-set vocabulary.
    training_sequences = tokenizer.texts_to_sequences(training_sentences)
    training_padded = tf.keras.preprocessing.sequence.pad_sequences(
        training_sequences, maxlen=max_length, padding=padding, truncating=truncating)
    print("training_padded[0]: {}".format(training_padded[0]))
    print("training_padded.shape: {}".format(training_padded.shape))
    testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
    # Pad the test set with the same maxlen/padding/truncating as the
    # training set; previously maxlen was omitted, so the test matrix width
    # defaulted to the longest test sentence and could mismatch the
    # Embedding layer's input_length.
    testing_padded = tf.keras.preprocessing.sequence.pad_sequences(
        testing_sequences, maxlen=max_length, padding=padding, truncating=truncating)

    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
        tf.keras.layers.GlobalAvgPool1D(),
        tf.keras.layers.Dense(24, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')  # binary sarcasm probability
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.summary()

    num_epochs = 30

    history = model.fit(training_padded, training_labels, epochs=num_epochs,
                        validation_data=(testing_padded, testing_labels), verbose=2)

    def plot_graphs(history, string):
        """Plot a training metric and its validation counterpart over epochs."""
        plt.plot(history.history[string])
        plt.plot(history.history['val_' + string])
        plt.xlabel('Epochs')
        plt.ylabel(string)
        plt.legend([string, 'val_' + string])
        plt.show()

    # TF2 Keras logs the metric under the key 'accuracy' (not 'acc') when
    # compiled with metrics=['accuracy'], so 'acc' would raise a KeyError.
    plot_graphs(history, 'accuracy')
    plot_graphs(history, 'loss')
