import os
import tarfile
import urllib.request
import tensorflow as tf
import numpy as np
import re
import string
from random import randint

url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
filepath = 'data/aclImdb_v1.tar.gz'

# Download the IMDb sentiment dataset archive unless it is already cached.
# exist_ok=True replaces the original check-then-create pattern, which is
# racy and more verbose.
os.makedirs('data', exist_ok=True)
if not os.path.isfile(filepath):
    print('downloading...')
    # urlretrieve returns (local_filename, http_headers)
    result = urllib.request.urlretrieve(url, filepath)
    print('downloaded: ', result)
else:
    print(filepath, ' is existed!')

# Unpack the archive unless it has already been extracted.
if not os.path.exists('data/aclImdb'):
    print('extracting...')
    # Context manager guarantees the tar handle is closed — the original
    # opened the file and never closed it (resource leak).
    with tarfile.open(filepath, 'r:gz') as tfile:
        tfile.extractall('data/')
    print('extraction completed')
else:
    print('data/aclImdb is existed!')


# 读取数据集  相关函数
def remove_tags(text):
    """Strip HTML tags (any '<...>' span) from *text* and return the result."""
    return re.sub(r'<[^>]+>', '', text)

def read_files(filetype):
    """Load the IMDb reviews for one split.

    Args:
        filetype: split directory name, 'train' or 'test'.

    Returns:
        (all_labels, all_texts) — labels are one-hot pairs, [1, 0] for a
        positive review and [0, 1] for a negative one; texts have their
        HTML tags stripped. Positive reviews come first (in os.listdir
        order), followed by the negative ones, so labels line up with texts.
    """
    path = 'data/aclImdb/'
    # Collect positive review paths first so the label list built below
    # stays aligned with the file order.
    positive_path = path + filetype + '/pos/'
    file_list = [positive_path + f for f in os.listdir(positive_path)]
    pos_files_num = len(file_list)
    # Then append the negative review paths.
    negative_path = path + filetype + '/neg/'
    file_list += [negative_path + f for f in os.listdir(negative_path)]
    neg_files_num = len(file_list) - pos_files_num

    print('read', filetype, 'files:', len(file_list))
    print(pos_files_num, 'pos files in', filetype, 'files')
    print(neg_files_num, 'neg files in', filetype, 'files')

    # One-hot labels: [1, 0] = positive, [0, 1] = negative.
    all_labels = [[1, 0]] * pos_files_num + [[0, 1]] * neg_files_num

    # Read every review. ' '.join(readlines()) is kept deliberately: it
    # matches the original text layout downstream code was built against.
    all_texts = []
    for f in file_list:
        with open(f, encoding='utf8') as fp:
            all_texts.append(remove_tags(' '.join(fp.readlines())))

    return all_labels, all_texts

# Load the labels and texts for the training and test splits.
train_labels, train_texts = read_files('train')
test_labels, test_texts = read_files('test')

# Print one sample of each class from each split.
# NOTE(review): index 12500 assumes the standard IMDb layout — 12,500
# positive reviews followed by 12,500 negative ones per split; confirm
# the extracted dataset is complete before relying on these indices.
print('训练数据')
print('正面评价：')
print(train_texts[0])
print(train_labels[0])
print('负面评价：')
print(train_texts[12500])
print(train_labels[12500])
print('测试数据')
print('正面评价：')
print(test_texts[0])
print(test_labels[0])
print('负面评价：')
print(test_texts[12500])
print(test_labels[12500])

# ========== Build the vocabulary =========
# Create a Tokenizer limited to the 4000 most frequent words and fit it
# on the training texts only (the test set must not influence the vocab).
token = tf.keras.preprocessing.text.Tokenizer(num_words=4000)
token.fit_on_texts(train_texts)
# Number of documents the tokenizer was fitted on.
print(token.document_count)
# Mapping of words to their frequency-rank index.
print(token.word_index)
# Mapping of words to the number of documents they appeared in during fitting.
print(token.word_docs)
# Raw occurrence counts of each word seen during fitting.
print(token.word_counts)

# ========== Convert texts to integer sequences =========
train_sequences = token.texts_to_sequences(train_texts)
test_sequences = token.texts_to_sequences(test_texts)
print(train_texts[0])
print(train_sequences[0])

# Pad/truncate every sequence to a fixed length of 400 so they can be
# batched; 'post' pads and truncates at the end of each review.
x_train = tf.keras.preprocessing.sequence.pad_sequences(train_sequences,
                                              padding='post',
                                              truncating='post',
                                              maxlen=400)
y_train = np.array(train_labels)
x_test = tf.keras.preprocessing.sequence.pad_sequences(test_sequences,
                                              padding='post',
                                              truncating='post',
                                              maxlen=400)
y_test = np.array(test_labels)
print(x_train.shape)
# The padded integer sequence for the first training review.
print(x_train[0])

# ========== Build the model =========
# Embedding (4000-word vocab -> 32 dims over 400 tokens), flattened into a
# 256-unit relu layer with dropout, ending in a 2-way softmax that matches
# the one-hot [pos, neg] labels.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Embedding(output_dim=32, input_dim=4000, input_length=400))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=256, activation='relu'))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Dense(units=2, activation='softmax'))

# ========== Compile and train the model =========
# Categorical cross-entropy matches the 2-way one-hot softmax output.
model.compile(optimizer='adam',
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=['accuracy'])
# Hold out 20% of the training data for validation during training.
history = model.fit(x_train, y_train,
                    validation_split=0.2,
                    epochs=10,
                    batch_size=128,
                    verbose=2)

# Evaluate accuracy on the held-out test split.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print('Test accuracy: ', test_acc)

# Run prediction on the test set; each row is [P(pos), P(neg)].
predictions = model.predict(x_test)
print(predictions[0])

# ========== Apply the model =========
# The original predicted on an empty string, which tokenizes to an
# all-padding sequence and produces a meaningless result; use a real
# sample review so the demo actually exercises the model.
review_text = ('This movie was wonderful. The cast was great and the story '
               'kept me hooked until the very end.')
input_seq = token.texts_to_sequences([review_text])
# Pad to the same fixed length (400, post-padded) the model was trained on.
pad_input_seq = tf.keras.preprocessing.sequence.pad_sequences(input_seq,
                                                              padding='post',
                                                              truncating='post',
                                                              maxlen=400)
# Output is [[P(pos), P(neg)]] for the single input review.
pred = model.predict(pad_input_seq)
print('predict value:', pred)
