# 此模块用于预处理唐诗
import collections
import numpy as np
# import tensorflow as tf
#
# # 处理原文本
# FLAGS = tf.app.flags.FLAGS

start_token = 'B'    # 这里的开始标志和结束标志要与TangPoems.py里面的标志必须一致，否则会报错
end_token = 'E'


def ProcessPoems(file_name, start_token='B', end_token='E'):
    """Read a poem corpus and build vocabulary-indexed vectors.

    Each line of the file is expected as ``title:content``.  Poems that
    contain annotation/markup characters, or whose content is shorter than
    5 or longer than 80 characters, are skipped.  Surviving poems are
    wrapped as ``start_token + content + end_token`` and converted to
    integer vectors via a frequency-ordered vocabulary.

    Args:
        file_name: path to the UTF-8 corpus, one ``title:content`` per line.
        start_token: marker prepended to every poem; must match the marker
            used by the training/generation code (see module constants).
        end_token: marker appended to every poem; same constraint.

    Returns:
        (poems_vectors, word_int_map, words):
            poems_vectors -- list of int lists, one per poem, sorted by length;
            word_int_map  -- dict mapping each character to its index;
            words         -- characters ordered by descending frequency, with
                             a trailing ' ' used later as the padding symbol.
    """
    poems = []
    with open(file_name, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                title, content = line.strip().split(':')
                content = content.replace(' ', '')
                # Skip poems containing stray annotation characters.
                if any(ch in content for ch in ('_', '(', '《', '（', '[')):
                    continue
                # Skip poems that are too short or too long.
                if len(content) < 5 or len(content) > 80:
                    continue
                poems.append(start_token + content + end_token)
            except ValueError:
                # Line did not contain exactly one ':' separator; ignore it.
                pass
    # BUG FIX: the original used key=lambda l: len(line), which measured the
    # loop variable `line` (the last line read) for every poem — a constant
    # key, so the "sort by length" was silently a no-op.
    poems = sorted(poems, key=len)
    print('--------文本处理部分-------')
    print(poems[0])
    print('--------文本处理部分-------')
    # Collect every character of every poem and count frequencies.
    all_words = [word for poem in poems for word in poem]
    counter = collections.Counter(all_words)
    count_pairs = sorted(counter.items(), key=lambda x: -x[1])  # most frequent first
    words, _ = zip(*count_pairs)
    words = words + (' ',)  # trailing space is the padding character for batching
    word_int_map = dict(zip(words, range(len(words))))
    # Out-of-vocabulary characters map to len(words) (one past the last index).
    poems_vectors = [list(map(lambda word: word_int_map.get(word, len(words)), poem)) for poem in poems]
    print('。。。辅助数据结构生成部分。。。')
    print(poems_vectors[0])
    print('。。。辅助数据结构生成部分。。。')
    return poems_vectors, word_int_map, words


def GenerateBatch(batch_size, poems_vector, word_to_int):
    """Slice the vectorised poems into fixed-size training batches.

    Poems inside a batch are right-padded with the index of ' ' so every
    row reaches the length of the longest poem in that batch.  The label
    batch is the input shifted left by one position (next-character
    prediction); leftover poems that do not fill a whole batch are dropped.

    Args:
        batch_size: number of poems per batch.
        poems_vector: list of int lists produced by ProcessPoems.
        word_to_int: char-to-index map; must contain ' ' (the pad symbol).

    Returns:
        (x_batches, y_batches): parallel lists of (batch_size, max_len)
        int32 numpy arrays.
    """
    total_batches = len(poems_vector) // batch_size
    pad_id = word_to_int[' ']
    x_batches, y_batches = [], []
    for chunk in range(total_batches):
        lo = chunk * batch_size
        group = poems_vector[lo:lo + batch_size]
        width = max(len(v) for v in group)

        # Pre-fill with the pad id, then copy each poem over its row prefix.
        inputs = np.full((batch_size, width), pad_id, np.int32)
        for row_idx, vec in enumerate(group):
            inputs[row_idx, :len(vec)] = vec

        # Labels are the inputs shifted one step to the left; the last
        # column simply repeats (it is padding or the end token anyway).
        targets = np.copy(inputs)
        targets[:, :-1] = inputs[:, 1:]

        x_batches.append(inputs)
        y_batches.append(targets)
    print('、、、准备语料部分、、、')
    print(x_batches[0])
    print(y_batches[0])
    print('、、、准备语料部分、、、')
    return x_batches, y_batches


if __name__ == '__main__':
    # Preprocess the corpus once.  (The original called ProcessPoems twice
    # in a row and discarded the first result, reading and parsing the
    # whole file redundantly.)
    poems_vector, word_to_int, vocabularies = ProcessPoems('poems.txt')
    # Produce training batches of 64 poems each.
    GenerateBatch(64, poems_vector, word_to_int)

