#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   ProcessPoems.py    
@Contact :   LJL959@QQ.com
@License :   (C)Copyright 2019-2020

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2020/4/6 18:36   LiuJiaoLong      1.0         None
@Description : 处理数据源
'''

# 处理文本
import numpy as np
from collections import Counter


def ProcessPoems(batch_size=64, poetry_file='./data/poems.txt'):
    # NOTE: the relative default path resolves against the current working
    # directory — './data' when launched from the project root, '../data'
    # when this file is run from its own folder.
    """Load a poem corpus and convert it into padded training batches.

    Each line of ``poetry_file`` is expected to look like ``title:content``.
    Lines that do not split into exactly two fields on ``:`` are skipped, as
    are poems containing ``_``, ``(``, ``（`` or ``[``, and poems whose
    content length falls outside [5, 79]. Every kept poem is wrapped with
    begin/end markers as ``'B' + content + 'E'``.

    Args:
        batch_size: poems per batch; trailing poems that do not fill a
            complete batch are dropped.
        poetry_file: path to the UTF-8 encoded corpus file.

    Returns:
        Tuple ``(words, poem_vector, to_num, x_batches, y_batches)``:
        ``words`` — vocabulary ordered by descending frequency, with a
        trailing space appended as the padding character;
        ``poem_vector`` — each poem as a list of vocabulary indices;
        ``to_num`` — maps a character to its index (out-of-vocabulary
        characters map to ``len(words)``);
        ``x_batches`` / ``y_batches`` — lists of int32 arrays of shape
        ``(batch_size, max_len_in_batch)`` where ``y`` is ``x`` shifted
        left by one position (next-character prediction targets).
    """
    poems = []
    with open(poetry_file, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                title, content = line.strip().split(':')
            except ValueError:
                # Not exactly one ':' separator -> not a usable poem line.
                continue
            # Drop poems with annotation markers or brackets in the body.
            if '_' in content or '(' in content or '（' in content or '[' in content:
                continue
            # Keep only poems of a reasonable length for fixed-size batches.
            if len(content) < 5 or len(content) > 79:
                continue
            poems.append('B' + content + 'E')

    print('---------------文本处理开始---------------')
    print(poems[0])
    print('···············文本处理结束···············')

    print('古诗总量：', len(poems))

    # Character frequencies over the whole corpus, most frequent first.
    counter = Counter(ch for poem in poems for ch in poem)
    count_pairs = sorted(counter.items(), key=lambda pair: -pair[1])
    words, _ = zip(*count_pairs)
    words = words + (' ',)   # trailing space doubles as the padding character
    # char -> int lookup table
    word_num_map = dict(zip(words, range(len(words))))

    def to_num(word):
        # Out-of-vocabulary characters map to len(words),
        # one past the last valid index.
        return word_num_map.get(word, len(words))

    # Encode every poem as a list of vocabulary indices.
    poem_vector = [list(map(to_num, poem)) for poem in poems]
    print('---------------诗词转换成向量开始---------------')
    print(poem_vector[0])
    print('···············诗词转换成向量结束···············')

    n_chunk = len(poem_vector) // batch_size   # incomplete final batch is dropped
    x_batches = []
    y_batches = []
    for i in range(n_chunk):
        start_index = i * batch_size
        batch = poem_vector[start_index:start_index + batch_size]
        # Pad every poem in this batch to the longest poem's length.
        length = max(map(len, batch))
        x_data = np.full((batch_size, length), word_num_map[' '], np.int32)
        for row, vec in enumerate(batch):
            x_data[row, :len(vec)] = vec
        # y is x shifted left by one; the last column repeats, e.g.
        #   x: 1,2,3,4,5   ->   y: 2,3,4,5,5
        y_data = np.copy(x_data)
        y_data[:, :-1] = x_data[:, 1:]
        x_batches.append(x_data)
        y_batches.append(y_data)
    # Print a single sample batch once, after the loop (the original printed
    # the same first batch on every iteration).
    if x_batches:
        print('---------------语料准备开始---------------')
        print(x_batches[0])
        print(y_batches[0])
        print('···············语料准备结束···············')

    return words, poem_vector, to_num, x_batches, y_batches


if __name__ == '__main__':
    # Run the preprocessing with defaults as a quick smoke test
    # (expects ./data/poems.txt relative to the working directory).
    ProcessPoems()

