import pickle
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import math


def train_tokenizer(num_words, word_len, data_nums):
    """Fit a Keras Tokenizer on every pickled fact chunk and persist it.

    Args:
        num_words: vocabulary-size cap handed to the Tokenizer.
        word_len: word-length tag embedded in the input pickle filenames.
        data_nums: total sample count; inputs live in chunks of 100000.

    Returns:
        The fitted Tokenizer (also pickled to ./model/tokenizer_fact_<num_words>.pkl).
    """
    tokenizer = Tokenizer(num_words)
    for chunk in range(math.ceil(data_nums / 100000)):
        lo = chunk * 100000
        hi = lo + 100000
        print('start fact_%d_%d' % (lo, hi))
        path = 'data/preprocess/participle/fact_%d_%d_%d.pkl' % (lo, hi, word_len)
        with open(path, mode='rb') as f:
            fact = pickle.load(f)
        # Feed the tokenizer in slices of 10000 texts to keep memory bounded.
        for start in range(0, len(fact), 10000):
            tokenizer.fit_on_texts(texts=fact[start:start + 10000])
        print('finish fact_%d_%d' % (lo, hi))

    with open('./model/tokenizer_fact_%d.pkl' % num_words, mode='wb') as f:
        pickle.dump(tokenizer, f)
    return tokenizer


def sequence(tokenizer, word_len, max_len, data_nums):
    """Convert pickled fact chunks into padded integer sequences on disk.

    For every 100000-sample chunk: load the tokenised texts, map them to
    integer-id sequences (pickled to data/preprocess/sequences/), then pad or
    truncate each sequence to max_len (pickled to data/preprocess/pad_sequences/).

    Args:
        tokenizer: a fitted keras Tokenizer.
        word_len: word-length tag embedded in the input pickle filenames.
        max_len: target row length for pad_sequences.
        data_nums: total sample count; inputs live in chunks of 100000.
    """
    for i in range(math.ceil(data_nums / 100000)):
        print('start fact_%d_%d' % (i * 100000, i * 100000 + 100000))
        with open('data/preprocess/participle/fact_%d_%d_%d.pkl' % (i * 100000, i * 100000 + 100000, word_len),
                  mode='rb') as f:
            fact = pickle.load(f)
        # FIX: the original comment claimed texts_to_sequences ran in batches,
        # but the code converted the entire chunk in one call. Texts are mapped
        # independently, so converting in slices of 10000 yields the identical
        # result while keeping peak memory bounded.
        fact_seq = []
        for n in range(0, len(fact), 10000):
            fact_seq += tokenizer.texts_to_sequences(texts=fact[n:n + 10000])
        with open('data/preprocess/sequences/fact_seq_%d_%d.pkl' % (i * 100000, i * 100000 + 100000), mode='wb') as f:
            pickle.dump(fact_seq, f)

        # Run pad_sequences in slices of 20000; each row is padded ('post',
        # value 0) or truncated to exactly max_len.
        fact_pad_seq = []
        for n in range(0, len(fact_seq), 20000):
            fact_pad_seq += list(pad_sequences(fact_seq[n:n + 20000], maxlen=max_len,
                                               padding='post', value=0, dtype='int'))
        with open('data/preprocess/pad_sequences/fact_pad_seq_%d_%d_%d.pkl' % (max_len, i * 100000, i * 100000 + 100000),
                  mode='wb') as f:
            pickle.dump(fact_pad_seq, f)
        print('finish fact_%d_%d' % (i * 100000, i * 100000 + 100000))


def merge_sequence(num_words, max_len, data_nums):
    """Concatenate the per-chunk padded-sequence pickles into one .npy array.

    Args:
        num_words: vocabulary-size tag embedded in the output filename.
        max_len: padded row length tag embedded in the input/output filenames.
        data_nums: total sample count; inputs live in chunks of 100000.
    """
    merged = []
    for chunk in range(math.ceil(data_nums / 100000)):
        lo = chunk * 100000
        hi = lo + 100000
        print('start fact_cut_%d_%d' % (lo, hi))
        path = 'data/preprocess/pad_sequences/fact_pad_seq_%d_%d_%d.pkl' % (max_len, lo, hi)
        with open(path, mode='rb') as f:
            merged.extend(pickle.load(f))
    np.save('data/preprocess/fact_pad_seq_%d_%d.npy' % (num_words, max_len), np.array(merged))
