
# -*- coding:utf-8 -*-
'''
-------------------------------------------------
   Description :  txt2p data load
   Author :       liupeng
   Date :         2020-03-02
-------------------------------------------------
'''

import os 
import re
import sys
import time
import numpy as np
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.tokenizer import Tokenizer


# Build the word-piece tokenizer from the published BERT vocab file,
# lowercasing input text.  NOTE(review): path is relative to the CWD at
# import time — running from another directory will fail here.
tokenizer = Tokenizer(token_dict='./publish/vocab.txt' , do_lower_case=True)
class data_bert_generator(DataGenerator):
    """Batch generator for BERT-style inputs.

    Each item in ``self.data`` is a ``(text, label_array)`` pair; texts are
    tokenized with the module-level ``tokenizer`` and labels are wrapped with
    a one-hot boundary row on both ends (for the [CLS]/[SEP] positions).
    Yields ``([token_ids, segment_ids], labels)`` padded batches.
    """

    def __iter__(self, random=False, maxlen=97):
        order = list(range(len(self.data)))
        if random:
            np.random.shuffle(order)
        tokens_buf, segments_buf, labels_buf = [], [], []
        for pos in order:
            text, raw_label = self.data[pos]
            # One-hot "boundary" label prepended/appended around the real labels.
            boundary = [[1,0,0,0,0,0]]
            full_label = boundary + raw_label.tolist() + boundary
            joined = ''.join(text)
            tok_ids, seg_ids = tokenizer.encode(joined, max_length=maxlen) #[1:-1]
            tokens_buf.append(tok_ids)
            segments_buf.append(seg_ids)
            labels_buf.append(full_label)
            # Emit when the batch is full, or on the final sample.
            if len(tokens_buf) == self.batch_size or pos == order[-1]:
                yield (
                    [sequence_padding(tokens_buf), sequence_padding(segments_buf)],
                    sequence_padding(labels_buf),
                )
                tokens_buf, segments_buf, labels_buf = [], [], []


class data_common_generator(DataGenerator):
    """Batch generator for pre-vectorized ``(data, label)`` pairs.

    Yields ``(np.array(data_batch), np.array(label_batch))`` tuples, shuffling
    the iteration order by default.
    """

    def __iter__(self, random=True):
        order = list(range(len(self.data)))
        if random:
            np.random.shuffle(order)
        samples, targets = [], []
        for pos in order:
            sample, target = self.data[pos]
            samples.append(sample)
            targets.append(target)
            # Emit when the batch is full, or on the final sample.
            batch_ready = len(samples) == self.batch_size
            if batch_ready or pos == order[-1]:
                yield np.array(samples), np.array(targets)
                samples, targets = [], []


def load_data(filename1, filename2):
    """Load parallel sample/label arrays and pair them element-wise.

    Args:
        filename1: path to a ``.npy`` file holding the input samples.
        filename2: path to a ``.npy`` file holding the matching labels.

    Returns:
        A list of ``(sample, label)`` tuples, one per aligned pair.
        If the two arrays differ in length, trailing extras are dropped
        (``zip`` semantics — same as the original loop).
    """
    xlist = np.load(filename1)
    ylist = np.load(filename2)
    print('xlist:', len(xlist))
    print('ylist:', len(ylist))

    # zip pairs the arrays directly; no need for a manual append loop.
    return list(zip(xlist, ylist))