from tqdm import tqdm
from importlib import import_module


# Special tokens: PAD is the padding symbol; CLS is BERT's sentence-level
# aggregate token, prepended to every sequence.
PAD, CLS = '[PAD]', '[CLS]'
pad_size = 32  # fixed sequence length every example is padded/truncated to

# Dynamically load the model module and build its configuration.
# NOTE(review): 'models.bert' must be importable and expose a Config class
# whose constructor takes the dataset directory — confirm against the repo.
x = import_module('models.bert')
config = x.Config('THUCNews')


# Build the list of training examples from the tab-separated input file.
# Each non-blank line is "<text>\t<label>"; each example is a tuple
# (token_ids, label, seq_len, mask) with token_ids/mask padded or
# truncated to exactly `pad_size` entries (pad id 0, mask 0 for padding).
contents = []
with open('THUCNews/data/input.txt', 'r', encoding='UTF-8') as f:
    for line in tqdm(f, leave=False):
        lin = line.strip()
        if not lin:  # skip blank lines
            continue
        # rsplit with maxsplit=1 tolerates tab characters inside the text
        # itself: only the final field is treated as the label.
        content, label = lin.rsplit('\t', 1)
        token = [CLS] + config.tokenizer.tokenize(content)
        token_ids = config.tokenizer.convert_tokens_to_ids(token)
        # convert_tokens_to_ids maps tokens 1:1, so this equals len(token)
        seq_len = len(token_ids)
        mask = []
        if pad_size:
            if seq_len < pad_size:
                pad_len = pad_size - seq_len
                mask = [1] * seq_len + [0] * pad_len
                token_ids = token_ids + [0] * pad_len
            else:
                # Sequence too long: truncate and report full-length mask.
                mask = [1] * pad_size
                token_ids = token_ids[:pad_size]
                seq_len = pad_size
        contents.append((token_ids, int(label), seq_len, mask))