import os
from nlp_tools.tokenizer.whitespace_tokenizer import WhiteSpaceTokenizer
import json

from pretraining import TrainingDatasetRoBERTa
from training_settings import curpus_path,bert_vocab_path



def word_segment(text):
    """Split *text* on single spaces.

    Note: consecutive spaces produce empty-string tokens, matching
    ``str.split(" ")`` semantics (no whitespace collapsing).
    """
    tokens = text.split(" ")
    return tokens


def read_copuse(file_path):
    """Read a JSON-lines corpus file and collect the 'title' field of each record.

    Parameters
    ----------
    file_path : str
        Path to a UTF-8 text file with one JSON object per line; each
        object is expected to contain a 'title' key.

    Returns
    -------
    list
        The 'title' values in file order.

    Raises
    ------
    KeyError
        If a record lacks a 'title' key.
    json.JSONDecodeError
        If a non-blank line is not valid JSON.
    """
    titles = []
    with open(file_path, encoding='utf-8') as fread:
        for line in fread:
            line = line.strip()
            if not line:
                # Tolerate blank lines (e.g. a trailing newline at EOF)
                # instead of crashing in json.loads.
                continue
            titles.append(json.loads(line)['title'])
    return titles

if __name__ == '__main__':
    model = 'roberta'        # which pretraining objective to build data for
    sequence_length = 512    # max token length per training example
    workers = 40             # parallel workers for TD.process
    max_queue_size = 4000    # bound on the producer/consumer queue

    tokenizer = WhiteSpaceTokenizer(token_dict=bert_vocab_path)

    # TFRecords are written next to the raw corpus, under a 'tfrecord' subdir.
    output = os.path.join(os.path.dirname(curpus_path), 'tfrecord')
    # makedirs(exist_ok=True) also handles missing intermediate directories,
    # where the original os.mkdir would raise FileNotFoundError.
    os.makedirs(output, exist_ok=True)

    # Validate explicitly: an assert would be stripped when running with -O.
    if model not in ('roberta', 'gpt', 'unilm'):
        raise ValueError('unsupported model type: %s' % model)

    pretrain_texts = read_copuse(curpus_path)
    if model == 'roberta':
        TD = TrainingDatasetRoBERTa(
            tokenizer, word_segment, sequence_length=sequence_length
        )

        for i in range(10):  # repeat the data 10 times
            TD.process(
                corpus=pretrain_texts,
                record_name=os.path.join(output, 'corpus.%s.tfrecord' % i),
                workers=workers,
                max_queue_size=max_queue_size,
            )


