import os
from nlp_tools.tokenizer.bert_tokenizer import BertTokenizer

from pretraining import TrainingDatasetRoBERTa


def word_segment(text):
    """Tokenize *text* by cutting on single-space boundaries.

    Deliberately uses ``split(" ")`` rather than ``split()``: consecutive
    spaces produce empty tokens and an empty input yields ``[""]``,
    preserving the original behavior.
    """
    separator = " "
    return text.split(separator)


def read_copuse(file_path):
    """Read a corpus file and return a list of stripped lines.

    NOTE(review): the name keeps the original (misspelled) public spelling
    of "corpus" so existing callers continue to work.

    Args:
        file_path: path to a UTF-8 text file, one sample per line.

    Returns:
        list[str]: each line with surrounding whitespace removed; blank
        lines become empty strings (they are kept, not dropped).
    """
    with open(file_path, encoding='utf-8') as fread:
        # Comprehension replaces the manual append loop — same result,
        # idiomatic and faster.
        return [line.strip() for line in fread]

if __name__ == '__main__':
    # --- Configuration --------------------------------------------------
    pretrain_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_roberta_wwm_ext_L-12_H-768_A-12'
    model = 'roberta'        # which pretraining objective to build data for
    sequence_length = 512    # max tokens per example
    workers = 40             # parallel workers for TFRecord generation
    max_queue_size = 4000    # bound on the producer/consumer queue

    # Tokenizer built from the pretrained model's vocabulary file.
    dict_path = os.path.join(pretrain_model_path, 'vocab.txt')
    tokenizer = BertTokenizer(dict_path, do_lower_case=True)

    curpus_path = r'/home/qiufengfeng/nlp/competition/天池/CCKS2021中文NLP地址要素解析/data/pretrained_data.txt'
    output = os.path.join(os.path.dirname(curpus_path), 'tfrecord')
    # makedirs(..., exist_ok=True) replaces the check-then-create pattern:
    # no TOCTOU race, and missing parent directories are created too.
    os.makedirs(output, exist_ok=True)

    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if model not in ('roberta', 'gpt', 'unilm'):
        raise ValueError('unsupported model type: %r' % model)

    pretrain_texts = read_copuse(curpus_path)
    if model == 'roberta':
        TD = TrainingDatasetRoBERTa(
            tokenizer, word_segment, sequence_length=sequence_length
        )

        # Repeat the data 10 times (original comment: 数据重复10遍) —
        # presumably so each pass produces a different random masking;
        # TODO confirm against TrainingDatasetRoBERTa.process.
        for i in range(10):
            TD.process(
                corpus=pretrain_texts,
                record_name=os.path.join(output, 'corpus.%s.tfrecord' % i),
                workers=workers,
                max_queue_size=max_queue_size,
            )


