from transformers import AutoTokenizer
import argparse
import os

from basic.Dataloader import read_corpus


def main():
    """Tokenize every EDU in a corpus and dump token ids plus a line index.

    Command-line options:
        --model_path  pretrained model name/dir under --plm_root
        --data        path to the corpus json read by read_corpus
        --plm_root    directory that holds the pretrained models

    Writes two files next to the input data:
        <data>.<model_path>       space-separated token ids, one EDU per line
        <data>.<model_path>.line  cumulative token start offsets, one per EDU,
                                  plus a final sentinel holding the total
                                  token count (so span i is
                                  line_starts[i] .. line_starts[i + 1])
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--model_path', default="bert-base-uncased")
    argparser.add_argument('--data', default="STAC_sp/train.json")
    # Previously hard-coded to /home/yunan/plm; keep it as the default so
    # existing invocations are unaffected, but allow overriding.
    argparser.add_argument('--plm_root', default="/home/yunan/plm",
                           help="directory containing pretrained models")

    # parse_known_args keeps tolerance for extra flags; the extras are unused.
    args, _extra_args = argparser.parse_known_args()

    plm_path = os.path.join(args.plm_root, args.model_path)

    tokenizer = AutoTokenizer.from_pretrained(plm_path)
    instances = read_corpus(args.data)

    token_cnt = 0
    line_starts = []     # cumulative start offset of each EDU's token run
    token_ids_list = []  # per-EDU token id sequences, in corpus order

    for instance in instances:
        for edu in instance.original_EDUs:
            # NOTE(review): each EDU appears to be a dict with a 'text'
            # field — confirm against read_corpus.
            tokens = tokenizer.tokenize(edu['text'])
            token_ids = tokenizer.convert_tokens_to_ids(tokens)

            line_starts.append(token_cnt)
            token_cnt += len(token_ids)
            token_ids_list.append(token_ids)
    # Sentinel: total token count, closing the last span.
    line_starts.append(token_cnt)

    out_file = args.data + "." + args.model_path
    with open(out_file, mode='w', encoding='utf-8') as outf:
        for token_ids in token_ids_list:
            outf.write(" ".join(str(token_id) for token_id in token_ids) + "\n")

    with open(out_file + ".line", mode='w', encoding='utf-8') as outf:
        for start in line_starts:
            outf.write(str(start) + "\n")

# Run the tokenization/export pipeline only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()