import sys
import os

# Make the sibling ``src`` directory importable so the local ``doasr``
# package can be found without installing it.
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_path, "..", "src"))

from doasr.models.tokenizer import DoAsrTokenizer

def main(filepath=None, output_path=None, chunk_size=512):
    """Tokenize a raw text corpus and re-chunk it into fixed-size token windows.

    Each non-empty line of *filepath* is tokenized, split into windows of at
    most *chunk_size* token ids, decoded back to text, and written one window
    per line to *output_path*.

    Args:
        filepath: Input text file; defaults to ``wiki_demo.txt`` next to this
            script.
        output_path: Output file; defaults to
            ``../dataset/wiki_demo_for_doasr.txt``. Its parent directory is
            created if missing.
        chunk_size: Maximum number of token ids per output line.
    """
    if filepath is None:
        filepath = os.path.join(cur_path, "wiki_demo.txt")
    if output_path is None:
        output_path = os.path.join(cur_path, "..", "dataset", "wiki_demo_for_doasr.txt")
    tokenizer = DoAsrTokenizer.from_pretrained(os.path.join(cur_path, "..", "models", "DoASR-0.1B"))

    # The dataset directory may not exist yet; without this the open() below
    # raises FileNotFoundError on a fresh checkout.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    with open(filepath, 'r', encoding='utf8') as f, open(output_path, 'w', encoding='utf8') as out_f:
        for line in f:
            line = line.strip()
            if not line:  # skip blank lines
                continue
            input_ids = tokenizer(line)['input_ids']
            # Emit the line as one or more windows of at most chunk_size ids.
            for i in range(0, len(input_ids), chunk_size):
                chunk = input_ids[i:i + chunk_size]
                decode_text = tokenizer.decode(chunk)
                out_f.write(decode_text + '\n')
    print("done")

if __name__ == '__main__':
    # Run the conversion only when executed as a script, not when imported.
    main()
