"""
https://pythonspeed.com/articles/json-memory-streaming/
https://stackoverflow.com/questions/10382253/reading-rather-large-json-files
"""
import os
import json
import ijson
from PyCmpltrtok.common import get_dir_name_ext, sep


def load_large_json(xfile_path, encoding='utf8', limit=0):
    """
    Lazily yield top-level array items from a large JSON file.

    Uses ijson to stream-parse the file so the whole document is never
    materialized in memory (see the links in the module docstring).

    :param xfile_path: Path to a JSON file whose top level is an array.
    :param encoding: Unused; kept for backward compatibility. The file is
        opened in binary mode because ijson decodes the bytes itself.
    :param limit: Maximum number of items to yield; 0 (falsy) means no limit.
    :return: Generator yielding each parsed item of the top-level array.
    """
    with open(xfile_path, 'rb') as f:
        # 'item' is ijson's path for each element of the top-level array.
        for cnt, xrecord in enumerate(ijson.items(f, 'item')):
            if limit and cnt >= limit:
                break
            yield xrecord


if '__main__' == __name__:

    def main():
        """
        Demo: stream a huge MedDialog JSON file, collect the first few
        dialogs with exactly `n_turn` turns, and dump them to a small
        temp JSON file next to the source file.
        """
        sep('start')

        # xfile_path = r'D:\_dell7590_root\local\LNP_datasets\med\med_dialog\MedDialog_processed\MedDialog_processed\train_data.json'
        xfile_path = '/home/yunpeng/datasets/med_dialog/train_data.json'
        print(f'path = |{xfile_path}|')

        n_turn = 1   # keep dialogs with exactly this many turns (2 utterances per turn)
        n_limit = 4  # stop after collecting this many matching dialogs

        result = []
        n = 0
        print('loading ...')
        # Stream the file (limit=0 means unlimited) and stop as soon as
        # n_limit matching records have been collected.
        for x in load_large_json(xfile_path, limit=0):
            if len(x) == n_turn * 2:
                print('.', end='', flush=True)
                n += 1
                result.append(x)
                if n >= n_limit:
                    break
        print()

        xdir, xbase, xext = get_dir_name_ext(xfile_path)
        xtgt_path = os.path.join(xdir, f'{xbase}.{n_turn}turn-x{n_limit}.tmp.json')
        print(f'Writing to |{xtgt_path}|')
        # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable.
        with open(xtgt_path, 'w', encoding='utf8') as f:
            json.dump(result, f, ensure_ascii=False)
        print('Written.')

    main()
    sep('All over')
