import torch
from python_nlp.datasets.load_large_json import load_large_json
import json


class LargeJsonDataset(torch.utils.data.IterableDataset):
    """Stream dialogs out of a large JSON file as an iterable torch dataset.

    Each yielded sample is a dict with a single key ``'dialog'`` whose value
    is the dialog re-serialized to a JSON string (non-ASCII preserved), so
    the default DataLoader collation can batch it as a list of strings.
    """

    def __init__(self, path):
        # Filesystem path of the large JSON file to stream from.
        self.path = path

    def __iter__(self):
        # Lazily walk the file; never materializes the whole JSON in memory.
        for record in load_large_json(self.path):
            yield dict(dialog=json.dumps(record, ensure_ascii=False))


if __name__ == '__main__':

    from PyCmpltrtok.common import sep

    def main():
        """Smoke-test the streaming loader and the dataset/DataLoader path."""
        xfile_path = '/home/yunpeng/datasets/med_dialog/validate_data.json'

        # Part 1: preview the raw generator — first 5 top-level items only.
        for idx, item in enumerate(load_large_json(xfile_path, limit=5)):
            sep(idx)
            for jdx, element in enumerate(item):
                print(idx, jdx, element)

        sep()
        sep()
        # Part 2: the same file through the IterableDataset + DataLoader.
        dataset = LargeJsonDataset(xfile_path)
        loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=4,
        )

        for idx, batch in enumerate(loader):
            if idx >= 5:
                break
            sep(f'batch {idx}', char='=')
            # Each sample is a JSON string; decode back to the dialog list.
            for jdx, sample in enumerate(batch['dialog']):
                sep(f'sample {jdx}')
                decoded = json.loads(sample)
                for kdx, line in enumerate(decoded):
                    print(idx, jdx, kdx, line)

    main()