
# Recognition dataset config: per-language LMDB roots for MLT.
dataset_type = 'OCRDataset'
# Full language set: ['Arabic','Bangla','Chinese','Latin','Hindi','Korean','Japanese']
langs = ['Arabic']
synth_dir = '/home/wsl/LMDB/MLT/SynthTextMlt/'
real_dir = '/home/wsl/LMDB/MLT/MLT2019/'

# One LMDB path per language for each split: synthetic train,
# real train, and real test.
train_img_synth = [f'{synth_dir}/{lang}' for lang in langs]
train_img_real = [f'{real_dir}/train/{lang}' for lang in langs]
test_img_real = [f'{real_dir}/test/{lang}' for lang in langs]

def _lmdb_dataset(path, test_mode):
    """Build one LMDB-backed ``OCRDataset`` config rooted at *path*.

    The LMDB holds both the images and the line-level annotations, so the
    same path serves as ``img_prefix`` and ``ann_file``.  The leading
    underscore keeps the helper out of the parsed config (mmcv's
    ``Config`` ignores names starting with ``_``).
    """
    return dict(
        # NOTE: the synthetic split previously hard-coded 'OCRDataset'
        # here; use the shared dataset_type for consistency.
        type=dataset_type,
        img_prefix=path,
        ann_file=path,
        loader=dict(
            type='AnnFileLoader',
            repeat=1,
            file_format='lmdb',
            parser=dict(
                type='LineJsonParser',
                keys=['filename', 'text'],
            )),
        pipeline=None,
        test_mode=test_mode)


train_datasets = []
test_datasets = []
# Per language: train on real + synthetic data, evaluate on real only.
# Order matters and matches the original config: real first, then synth.
for synth_path, real_path, test_path in zip(train_img_synth, train_img_real,
                                            test_img_real):
    train_datasets.extend([
        _lmdb_dataset(real_path, test_mode=False),
        _lmdb_dataset(synth_path, test_mode=False),
    ])
    test_datasets.append(_lmdb_dataset(test_path, test_mode=True))

# Names consumed by the downstream train/test pipeline configs.
train_list = train_datasets
test_list = test_datasets