_base_ = [
    '../../_base_/default_runtime.py',
    '../../_base_/schedules/schedule_adam_step_20e.py',

]

dataset_type = 'OCRDataset'
# The seven MLT-2019 scripts; swap in the single-language list below
# when debugging.
langs = ['Arabic', 'Bangla', 'Chinese', 'Latin', 'Hindi', 'Korean', 'Japanese']
# langs = ['Chinese']

# Roots of the LMDB archives, one sub-directory per language.
synth_dir = '/home/wsl/LMDB/MLT/SynthTextMlt/'
real_dir = '/home/wsl/LMDB/MLT/MLT2019/'
unreal_dir = '/home/wsl/LMDB/MLT/UnrealText/'

# Per-language LMDB paths (img_prefix == ann_file for LMDB datasets).
train_img_synth = []
train_img_real = []
train_img_unreal = []
test_img_real = []
for lang in langs:
    train_img_synth.append(synth_dir + '/%s' % lang)
    # BUG FIX: this line previously appended ``synth_dir`` again, so the
    # UnrealText LMDBs were never actually used; point at ``unreal_dir``
    # as clearly intended (it was otherwise dead).
    train_img_unreal.append(unreal_dir + '/%s' % lang)
    train_img_real.append(real_dir + '/train/%s' % lang)

    test_img_real.append(real_dir + '/test/%s' % lang)

def _lmdb_dataset(prefix, test_mode):
    """Build one LMDB-backed ``OCRDataset`` config rooted at ``prefix``.

    For LMDB storage mmocr uses the same path as both ``img_prefix`` and
    ``ann_file``; the ``pipeline`` is left as None and filled in later by
    ``UniformConcatDataset`` (see the ``data`` dict below).
    """
    return dict(
        # Use ``dataset_type`` consistently (the original mixed the
        # variable with a hard-coded 'OCRDataset' literal).
        type=dataset_type,
        img_prefix=prefix,
        ann_file=prefix,
        loader=dict(
            type='AnnFileLoader',
            repeat=1,
            file_format='lmdb',
            parser=dict(
                type='LineJsonParser',
                keys=['filename', 'text'],
            )),
        pipeline=None,
        test_mode=test_mode)


# Three training sources (real, synthetic, UnrealText) per language,
# plus one real evaluation set per language.
train_datasets = []
test_datasets = []
for synth, unreal, real, test in zip(train_img_synth, train_img_unreal,
                                     train_img_real, test_img_real):
    train_datasets.extend([
        _lmdb_dataset(real, test_mode=False),
        _lmdb_dataset(synth, test_mode=False),
        _lmdb_dataset(unreal, test_mode=False),
    ])
    test_datasets.append(_lmdb_dataset(test, test_mode=True))

# mmcv's Config collects every module-level name; drop the helper so a
# non-serializable function object does not leak into the config dict.
del _lmdb_dataset

train_list = train_datasets
test_list = test_datasets

# Per-channel mean/std for NormalizeOCR — the standard ImageNet statistics
# (presumably applied after ToTensorOCR scales pixels to [0, 1] — confirm).
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Recognition input geometry: wh[0] = max width (320), wh[1] = height (32).
wh=[320,32]
# Training pipeline: load from LMDB, resize to a fixed 320x32, then three
# independent, probabilistic augmentation stages, then tensor conversion.
train_pipeline = [
    # Images are decoded straight out of the LMDB archives.
    dict(type='LoadImageFromLMDB'),
    # Fixed-size resize: 32 px high, between 32 and 320 px wide,
    # aspect ratio NOT preserved.
    dict(
        type='ResizeOCR',
        height=wh[1],
        min_width=wh[1],
        max_width=wh[0],
        keep_aspect_ratio=False,
        width_downsample_ratio=0.25),
    # With prob 0.5, apply exactly ONE geometric distortion:
    # rotation, random affine, or random perspective.
    dict(
        type='RandomWrapper',
        p=0.5,
        transforms=[
            dict(
                type='OneOfWrapper',
                transforms=[
                    dict(
                        type='RandomRotateTextDet',
                        max_angle=15,
                    ),
                    dict(
                        type='TorchVisionWrapper',
                        op='RandomAffine',
                        degrees=15,
                        translate=(0.3, 0.3),
                        scale=(0.5, 2.),
                        shear=(-45, 45),
                    ),
                    dict(
                        type='TorchVisionWrapper',
                        op='RandomPerspective',
                        distortion_scale=0.5,
                        p=1,
                    ),
                ])
        ],
    ),
    # With prob 0.25: resolution degradation plus noise/blur (both Albu
    # transforms fire independently with p=0.5 each).
    dict(
        type='RandomWrapper',
        p=0.25,
        transforms=[
            dict(type='PyramidRescale'),
            dict(
                type='Albu',
                transforms=[
                    dict(type='GaussNoise', var_limit=(20, 20), p=0.5),
                    dict(type='MotionBlur', blur_limit=6, p=0.5),
                ]),
        ]),
    # With prob 0.25: photometric jitter.
    dict(
        type='RandomWrapper',
        p=0.25,
        transforms=[
            dict(
                type='TorchVisionWrapper',
                op='ColorJitter',
                brightness=0.5,
                saturation=0.5,
                contrast=0.5,
                hue=0.1),
        ]),
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    # 'text' must be collected here — it is the training label; 'img_prefix'
    # is kept so samples can be traced back to their source LMDB.
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'filename', 'ori_shape', 'img_shape', 'text', 'valid_ratio',
            'resize_shape','img_prefix'
        ]),
]
# Evaluation pipeline: same deterministic resize/normalize path as training,
# with no augmentation.  (A commented-out MultiRotateAugOCR test-time
# augmentation variant previously lived here; removed as dead code.)
test_pipeline = [
    dict(type='LoadImageFromLMDB'),
    dict(
        type='ResizeOCR',
        height=wh[1],
        min_width=wh[1],
        max_width=wh[0],
        keep_aspect_ratio=False,
        width_downsample_ratio=0.25),
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    # No 'text' key at test time; 'img_prefix' identifies the source LMDB
    # so per-language results can be separated.
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'filename', 'ori_shape', 'img_shape', 'valid_ratio',
            'resize_shape', 'img_norm_cfg', 'ori_filename','img_prefix'
        ]),
]

# ---- Label conversion ----

# Combined character dictionary covering all scripts in ``langs``.
dict_file_path = '/home/wsl/LMDB/MLT/MLT2019/dict/dict_all.txt'

# Attention-style label converter; ``with_unknown=True`` keeps an "unknown"
# token for characters missing from the dictionary (per mmocr's
# AttnConvertor) instead of failing on them.
label_convertor = dict(
    type='AttnConvertor',
    dict_file=dict_file_path,
    with_unknown=True,
)

# SATRN recognizer with a PVTv2-b2-li backbone (weights downloaded from the
# PVT release page).  Strides (2, 1) in the last two stages downsample
# height but preserve width, keeping horizontal resolution for recognition.
model = dict(
    type='SATRN',
    backbone=dict(type='pvt_v2_b2_li', strides=[(4, 4), (2, 2), (2, 1), (2, 1)],
                  style='pytorch', pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2_li.pth'),
    # Single-layer SATRN encoder: 8 heads, per-head dim 512/8 = 64,
    # FFN width 1024.
    encoder=dict(
        type='SatrnEncoder',
        n_layers=1,
        n_head=8,
        d_k=512 // 8,
        d_v=512 // 8,
        d_model=512,
        n_position=100,
        d_inner=256 * 4,
        dropout=0.1),
    # 3-layer autoregressive NRTR decoder with matching dimensions.
    decoder=dict(
        type='NRTRDecoder',
        n_layers=3,
        d_embedding=512,
        n_head=8,
        d_model=512,
        d_inner=256 * 4,
        d_k=512 // 8,
        d_v=512 // 8),
    loss=dict(type='TFLoss'),
    label_convertor=label_convertor,
    # Maximum decoded sequence length (characters per text instance).
    max_seq_len=50)

# Dataloaders.  UniformConcatDataset applies the given pipeline uniformly
# across every per-language dataset in the list.
data = dict(
    samples_per_gpu=256,
    workers_per_gpu=8,
    # Test loader uses a smaller batch than val (128 vs 256).
    val_dataloader=dict(samples_per_gpu=256),
    test_dataloader=dict(samples_per_gpu=128),
    train=dict(
        type='UniformConcatDataset',
        datasets=train_list,
        pipeline=train_pipeline),
    val=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline),
    test=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline))

# yapf:disable
# Optimizer / schedule — overrides values inherited from the _base_
# schedule (schedule_adam_step_20e).
optimizer = dict(type='Adam', lr=1e-3)
optimizer_config = dict(grad_clip=None)
# learning policy
# NOTE(review): lr decays at epochs 16 and 18 while the runner trains for
# 50 epochs, so epochs 19-50 all run at the final lr — confirm intended.
lr_config = dict(
    policy='step',
    step=[16, 18],
    warmup='linear',
    warmup_iters=1,
    warmup_ratio=0.001,
    warmup_by_epoch=True)
# Save every epoch but keep only the 5 most recent checkpoints.
checkpoint_config = dict(interval=1,max_keep_ckpts=5)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook')

    ])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None#'/media/wsl/D8A00CDCA00CC34C/log/mmocr_0.6/sar_mlt_syn_real/latest.pth'
resume_from =None#'~/wsl/mmocr_0.6/work_dirs/mlt/sar_multihead_all/latest.pth'
# NOTE(review): [('train', 10)] runs 10 consecutive training epochs per
# workflow cycle with no val phase in the workflow (evaluation is handled
# by the EvalHook below) — confirm this is intended over [('train', 1)].
workflow = [('train', 10)]
runner = dict(type='EpochBasedRunner', max_epochs=50)
evaluation = dict(interval=1, metric='acc')
# Mixed-precision training with dynamic loss scaling.
fp16=dict(loss_scale='dynamic')
#find_unused_parameters=True