#import pickle as pkl
_base_ = []

# yapf:disable

# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
workflow = [('train', 1)]


# Label converter: maps text <-> token indices using a Chinese character
# dictionary; keeps case (lower=False) and maps OOV chars to <UKN>.
label_convertor = dict(
    type='AttnConvertor',
    dict_file='/home/wsl/LMDB/benchmark_dataset/chinese_dict.txt',
    with_unknown=True,
    lower=False)

# The two attention branches fused inside the RobustScanner decoder.
hybrid_decoder = dict(type='SequenceAttentionDecoder')
position_decoder = dict(type='PositionAttentionDecoder')

# RobustScanner recognizer: ResNet31 backbone, channel-reduction encoder
# (512 -> 128), and a dual (hybrid + positional) attention decoder.
model = dict(
    type='RobustScanner',
    backbone=dict(type='ResNet31OCR'),
    encoder=dict(
        type='ChannelReductionEncoder',
        in_channels=512,
        out_channels=128,
    ),
    decoder=dict(
        type='RobustScannerDecoder',
        dim_input=512,
        dim_model=128,
        hybrid_decoder=hybrid_decoder,
        position_decoder=position_decoder),
    loss=dict(type='SARLoss'),
    label_convertor=label_convertor,
    max_seq_len=40)


# Legacy train/test cfg slots (unused by this model).
train_cfg = None
test_cfg = None

# Optimizer: Adam at 1e-4 with gradient clipping (L2 norm capped at 10).
optimizer = dict(type='Adam', lr=1e-4)
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))

# Learning-rate schedule: single step decay at iteration 30k, no warmup.
lr_config = dict(policy='step', step=[30000])

# data
# Per-channel normalization to roughly [-1, 1].
img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

# Training pipeline: read from LMDB, aspect-preserving resize to height 64
# (width 64-352, feature width ratio 0.25), then tensor conversion + norm.
train_pipeline = [
    dict(type='LoadImageFromLMDB'),
    dict(
        type='ResizeOCR',
        height=64,
        min_width=64,
        max_width=352,
        keep_aspect_ratio=True,
        width_downsample_ratio=0.25),
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'filename', 'ori_shape', 'resize_shape', 'text', 'valid_ratio'
        ]),
]

# Evaluation pipeline: same as training but without the width downsample
# ratio hint on the resize step.
test_pipeline = [
    dict(type='LoadImageFromLMDB'),
    dict(
        type='ResizeOCR',
        height=64,
        min_width=64,
        max_width=352,
        keep_aspect_ratio=True),
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'filename', 'ori_shape', 'resize_shape', 'text', 'valid_ratio'
        ]),
]

# Visualization/inference pipeline: loads plain image files (with rotation
# handling) instead of LMDB, and collects no ground-truth 'text' meta key.
viz_pipeline = [
    dict(type='LoadImageFromFileRot'),
    dict(
        type='ResizeOCR',
        height=64,
        min_width=64,
        max_width=352,
        keep_aspect_ratio=True),
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=['filename', 'ori_shape', 'resize_shape', 'valid_ratio']),
]
dataset_type = 'OCRDataset'

# Training LMDBs: only the real scene set is used; the synthetic slot is
# kept (empty) as a placeholder.
train_real = '/home/wsl/LMDB/benchmark_dataset/scene/scene_train'
train_syn = ''
train_paths = [train_real]

# Evaluation LMDBs: a similar-character benchmark plus the scene test split.
test_root = '/home/wsl/LMDB/benchmark_dataset/scene/'
test_paths = [
    '/home/wsl/LMDB/benchmark_dataset/similar/char',
    test_root + 'scene_test',
]
# One OCRDataset config per training LMDB path. A list comprehension keeps
# the helper loop variables (`path`, `data0`) out of the module namespace,
# so mmcv's Config does not sweep them into the final config dict.
train_datasets = [
    dict(
        type=dataset_type,
        img_prefix=path,
        ann_file=path,
        loader=dict(
            type='AnnFileLoader',
            repeat=1,
            file_format='lmdb',
            parser=dict(
                type='LineJsonParser',
                keys=['filename', 'text'],
            )),
        pipeline=train_pipeline,
    )
    for path in train_paths
]

# One OCRDataset config per evaluation LMDB path (test_mode=True skips
# training-only behaviors). Comprehension avoids leaking `path`/`data0`
# into the module namespace that mmcv's Config collects.
test_datasets = [
    dict(
        type=dataset_type,
        img_prefix=path,
        ann_file=path,
        loader=dict(
            type='AnnFileLoader',
            repeat=1,
            file_format='lmdb',
            parser=dict(
                type='LineJsonParser',
                keys=['filename', 'text'],
            )),
        pipeline=test_pipeline,
        test_mode=True,
    )
    for path in test_paths
]

# Dataloader configuration: per-GPU batch sizes and the three dataset splits.
# NOTE(review): `test` uses viz_pipeline (LoadImageFromFileRot, no 'text'
# meta key) while `val` uses test_pipeline — presumably intentional for
# visualization-only inference on the test split; confirm.
data = dict(
    samples_per_gpu=96,
    workers_per_gpu=8,
    # Larger batches for evaluation since no gradients are stored.
    val_dataloader=dict(samples_per_gpu=128),
    test_dataloader=dict(samples_per_gpu=192),
    train=dict(
        type='UniformConcatDataset',
        datasets=train_datasets,
        pipeline=train_pipeline),
    val=dict(
        type='UniformConcatDataset',
        datasets=test_datasets,
        pipeline=test_pipeline),
    test=dict(
        type='UniformConcatDataset',
        datasets=test_datasets,
        pipeline=viz_pipeline))


# Iteration-based training for 600k iterations.
runner = dict(type='IterBasedRunner', max_iters=600000)

# Console/text logging every 50 iterations.
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])

# NOTE(review): '~' is usually not expanded when loading checkpoints
# (torch.load does not call expanduser), and this path is inconsistent with
# the '/home/wsl/...' absolute paths used elsewhere in this config — confirm
# the resume path is correct.
resume_from = '~/wsl/mmocr_0.6/work_dirs/zh/robust_scanner_scene/latest.pth'
load_from = None

# Checkpoint every 5k iterations, retaining only the 5 most recent; run
# accuracy evaluation on the same 5k cadence.
checkpoint_config = dict(interval=5000, max_keep_ckpts=5)
evaluation = dict(interval=5000, metric='acc')

# Mixed-precision training with dynamic loss scaling.
fp16 = dict(loss_scale='dynamic')