#import pickle as pkl
_base_ = []

# yapf:disable

# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
workflow = [('train', 1)]


# CTC-style label converter built from a CASIA character dictionary.
# NOTE(review): dict_file is an absolute local path — confirm it exists on the
# machine this config runs on.
label_convertor = dict(
    type='CTCConvertor',
    dict_file='/ssd/wsl/LMDB/Handwriting/CASIA.txt',
    with_unknown=True,
    lower=False,
)

# SATRN-framework recognizer: PVTv2-b2-li backbone feeding a CTC (CRNN-style)
# decoder directly, with no separate encoder stage.
model = dict(
    type='SATRN',
    backbone=dict(
        type='pvt_v2_b2_li',
        # Final stage downsamples height only ((2, 1)) so horizontal
        # resolution is preserved for the CTC sequence.
        strides=[(4, 4), (2, 2), (2, 2), (2, 1)],
        style='pytorch',
        pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2_li.pth',
    ),
    encoder=None,
    decoder=dict(type='CRNNDecoder', in_channels=512, rnn_flag=False, drop_out=0.5),
    loss=dict(type='CTCLoss'),
    label_convertor=label_convertor,
    max_seq_len=55,
)



# No extra train/test wrapper configs are used by this model.
train_cfg = None
test_cfg = None

# Adam with gradient-norm clipping to stabilize training.
optimizer = dict(type='Adam', lr=4e-4)
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))

# Step schedule: decay the learning rate at epochs 15 and 40 (no warmup).
lr_config = dict(policy='step', step=[15, 40])

# data
# Per-channel normalization to roughly [-1, 1].
img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
# [max_width, height] targets for the resized text-line images.
wh = [1200, 64]

# Geometric augmentation: with probability 0.5, apply exactly one of
# rotation / affine / perspective distortion.
_geom_aug = dict(
    type='RandomWrapper',
    p=0.5,
    transforms=[
        dict(
            type='OneOfWrapper',
            transforms=[
                dict(type='RandomRotateTextDet', max_angle=15),
                dict(
                    type='TorchVisionWrapper',
                    op='RandomAffine',
                    degrees=15,
                    translate=(0.3, 0.3),
                    scale=(0.5, 2.),
                    shear=(-45, 45),
                ),
                dict(
                    type='TorchVisionWrapper',
                    op='RandomPerspective',
                    distortion_scale=0.5,
                    p=1,
                ),
            ]),
    ],
)

# Image-quality degradation: with probability 0.25, pyramid rescale followed
# by (independently sampled) Gaussian noise and motion blur.
_quality_aug = dict(
    type='RandomWrapper',
    p=0.25,
    transforms=[
        dict(type='PyramidRescale'),
        dict(
            type='Albu',
            transforms=[
                dict(type='GaussNoise', var_limit=(20, 20), p=0.5),
                dict(type='MotionBlur', blur_limit=6, p=0.5),
            ]),
    ])

# Photometric jitter: with probability 0.25, jitter brightness/saturation/
# contrast/hue.
_color_aug = dict(
    type='RandomWrapper',
    p=0.25,
    transforms=[
        dict(
            type='TorchVisionWrapper',
            op='ColorJitter',
            brightness=0.5,
            saturation=0.5,
            contrast=0.5,
            hue=0.1),
    ])

train_pipeline = [
    dict(type='LoadImageFromLMDB'),
    dict(
        type='ResizeOCR',
        height=wh[1],
        min_width=wh[1],
        max_width=wh[0],
        keep_aspect_ratio=False,
        width_downsample_ratio=1 / 16.),
    _geom_aug,
    _quality_aug,
    _color_aug,
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'filename', 'ori_shape', 'img_shape', 'text', 'valid_ratio',
            'resize_shape'
        ]),
]

# Drop the helper names so the module namespace matches the original config.
del _geom_aug, _quality_aug, _color_aug
# Deterministic evaluation pipeline: fixed resize, no augmentation.
# (A commented-out multi-rotation TTA variant previously lived here.)
test_pipeline = [
    dict(type='LoadImageFromLMDB'),
    dict(
        type='ResizeOCR',
        height=wh[1],
        min_width=wh[1],
        max_width=wh[0],
        keep_aspect_ratio=False,
        width_downsample_ratio=1 / 16.),
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    # No 'text' meta key at test time; extra keys support result dumping.
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'filename', 'ori_shape', 'img_shape', 'valid_ratio',
            'resize_shape', 'img_norm_cfg', 'ori_filename'
        ]),
]

# Visualization pipeline: identical to the evaluation pipeline except images
# are loaded from plain files rather than LMDB.
viz_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeOCR',
        height=wh[1],
        min_width=wh[1],
        max_width=wh[0],
        keep_aspect_ratio=False,
        width_downsample_ratio=1 / 16.),
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'filename', 'ori_shape', 'img_shape', 'valid_ratio',
            'resize_shape', 'img_norm_cfg', 'ori_filename'
        ]),
]
dataset_type = 'OCRDataset'

# LMDB roots: real HWDB2 handwriting plus synthetic handwriting data.
train_real = '/ssd/wsl/LMDB/Handwriting/HWDB2/train'
train_synth = '/ssd/wsl/LMDB/Handwriting/SynthHWDB'
# NOTE(review): defined but not included in train_paths — confirm intentional.
train_synth_1 = '/ssd/wsl/LMDB/Handwriting/SynthHWDB_IC13'
train_paths = [train_real, train_synth]

# Evaluation sets: HWDB2 test split and ICDAR2013 handwriting test split.
test_paths = [
    '/ssd/wsl/LMDB/Handwriting/HWDB2/test/',
    '/ssd/wsl/LMDB/Handwriting/ICDAR2013/test',
]
# One OCRDataset per training LMDB root; each LMDB holds both the images and
# the line-level JSON annotations ({'filename': ..., 'text': ...}).
train_datasets = [
    dict(
        type=dataset_type,
        img_prefix=root,
        ann_file=root,
        loader=dict(
            type='AnnFileLoader',
            repeat=1,
            file_format='lmdb',
            parser=dict(
                type='LineJsonParser',
                keys=['filename', 'text'],
            )),
        pipeline=train_pipeline,
    )
    for root in train_paths
]

# One OCRDataset per evaluation LMDB root; same loader/parser layout as the
# training sets, but flagged test_mode so labels are used only for scoring.
test_datasets = [
    dict(
        type=dataset_type,
        img_prefix=root,
        ann_file=root,
        loader=dict(
            type='AnnFileLoader',
            repeat=1,
            file_format='lmdb',
            parser=dict(
                type='LineJsonParser',
                keys=['filename', 'text'],
            )),
        pipeline=test_pipeline,
        test_mode=True,
    )
    for root in test_paths
]

# Dataloader configuration: 96 images/GPU for training, 128 for eval/test.
data = {
    'samples_per_gpu': 96,
    'workers_per_gpu': 8,
    'val_dataloader': {'samples_per_gpu': 128},
    'test_dataloader': {'samples_per_gpu': 128},
    'train': {
        'type': 'UniformConcatDataset',
        'datasets': train_datasets,
        'pipeline': train_pipeline,
    },
    'val': {
        'type': 'UniformConcatDataset',
        'datasets': test_datasets,
        'pipeline': test_pipeline,
    },
    # NOTE(review): 'test' overrides the datasets' pipeline with viz_pipeline,
    # whose first step loads from plain files while these datasets are
    # LMDB-backed — confirm this is intentional.
    'test': {
        'type': 'UniformConcatDataset',
        'datasets': test_datasets,
        'pipeline': viz_pipeline,
    },
}


# Train for 50 epochs with the epoch-based runner.
runner = dict(type='EpochBasedRunner', max_epochs=50)

# Emit a text log line every 50 iterations.
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])

# Start from scratch; earlier checkpoint paths kept for reference:
#   resume: ~/wsl/mmocr_0.6/work_dirs/zh/robust_scanner_scene/latest.pth
#   load:   ~/wsl/mmocr_0.6/work_dirs/zh/hw/pvt_ctc_hwdb2/latest.pth
resume_from = None
load_from = None

# Checkpoint every epoch, keeping only the five most recent.
checkpoint_config = dict(interval=1, max_keep_ckpts=5)
# Evaluate recognition accuracy after every epoch.
evaluation = dict(interval=1, metric='acc')
# Mixed-precision training with dynamic loss scaling.
fp16 = dict(loss_scale='dynamic')
