# Inherit shared defaults: runtime settings plus the SynthText+MJSynth training
# and academic-benchmark test dataset lists (mmcv config inheritance).
_base_ = [
    '../../_base_/default_runtime.py',
    '../../_base_/recog_datasets/ST_MJ_train.py',
    '../../_base_/recog_datasets/academic_test.py'
]

# {{_base_.xxx}} is mmcv config-template syntax: the loader substitutes the
# variables defined in the base configs above. Not plain Python — keep as-is.
train_list = {{_base_.train_list}}
test_list = {{_base_.test_list}}

# ImageNet channel mean/std (0-1 range) used to normalize input images.
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeOCR',
        height=64,
        # min_width == max_width forces every image to a fixed 64x200 (HxW),
        # hence keep_aspect_ratio=False.
        min_width=200,
        max_width=200,
        keep_aspect_ratio=False,
        width_downsample_ratio=0.25),
    dict(type='ToTensorOCR'),
    dict(type='NormalizeOCR', **img_norm_cfg),
    dict(
        # Bundle the image with the metadata the recognizer consumes;
        # 'text' carries the ground-truth transcription for training.
        type='Collect',
        keys=['img'],
        meta_keys=[
            'filename', 'ori_shape', 'img_shape', 'text', 'valid_ratio',
            'resize_shape'
        ]),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        # Test-time augmentation: the inner transforms run on the image
        # rotated by 0/90/270 degrees (result aggregation is handled inside
        # MultiRotateAugOCR — see its implementation for the selection rule).
        type='MultiRotateAugOCR',
        rotate_degrees=[0, 90, 270],
        transforms=[
            dict(
                # Same fixed 64x200 resize as the training pipeline.
                type='ResizeOCR',
                height=64,
                min_width=200,
                max_width=200,
                keep_aspect_ratio=False,
                width_downsample_ratio=0.25),
            dict(type='ToTensorOCR'),
            dict(type='NormalizeOCR', **img_norm_cfg),
            dict(
                # Like the training Collect but without 'text' (no labels at
                # inference) and with extra bookkeeping meta keys.
                type='Collect',
                keys=['img'],
                meta_keys=[
                    'filename', 'ori_shape', 'img_shape', 'valid_ratio',
                    'resize_shape', 'img_norm_cfg', 'ori_filename'
                ]),
        ])
]

# Attention-decoder label converter backed by a Hindi character dictionary;
# with_unknown=True adds an unknown-character token for out-of-dict symbols.
# NOTE(review): absolute local path — consider making it relative/configurable.
label_convertor = dict(
    type='AttnConvertor',
    dict_file='/home/wsl/OCR/mmocr_mlt/char_list/hindi_dict.txt',
    with_unknown=True)

# SATRN recognizer with the stock SatrnEncoder swapped for a ViTAE encoder.
model = dict(
    type='SATRN',
    # Shallow CNN stem producing 512-dim features for the encoder.
    backbone=dict(type='TempCNN', input_channels=3, hidden_dim=512),
    encoder=dict(
        type='VitaeEncoder',
        # Per-stage token/attention types for the RC and NC cells.
        RC_tokens_type=['window', 'window', 'transformer', 'transformer'],
        NC_tokens_type=['window', 'window', 'transformer', 'transformer'],
        stages=3,
        embed_dims=[96, 96, 256, 512],
        token_dims=[128, 256, 512, 1024],
        downsample_ratios=[2, 2, 2, 1],
        NC_depth=[2, 2, 12, 2],
        NC_heads=[1, 4, 8, 16],
        RC_heads=[1, 1, 4, 8],
        mlp_ratio=4.,
        NC_group=[1, 32, 64, 128],
        RC_group=[1, 16, 32, 64],
        num_classes=1000,
        # Must match the fixed ResizeOCR output size (H, W) in the pipelines.
        img_size=(64, 200)),
    # Standard 6-layer transformer decoder (8 heads, d_model=512).
    decoder=dict(
        type='NRTRDecoder',
        n_layers=6,
        d_embedding=512,
        n_head=8,
        d_model=512,
        d_inner=512 * 4,
        d_k=512 // 8,
        d_v=512 // 8),
    loss=dict(type='TFLoss'),
    label_convertor=label_convertor,
    # Maximum decoded sequence length, including start/end tokens.
    max_seq_len=22)

# optimizer
optimizer = dict(type='Adam', lr=2e-5)
optimizer_config = dict(grad_clip=None)  # gradient clipping disabled
# learning policy
# NOTE(review): with step=[1000000] the decay step is presumably never reached
# in a 6-epoch run, so the LR stays constant at 2e-5 — confirm this is intended.
lr_config = dict(policy='step', step=[1000000])
total_epochs = 6

data = dict(
    samples_per_gpu=50,  # training batch size per GPU
    # workers_per_gpu=4,
    # NOTE(review): 50 dataloader workers per GPU is unusually high and may
    # exhaust host RAM / file descriptors — confirm intentional.
    workers_per_gpu=50,
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    # Datasets and lists come from the base configs via {{_base_.xxx}} above.
    train=dict(
        type='UniformConcatDataset',
        datasets=train_list,
        pipeline=train_pipeline),
    val=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline),
    test=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline))

# NOTE(review): an interval of 4e16 iterations effectively disables periodic
# evaluation during training — confirm this is intentional rather than a typo.
evaluation = dict(interval=40000000000000000, metric='acc', by_epoch=False)


# Save a checkpoint every 400 iterations (iteration-based, not per epoch).
checkpoint_config = dict(interval=400, by_epoch=False)