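# SATRN text recognition config for MMOCR, apparently dumped with all
# base-config values resolved inline (hence the repeated dictionary and
# dataset blocks below). Dataset, annotation and checkpoint paths refer to a
# specific HPC environment and must be adapted for other setups.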
default_scope = "mmocr"
env_cfg = dict(
cudnn_benchmark=True, mp_cfg=dict(mp_start_method="fork", opencv_num_threads=0), dist_cfg=dict(backend="nccl")
)
randomness = dict(seed=None)
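# Default runtime hooks: iteration timing, logging every 100 iterations,
# LR scheduling, per-epoch checkpointing, distributed sampler seeding,
# buffer synchronisation, and prediction visualisation (disabled here).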
default_hooks = dict(
timer=dict(type="IterTimerHook"),
logger=dict(type="LoggerHook", interval=100),
param_scheduler=dict(type="ParamSchedulerHook"),
checkpoint=dict(type="CheckpointHook", interval=1),
sampler_seed=dict(type="DistSamplerSeedHook"),
sync_buffer=dict(type="SyncBuffersHook"),
visualization=dict(type="VisualizationHook", interval=1, enable=False, show=False, draw_gt=False, draw_pred=False),
)
log_level = "INFO"
log_processor = dict(type="LogProcessor", window_size=10, by_epoch=True)
load_from = (
"/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/models/checkpoints/1700_1800_combined_satrn/epoch_5.pth"
)
resume = False
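# Validation and test evaluation: word accuracy (exact / case-insensitive /
# case- and symbol-insensitive), character accuracy, and one-minus-normalized
# edit distance. The valid_symbol pattern keeps Latin letters, digits, CJK
# characters and the Swedish letters å/ä/ö when stripping other symbols.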
val_evaluator = dict(
type="Evaluator",
metrics=[
dict(
type="WordMetric",
mode=["exact", "ignore_case", "ignore_case_symbol"],
valid_symbol="[^A-Z^a-z^0-9^一-龥^å^ä^ö^Å^Ä^Ö]",
),
dict(type="CharMetric", valid_symbol="[^A-Z^a-z^0-9^一-龥^å^ä^ö^Å^Ä^Ö]"),
dict(type="OneMinusNEDMetric", valid_symbol="[^A-Z^a-z^0-9^一-龥^å^ä^ö^Å^Ä^Ö]"),
],
)
test_evaluator = dict(
type="Evaluator",
metrics=[
dict(
type="WordMetric",
mode=["exact", "ignore_case", "ignore_case_symbol"],
valid_symbol="[^A-Z^a-z^0-9^一-龥^å^ä^ö^Å^Ä^Ö]",
),
dict(type="CharMetric", valid_symbol="[^A-Z^a-z^0-9^一-龥^å^ä^ö^Å^Ä^Ö]"),
dict(type="OneMinusNEDMetric", valid_symbol="[^A-Z^a-z^0-9^一-龥^å^ä^ö^Å^Ä^Ö]"),
],
)
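# Visualisation, optimizer and schedule. Note that vis_backends below is not
# referenced by the visualizer, which writes to TensorBoard directly.
# Training runs for 5 epochs with Adam (lr 3e-4) and a MultiStepLR decay at
# epochs 3 and 4; validation runs after every epoch.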
vis_backends = [dict(type="LocalVisBackend")]
visualizer = dict(type="TextRecogLocalVisualizer", name="visualizer", vis_backends=[dict(type="TensorboardVisBackend")])
optim_wrapper = dict(type="OptimWrapper", optimizer=dict(type="Adam", lr=0.0003))
train_cfg = dict(type="EpochBasedTrainLoop", max_epochs=5, val_interval=1)
val_cfg = dict(type="ValLoop")
test_cfg = dict(type="TestLoop")
param_scheduler = [dict(type="MultiStepLR", milestones=[3, 4], end=5)]
file_client_args = dict(backend="disk")
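# Character dictionary for the recognizer, loaded from a plain-text file with
# one character per line, plus padding/unknown/start/end tokens (start and
# end share the same token).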
dictionary = dict(
type="Dictionary",
dict_file="./models/SATRN/dict1700.txt",
with_padding=True,
with_unknown=True,
same_start_end=True,
with_start=True,
with_end=True,
)
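# SATRN recognizer: a shallow CNN backbone feeding a 12-layer SATRN
# transformer encoder and a 6-layer NRTR transformer decoder with attention
# post-processing. The cross-entropy loss ignores the first (start) character.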
model = dict(
type="SATRN",
backbone=dict(type="ShallowCNN", input_channels=3, hidden_dim=512),
encoder=dict(
type="SATRNEncoder",
n_layers=12,
n_head=8,
d_k=64,
d_v=64,
d_model=512,
n_position=100,
d_inner=2048,
dropout=0.1,
),
decoder=dict(
type="NRTRDecoder",
n_layers=6,
d_embedding=512,
n_head=8,
d_model=512,
d_inner=2048,
d_k=64,
d_v=64,
module_loss=dict(type="CEModuleLoss", flatten=True, ignore_first_char=True),
dictionary=dict(
type="Dictionary",
dict_file="./models/SATRN/dict1700.txt",
with_padding=True,
with_unknown=True,
same_start_end=True,
with_start=True,
with_end=True,
),
max_seq_len=100,
postprocessor=dict(type="AttentionPostprocessor"),
),
data_preprocessor=dict(
type="TextRecogDataPreprocessor", mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]
),
)
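# Data pipelines: images are loaded from disk, annotations attached, images
# resized to a fixed scale of (400, 64) without preserving aspect ratio, and
# packed for the recognizer. The test pipeline resizes before loading
# annotations but is otherwise identical.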
train_pipeline = [
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk"), ignore_empty=True, min_size=2),
dict(type="LoadOCRAnnotations", with_text=True),
dict(type="Resize", scale=(400, 64), keep_ratio=False),
dict(type="PackTextRecogInputs", meta_keys=("img_path", "ori_shape", "img_shape", "valid_ratio")),
]
test_pipeline = [
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(400, 64), keep_ratio=False),
dict(type="LoadOCRAnnotations", with_text=True),
dict(type="PackTextRecogInputs", meta_keys=("img_path", "ori_shape", "img_shape", "valid_ratio")),
]
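# Source datasets: the HTR_1700 sets use JSON-lines annotations parsed with
# LineJsonParser, while the pr_cr_combined sets use "|"-separated text lines
# parsed with LineStrParser. pipeline=None because the transform pipeline is
# applied at the ConcatDataset / dataloader level instead.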
HTR_1700_combined_train = dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_HTR_shuffled_train.jsonl",
test_mode=False,
pipeline=None,
)
HTR_1700_combined_test = dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_HTR_shuffled_val.jsonl",
test_mode=True,
pipeline=None,
)
pr_cr_combined_train = dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineStrParser", keys=["filename", "text"], separator="|"),
data_root="/ceph/hpc/scratch/user/euerikl/data/line_images",
ann_file="/ceph/hpc/home/euerikl/projects/htr_1800/gt_files/combined_train.txt",
test_mode=False,
pipeline=None,
)
pr_cr_combined_test = dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineStrParser", keys=["filename", "text"], separator="|"),
data_root="/ceph/hpc/scratch/user/euerikl/data/line_images",
ann_file="/ceph/hpc/home/euerikl/projects/htr_1800/gt_files/combined_eval.txt",
test_mode=True,
pipeline=None,
)
out_of_domain_1700_all_test = dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_testsets_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_testsets_gt/1700_HTR_testsets_all.jsonl",
test_mode=True,
pipeline=None,
)
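# train_list / test_list and train_dataset / test_dataset repeat the dataset
# definitions above; the duplication appears to be an artifact of dumping the
# config with intermediate variables expanded.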
train_list = [
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_HTR_shuffled_train.jsonl",
test_mode=False,
pipeline=None,
),
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineStrParser", keys=["filename", "text"], separator="|"),
data_root="/ceph/hpc/scratch/user/euerikl/data/line_images",
ann_file="/ceph/hpc/home/euerikl/projects/htr_1800/gt_files/combined_train.txt",
test_mode=False,
pipeline=None,
),
]
test_list = [
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_testsets_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_testsets_gt/1700_HTR_testsets_all.jsonl",
test_mode=True,
pipeline=None,
)
]
train_dataset = dict(
type="ConcatDataset",
datasets=[
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_HTR_shuffled_train.jsonl",
test_mode=False,
pipeline=None,
),
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineStrParser", keys=["filename", "text"], separator="|"),
data_root="/ceph/hpc/scratch/user/euerikl/data/line_images",
ann_file="/ceph/hpc/home/euerikl/projects/htr_1800/gt_files/combined_train.txt",
test_mode=False,
pipeline=None,
),
],
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk"), ignore_empty=True, min_size=2),
dict(type="LoadOCRAnnotations", with_text=True),
dict(type="Resize", scale=(400, 64), keep_ratio=False),
dict(type="PackTextRecogInputs", meta_keys=("img_path", "ori_shape", "img_shape", "valid_ratio")),
],
)
test_dataset = dict(
type="ConcatDataset",
datasets=[
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_testsets_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_testsets_gt/1700_HTR_testsets_all.jsonl",
test_mode=True,
pipeline=None,
)
],
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(400, 64), keep_ratio=False),
dict(type="LoadOCRAnnotations", with_text=True),
dict(type="PackTextRecogInputs", meta_keys=("img_path", "ori_shape", "img_shape", "valid_ratio")),
],
)
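# Dataloaders: batch size 8 with a single worker. Training shuffles and
# concatenates the HTR_1700 and pr_cr training sets; validation and test both
# use the out-of-domain 1700 test set defined above.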
train_dataloader = dict(
batch_size=8,
num_workers=1,
persistent_workers=True,
sampler=dict(type="DefaultSampler", shuffle=True),
dataset=dict(
type="ConcatDataset",
datasets=[
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_HTR_shuffled_train.jsonl",
test_mode=False,
pipeline=None,
),
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineStrParser", keys=["filename", "text"], separator="|"),
data_root="/ceph/hpc/scratch/user/euerikl/data/line_images",
ann_file="/ceph/hpc/home/euerikl/projects/htr_1800/gt_files/combined_train.txt",
test_mode=False,
pipeline=None,
),
],
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk"), ignore_empty=True, min_size=2),
dict(type="LoadOCRAnnotations", with_text=True),
dict(type="Resize", scale=(400, 64), keep_ratio=False),
dict(type="PackTextRecogInputs", meta_keys=("img_path", "ori_shape", "img_shape", "valid_ratio")),
],
),
)
test_dataloader = dict(
batch_size=8,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type="DefaultSampler", shuffle=False),
dataset=dict(
type="ConcatDataset",
datasets=[
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_testsets_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_testsets_gt/1700_HTR_testsets_all.jsonl",
test_mode=True,
pipeline=None,
)
],
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(400, 64), keep_ratio=False),
dict(type="LoadOCRAnnotations", with_text=True),
dict(type="PackTextRecogInputs", meta_keys=("img_path", "ori_shape", "img_shape", "valid_ratio")),
],
),
)
val_dataloader = dict(
batch_size=8,
num_workers=1,
persistent_workers=True,
drop_last=False,
sampler=dict(type="DefaultSampler", shuffle=False),
dataset=dict(
type="ConcatDataset",
datasets=[
dict(
type="RecogTextDataset",
parser_cfg=dict(type="LineJsonParser", keys=["filename", "text"]),
data_root="/ceph/hpc/scratch/user/euerikl/data/HTR_1700_testsets_clean",
ann_file="/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/data/processed/1700_testsets_gt/1700_HTR_testsets_all.jsonl",
test_mode=True,
pipeline=None,
)
],
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(400, 64), keep_ratio=False),
dict(type="LoadOCRAnnotations", with_text=True),
dict(type="PackTextRecogInputs", meta_keys=("img_path", "ori_shape", "img_shape", "valid_ratio")),
],
),
)
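# Remaining runtime settings. gpu_ids, cudnn_benchmark and checkpoint_config
# appear to be legacy MMCV 1.x-style fields carried over in the dump; the
# active equivalents are env_cfg and default_hooks above. auto_scale_lr lets
# the learning rate be rescaled relative to base_batch_size=32 when LR
# auto-scaling is enabled.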
gpu_ids = range(0, 4)
cudnn_benchmark = True
work_dir = "/ceph/hpc/home/euerikl/projects/hf_openmmlab_models/models/checkpoints/1700_1800_combined_satrn"
checkpoint_config = dict(interval=1)
auto_scale_lr = dict(base_batch_size=32)
launcher = "pytorch"
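# A typical multi-GPU launch for this config (launcher="pytorch", 4 GPUs),
# assuming a standard MMOCR checkout with tools/train.py; the config filename
# below is illustrative, not the actual name of this file:
#
#   torchrun --nproc_per_node=4 tools/train.py \
#       configs/satrn_1700_1800_combined.py \
#       --work-dir /ceph/hpc/home/euerikl/projects/hf_openmmlab_models/models/checkpoints/1700_1800_combined_satrn \
#       --launcher pytorch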