{
  "architectures": [
    "LayoutElementDetector"
  ],
  "input": {
    "max_size": 1333,
    "min_size": 800,
    "model_type": ""
  },
  "model": {
    "anchor_generator": {
      "aspect_ratios": [
        [
          0.5,
          1.0,
          2.0
        ]
      ],
      "model_type": "",
      "offset": 0.0,
      "sizes": [
        [
          32
        ],
        [
          64
        ],
        [
          128
        ],
        [
          256
        ],
        [
          512
        ]
      ]
    },
    "backbone": {
      "config": {
        "_name_or_path": "",
        "add_cross_attention": false,
        "architectures": null,
        "attention_probs_dropout_prob": 0.1,
        "bad_words_ids": null,
        "begin_suppress_tokens": null,
        "bos_token_id": 0,
        "chunk_size_feed_forward": 0,
        "classifier_dropout": null,
        "coordinate_size": 128,
        "cross_attention_hidden_size": null,
        "decoder_start_token_id": null,
        "discrete_vae_type": "dall-e",
        "discrete_vae_weight_path": "",
        "diversity_penalty": 0.0,
        "do_sample": false,
        "early_stopping": false,
        "encoder_no_repeat_ngram_size": 0,
        "eos_token_id": 2,
        "exponential_decay_length_penalty": null,
        "finetuning_task": null,
        "forced_bos_token_id": null,
        "forced_eos_token_id": null,
        "has_relative_attention_bias": false,
        "has_spatial_attention_bias": false,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 768,
        "id2label": {
          "0": "LABEL_0",
          "1": "LABEL_1"
        },
        "initializer_range": 0.02,
        "input_size": 224,
        "intermediate_size": 3072,
        "is_decoder": false,
        "is_encoder_decoder": false,
        "label2id": {
          "LABEL_0": 0,
          "LABEL_1": 1
        },
        "layer_norm_eps": 1e-05,
        "length_penalty": 1.0,
        "max_2d_position_embeddings": 1024,
        "max_length": 20,
        "max_position_embeddings": 514,
        "max_rel_2d_pos": 256,
        "max_rel_pos": 128,
        "mim": false,
        "min_length": 0,
        "model_type": "uplayoutlmv3",
        "no_repeat_ngram_size": 0,
        "num_attention_heads": 12,
        "num_beam_groups": 1,
        "num_beams": 1,
        "num_channels": 3,
        "num_hidden_layers": 12,
        "num_return_sequences": 1,
        "output_attentions": false,
        "output_hidden_states": false,
        "output_scores": false,
        "pad_token_id": 1,
        "patch_size": 16,
        "position_embedding_type": "absolute",
        "prefix": null,
        "problem_type": null,
        "pruned_heads": {},
        "rel_2d_pos_bins": 64,
        "rel_pos_bins": 32,
        "remove_invalid_values": false,
        "repetition_penalty": 1.0,
        "return_dict": true,
        "return_dict_in_generate": false,
        "second_input_size": 112,
        "sep_token_id": null,
        "shape_size": 128,
        "suppress_tokens": null,
        "task_specific_params": null,
        "temperature": 1.0,
        "text_embed": true,
        "tf_legacy_loss": false,
        "tie_encoder_decoder": false,
        "tie_word_embeddings": true,
        "tokenizer_class": null,
        "top_k": 50,
        "top_p": 1.0,
        "torch_dtype": "float16",
        "torchscript": false,
        "type_vocab_size": 1,
        "typical_p": 1.0,
        "use_bfloat16": false,
        "use_cache": true,
        "visual_embed": true,
        "vocab_size": 50265,
        "wpa_task": false
      },
      "drop_path": 0.1,
      "img_size": [
        224,
        224
      ],
      "model_type": "",
      "out_features": [
        "layer3",
        "layer5",
        "layer7",
        "layer11"
      ],
      "pos_type": "abs"
    },
    "fpn": {
      "fuse_type": "sum",
      "in_features": [
        "layer3",
        "layer5",
        "layer7",
        "layer11"
      ],
      "model_type": "",
      "out_channels": 256
    },
    "mask_on": true,
    "model_type": "",
    "pixel_mean": [
      127.5,
      127.5,
      127.5
    ],
    "pixel_std": [
      127.5,
      127.5,
      127.5
    ],
    "roi_box_cascade_head": {
      "bbox_reg_weights": [
        [
          10.0,
          10.0,
          5.0,
          5.0
        ],
        [
          20.0,
          20.0,
          10.0,
          10.0
        ],
        [
          30.0,
          30.0,
          15.0,
          15.0
        ]
      ],
      "ious": [
        0.5,
        0.6,
        0.7
      ],
      "model_type": ""
    },
    "roi_box_head": {
      "cls_agnostic_bbox_reg": true,
      "fc_dim": 1024,
      "model_type": "",
      "num_fc": 2,
      "pooler_resolution": 7,
      "pooler_sampling_ratio": 0,
      "use_sigmoid_ce": false
    },
    "roi_heads": {
      "in_features": [
        "p2",
        "p3",
        "p4",
        "p5"
      ],
      "in_strides": [
        4,
        8,
        16,
        32
      ],
      "iou_labels": [
        0,
        1
      ],
      "iou_thresholds": [
        0.5
      ],
      "model_type": "",
      "num_classes": 8,
      "test_nms_thresh": 0.5,
      "test_score_thresh": 0.05
    },
    "roi_mask_head": {
      "cls_agnostic_mask": false,
      "conv_dim": 256,
      "model_type": "",
      "num_conv": 4,
      "pooler_resolution": 14,
      "pooler_sampling_ratio": 0
    },
    "rpn": {
      "anchor_boundary_thresh": -1,
      "bbox_reg_weights": [
        1.0,
        1.0,
        1.0,
        1.0
      ],
      "conv_dims": [
        -1
      ],
      "in_channels": null,
      "in_features": [
        "p2",
        "p3",
        "p4",
        "p5",
        "p6"
      ],
      "in_strides": [
        4,
        8,
        16,
        32,
        64
      ],
      "min_size": 0,
      "model_type": "",
      "nms_thresh": 0.7,
      "post_nms_topk_test": 1000,
      "pre_nms_topk_test": 1000
    },
    "weights": null
  },
  "model_type": "",
  "test": {
    "detections_per_images": 100,
    "label_replace_dict": {
      "Im_Table": "Paragraph"
    },
    "model_type": ""
  },
  "torch_dtype": "float16",
  "transformers_version": "4.39.3"
}
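
For reference, a minimal sketch of inspecting this configuration with the Python standard library (assumptions: the file above has been saved locally as config.json; reading the values requires none of the repo's custom modeling code, which a full model load of this non-standard architecture would need):

import json

# Load the configuration exactly as stored above.
with open("config.json") as f:
    cfg = json.load(f)

# All values printed below come straight from the config.
print(cfg["architectures"])                              # ['LayoutElementDetector']
print(cfg["model"]["backbone"]["config"]["model_type"])  # uplayoutlmv3
print(cfg["model"]["roi_heads"]["num_classes"])          # 8 layout classes
print(cfg["test"]["label_replace_dict"])                 # {'Im_Table': 'Paragraph'}
print(cfg["transformers_version"])                       # 4.39.3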