calisolo committed
Commit 67e6c31
Parent: 5180fc4

Upload 8 files


Upload one of the checkpoints for the NICE (image captioning challenge) entry, based on OFA-huge.
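For context, a minimal loading-and-captioning sketch. It assumes the OFA-Sys fork of transformers, which provides OFATokenizer and OFAModel (this repo's own architecture class is OFAModelForCaption, per config.json below), plus a local clone of this repo; the checkpoint path, image file, and 480-pixel resolution follow OFA's published preprocessing and are assumptions, not part of this commit.

    from PIL import Image
    from torchvision import transforms
    from transformers import OFATokenizer, OFAModel  # OFA-Sys fork of transformers

    ckpt_dir = "./OFA-huge-caption"  # assumed local clone of this repo

    # OFA's published image preprocessing: resize to 480x480, normalize to [-1, 1].
    resolution = 480
    patch_resize_transform = transforms.Compose([
        lambda im: im.convert("RGB"),
        transforms.Resize((resolution, resolution), interpolation=Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    tokenizer = OFATokenizer.from_pretrained(ckpt_dir)
    model = OFAModel.from_pretrained(ckpt_dir, use_cache=False)

    prompt = " what does the image describe?"  # OFA's captioning prompt
    input_ids = tokenizer([prompt], return_tensors="pt").input_ids
    patch_images = patch_resize_transform(Image.open("example.jpg")).unsqueeze(0)

    gen = model.generate(input_ids, patch_images=patch_images,
                         num_beams=5, no_repeat_ngram_size=3)
    print(tokenizer.batch_decode(gen, skip_special_tokens=True))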

added_tokens.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "_name_or_path": "../drive/MyDrive/NICE/model/cap_best/OFA-huge-caption",
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "add_type_embedding": true,
+   "architectures": [
+     "OFAModelForCaption"
+   ],
+   "attention_dropout": 0.0,
+   "attn_scale_factor": 2.0,
+   "bos_token_id": 0,
+   "classifier_dropout": 0.0,
+   "code_image_size": 128,
+   "code_layernorm_embedding": true,
+   "d_model": 1280,
+   "decoder_attention_heads": 16,
+   "decoder_drop_path_rate": 0.0,
+   "decoder_ffn_dim": 5120,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 12,
+   "decoder_normalize_before": true,
+   "decoder_start_token_id": 0,
+   "dropout": 0.1,
+   "encoder_attention_heads": 16,
+   "encoder_drop_path_rate": 0.0,
+   "encoder_ffn_dim": 5120,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 24,
+   "encoder_normalize_before": true,
+   "entangle_position_embedding": false,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "image_bucket_size": 42,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "layernorm_embedding": true,
+   "max_position_embeddings": 1024,
+   "model_type": "ofa",
+   "normformer": true,
+   "num_hidden_layers": 24,
+   "pad_token_id": 1,
+   "patch_layernorm_embedding": true,
+   "resnet_drop_path_rate": 0.0,
+   "resnet_model_path": null,
+   "resnet_type": "resnet152",
+   "scale_embedding": false,
+   "share_decoder_input_output_embed": true,
+   "token_bucket_size": 256,
+   "torch_dtype": "float32",
+   "transformers_version": "4.20.0",
+   "use_cache": false,
+   "vocab_size": 59457
+ }
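These fields pin down the OFA-huge geometry: a 24-layer encoder and 12-layer decoder, width 1280 split over 16 heads (80 dims per head), and 5120-wide feed-forward blocks. A quick sanity-check sketch over the file above; the vocabulary decomposition (50265 BPE tokens + 8192 image codes + 1000 location bins) is OFA's standard layout and is an assumption here, not something this commit states:

    import json

    with open("config.json") as f:
        cfg = json.load(f)

    # Width divides evenly across heads: 1280 / 16 = 80 dims per head.
    assert cfg["d_model"] % cfg["encoder_attention_heads"] == 0

    # Feed-forward width is the usual 4x expansion of d_model.
    assert cfg["encoder_ffn_dim"] == 4 * cfg["d_model"]

    # Assumed OFA vocabulary layout: BPE text + image codes + location bins.
    assert cfg["vocab_size"] == 50265 + 8192 + 1000  # 59457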
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:124c6aae5f83a770d311fa14a81015a9679a57de6f0f012adbae414b8042fde6
+ size 3785732668
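The three lines above are a Git LFS pointer, not the weights themselves: the ~3.8 GB binary is fetched separately (e.g. via git lfs pull) and can then be checked against the recorded digest. A small verification sketch using only the standard library:

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        # Stream in 1 MiB chunks so the 3.8 GB file never sits fully in memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            while chunk := f.read(chunk_size):
                h.update(chunk)
        return h.hexdigest()

    # Must match the oid recorded in the pointer above.
    assert sha256_of("pytorch_model.bin") == (
        "124c6aae5f83a770d311fa14a81015a9679a57de6f0f012adbae414b8042fde6"
    )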
special_tokens_map.json ADDED
@@ -0,0 +1,72 @@
+ {
+   "additional_special_tokens": [
+     "[cosHint lv4]",
+     "[cosHint lv3]",
+     "[cosHint lv2]",
+     "[cosHint lv1]",
+     "[diffHint lv3]",
+     "[diffHint lv2]",
+     "[diffHint lv1]",
+     "[shot_style]",
+     "[Location]",
+     "[NULL]",
+     "[outdoors]",
+     "[misc]",
+     "[office]",
+     "[electronics]",
+     "[food]",
+     "[indoors]",
+     "[medical]",
+     "[stocky_setting]",
+     "[animals]"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
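The 19 additional_special_tokens appear to be the challenge-specific conditioning tags used by this entry (hint levels plus scene/category labels); registering them as special tokens keeps each one atomic instead of letting BPE split it. A quick check, assuming the tokenizer is loaded from a local clone of this repo:

    from transformers import OFATokenizer  # OFA-Sys fork (assumed installed)

    tok = OFATokenizer.from_pretrained(".")  # run from a clone of this repo

    # Each tag maps to a single token id; BPE never splits inside the brackets.
    print(tok.tokenize("[cosHint lv4] [outdoors] a photo of a dog"))
    print(tok.convert_tokens_to_ids(["[cosHint lv4]", "[outdoors]"]))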
tokenizer_config.json ADDED
@@ -0,0 +1,84 @@
+ {
+   "add_prefix_space": false,
+   "additional_special_tokens": [
+     "[cosHint lv4]",
+     "[cosHint lv3]",
+     "[cosHint lv2]",
+     "[cosHint lv1]",
+     "[diffHint lv3]",
+     "[diffHint lv2]",
+     "[diffHint lv1]",
+     "[shot_style]",
+     "[Location]",
+     "[NULL]",
+     "[outdoors]",
+     "[misc]",
+     "[office]",
+     "[electronics]",
+     "[food]",
+     "[indoors]",
+     "[medical]",
+     "[stocky_setting]",
+     "[animals]"
+   ],
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "name_or_path": "./vocab",
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "special_tokens_map_file": null,
+   "tokenizer_class": "OFATokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
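Compared with special_tokens_map.json, each entry here carries "__type": "AddedToken", so the per-token flags survive a save/load round trip. The only flag that varies is lstrip, true only for <mask>: the mask token swallows the whitespace to its left, BART-style. A minimal illustration of that flag (the token content comes from the file above; the construction itself is generic):

    from transformers import AddedToken

    # lstrip=True mirrors the "<mask>" entry above: when this token is matched,
    # the whitespace immediately before it is folded into the token itself.
    mask = AddedToken("<mask>", lstrip=True, rstrip=False, normalized=True)
    print(mask)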
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5a5cc5f0011e268e718caa5b1ef21d0aabdcd3c5b3826ae00f8b5cbacd4b41a
+ size 3323
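Another LFS pointer; at 3,323 bytes this is the pickled transformers TrainingArguments from the fine-tuning run, not weights. Once pulled it can be inspected (a sketch; the printed fields are standard TrainingArguments attributes, and their values are not recorded in this commit):

    import torch

    # A pickle of TrainingArguments: transformers must be importable to load
    # it, and recent PyTorch (2.6+) requires weights_only=False for pickles.
    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate)
    print(args.per_device_train_batch_size)
    print(args.num_train_epochs)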
vocab.json ADDED
The diff for this file is too large to render. See raw diff