Raghavan committed on
Commit
8d54e37
1 Parent(s): 709c44b

Upload 7 files

Files changed (2)
  1. config.json +7 -7
  2. pytorch_model.bin +1 -1
config.json CHANGED
@@ -2,14 +2,13 @@
  "activation_dropout": 0.0,
  "activation_fn": "gelu",
  "architectures": [
-  "Beit3ForVisualQuestionAnswering"
+  "Beit3ForQuestionAnswering"
  ],
  "attention_dropout": 0.0,
  "deepnorm": false,
  "dropout": 0.0,
- "embed_dim": 768,
  "encoder_normalize_before": false,
- "hidden_size": 3072,
+ "hidden_size": 768,
  "id2label": {
   "0": "net",
   "1": "pitcher",
@@ -3141,8 +3140,9 @@
  "3127": "crosstown",
  "3128": "freightliner"
  },
- "img_size": 480,
+ "image_size": 480,
  "initializer_range": 0.02,
+ "intermediate_size": 3072,
  "label2id": {
   "": 125,
   "0": 82,
@@ -6276,16 +6276,16 @@
  },
  "label_smoothing": 0.1,
  "layer_norm_eps": 1e-05,
- "layers": 12,
  "logit_scale_init_value": 2.65926,
- "max_source_positions": 1024,
+ "max_position_embeddings": 1024,
  "model_type": "beit3",
  "normalize_before": true,
  "num_attention_heads": 12,
  "num_channels": 3,
+ "num_hidden_layers": 12,
  "patch_size": 16,
  "sub_layernorm": true,
  "torch_dtype": "float32",
- "transformers_version": "4.34.0.dev0",
+ "transformers_version": "4.35.0.dev0",
  "vocab_size": 64010
 }
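The config.json edit renames several BEiT-3 hyperparameters to the usual Transformers-style names (embed_dim -> hidden_size, old hidden_size -> intermediate_size, img_size -> image_size, layers -> num_hidden_layers, max_source_positions -> max_position_embeddings) and swaps the architecture string. A minimal migration sketch for a config written with the old keys is below; the key map and the function name migrate_config are inferred from this diff only, not from any official converter.

import json

# Old BEiT-3 key -> Transformers-style key, as read off this diff (assumption).
KEY_RENAMES = {
    "embed_dim": "hidden_size",             # 768 remains the attention width
    "hidden_size": "intermediate_size",     # the old 3072 was the FFN size
    "img_size": "image_size",
    "layers": "num_hidden_layers",
    "max_source_positions": "max_position_embeddings",
}

def migrate_config(path_in: str, path_out: str) -> None:
    with open(path_in) as f:
        cfg = json.load(f)
    # Pop all old keys first so "hidden_size" is read before it is overwritten.
    old_values = {old: cfg.pop(old) for old in KEY_RENAMES if old in cfg}
    for old, new in KEY_RENAMES.items():
        if old in old_values:
            cfg[new] = old_values[old]
    # Architecture string as it appears after this commit.
    cfg["architectures"] = ["Beit3ForQuestionAnswering"]
    with open(path_out, "w") as f:
        json.dump(cfg, f, indent=2, sort_keys=True)

if __name__ == "__main__":
    migrate_config("config.json", "config.migrated.json")

The two-pass rename matters because the old "hidden_size" (3072) and the new "hidden_size" (768, formerly "embed_dim") would otherwise collide.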
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:a123b05037fd9acf4733ccac5e5d0138642f1927a5463852bb5950d0d4e65987
+ oid sha256:84780f38408aa7b42bbf22c3a85d8a2362610fd7f9a7051d238cfdc4a00a3e8e
 size 912607569
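The pytorch_model.bin entry is a Git LFS pointer, so only the sha256 oid changes while the size stays 912607569 bytes. A quick sketch for checking that a locally downloaded checkpoint matches the pointer in this commit; the expected hash and size are copied from the diff above, and the local file path is an assumption.

import hashlib

EXPECTED_OID = "84780f38408aa7b42bbf22c3a85d8a2362610fd7f9a7051d238cfdc4a00a3e8e"
EXPECTED_SIZE = 912607569

def verify_lfs_object(path: str) -> bool:
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks so the ~900 MB file never sits fully in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

if __name__ == "__main__":
    print(verify_lfs_object("pytorch_model.bin"))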