Update weights and config to reflect the latest changes from the PR

#1
by fcakyon - opened
Files changed (3)
  1. config.json +3 -4
  2. preprocessor_config.json +26 -0
  3. pytorch_model.bin +2 -2
config.json CHANGED
@@ -1,10 +1,10 @@
 {
   "architectures": [
-    "TimeSformerForVideoClassification"
+    "TimesformerForVideoClassification"
   ],
   "attention_probs_dropout_prob": 0.0,
   "attention_type": "divided_space_time",
-  "drop_path_prob": 0,
+  "drop_path_rate": 0,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.0,
   "hidden_size": 768,
@@ -1224,6 +1224,5 @@
   "patch_size": 16,
   "qkv_bias": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.23.0.dev0",
-  "use_mean_pooling": false
+  "transformers_version": "4.25.0.dev0"
 }
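
With this change the `architectures` entry matches the class name that shipped in transformers (TimesformerForVideoClassification, lowercase "s") and `drop_path_prob` becomes `drop_path_rate`, so the config resolves without a custom mapping. A minimal sketch of checking the updated config, assuming transformers >= 4.25 and a placeholder repo id for whichever checkpoint this PR targets:

```python
# Minimal sketch: verify the updated config resolves to the renamed
# Timesformer classes. Assumes transformers >= 4.25; the repo id below is a
# placeholder, not the actual model repo.
from transformers import TimesformerConfig, TimesformerForVideoClassification

repo_id = "your-namespace/timesformer-checkpoint"  # placeholder

config = TimesformerConfig.from_pretrained(repo_id)
print(config.architectures)   # ['TimesformerForVideoClassification']
print(config.drop_path_rate)  # 0 (renamed from drop_path_prob)

# Building the model from the config alone (random weights) confirms the
# renamed class accepts the updated fields.
model = TimesformerForVideoClassification(config)
```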
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "VideoMAEImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
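
The new preprocessor_config.json describes a VideoMAEImageProcessor that resizes frames to a 224 shortest edge, center-crops to 224x224, rescales by 1/255 and normalizes with mean/std 0.5. A sketch of what that produces on dummy frames (placeholder repo id; 8 frames per clip is an assumption based on the usual TimeSformer-base setting):

```python
# Minimal sketch: run the added preprocessor on dummy frames.
# Assumes transformers >= 4.25; repo id is a placeholder and 8 frames per
# clip is an assumption.
import numpy as np
from transformers import VideoMAEImageProcessor

repo_id = "your-namespace/timesformer-checkpoint"  # placeholder

processor = VideoMAEImageProcessor.from_pretrained(repo_id)
# 8 random RGB frames standing in for a decoded video clip.
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]

inputs = processor(video, return_tensors="pt")
# Resized to shortest edge 224, center-cropped, rescaled and normalized.
print(inputs["pixel_values"].shape)  # torch.Size([1, 8, 3, 224, 224])
```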
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc374df93d002aef750cc80325347593aed42b0660a3a1c413056339ea3d7861
-size 488792719
+oid sha256:199273d84d79cea619f71b3bcc8c2bdd5538e78420e7be19b8da15f46126d285
+size 488794801
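
The pytorch_model.bin LFS pointer now references a new oid and size, presumably a re-export of the weights to match the final state-dict layout. A hedged end-to-end sketch, again with a placeholder repo id and an assumed 8-frame clip:

```python
# Minimal end-to-end sketch: load the updated checkpoint into the renamed
# class and classify a dummy clip. Assumes transformers >= 4.25; repo id is
# a placeholder and the 8-frame clip length is an assumption.
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

repo_id = "your-namespace/timesformer-checkpoint"  # placeholder

processor = VideoMAEImageProcessor.from_pretrained(repo_id)
model = TimesformerForVideoClassification.from_pretrained(repo_id)

video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])
```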