Update weights and config to reflect latest changes from the PR

#1 by fcakyon - opened
Files changed (3)
  1. config.json +3 -4
  2. preprocessor_config.json +26 -0
  3. pytorch_model.bin +2 -2
config.json CHANGED
@@ -1,10 +1,10 @@
 {
   "architectures": [
-    "TimeSformerForVideoClassification"
+    "TimesformerForVideoClassification"
   ],
   "attention_probs_dropout_prob": 0.0,
   "attention_type": "divided_space_time",
-  "drop_path_prob": 0,
+  "drop_path_rate": 0,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.0,
   "hidden_size": 768,
@@ -824,6 +824,5 @@
   "patch_size": 16,
   "qkv_bias": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.23.0.dev0",
-  "use_mean_pooling": false
+  "transformers_version": "4.25.0.dev0"
 }
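For reference, a minimal sketch of loading the updated config and checkpoint with the renamed class. It assumes a transformers version recent enough to ship `TimesformerForVideoClassification` (the config here was produced with 4.25.0.dev0); the repository id is a placeholder, since the target repo is not named in the diff.

```python
# Minimal sketch; MODEL_ID is a placeholder for the repository this PR updates.
from transformers import AutoConfig, TimesformerForVideoClassification

MODEL_ID = "<namespace>/<timesformer-repo>"  # placeholder, not named in the diff

config = AutoConfig.from_pretrained(MODEL_ID)
print(config.architectures)    # ["TimesformerForVideoClassification"] after this PR
print(config.drop_path_rate)   # 0 -- key renamed from the old "drop_path_prob"

# Loads the updated pytorch_model.bin weights against the renamed architecture.
model = TimesformerForVideoClassification.from_pretrained(MODEL_ID)
```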
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "VideoMAEImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
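The added preprocessor config points at `VideoMAEImageProcessor`, which resizes to shortest_edge 224, center-crops to 224x224, rescales by 1/255, and normalizes with mean/std 0.5. A minimal sketch of using it, assuming a transformers version that provides this image processor; the repository id and the dummy frames are placeholders.

```python
# Minimal sketch; MODEL_ID is a placeholder for the repository this PR updates.
import numpy as np
from transformers import VideoMAEImageProcessor

MODEL_ID = "<namespace>/<timesformer-repo>"  # placeholder, not named in the diff

processor = VideoMAEImageProcessor.from_pretrained(MODEL_ID)

# 8 dummy RGB frames of shape (height, width, channels); a real clip would come from a video reader.
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]

# Applies resize, center crop, rescale (1/255), and normalization from preprocessor_config.json.
inputs = processor(video, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 8, 3, 224, 224])
```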
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:968479f41eb3f2f4b1829ae06b663998dcc3782deb6a3e81746a9e51dbabd46d
-size 486346639
+oid sha256:bdb264469ffc423fb86c78932268a290bfbd69bd768b7ce8595c047a2dc54089
+size 486348721
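Only the Git LFS pointer changes here; the oid is the SHA-256 of the new weight file and the size is its byte count. A small sketch for checking a locally downloaded copy against the updated pointer; the local path is illustrative.

```python
# Minimal sketch (assumption: pytorch_model.bin has already been downloaded locally,
# e.g. via huggingface_hub.hf_hub_download; the path below is illustrative).
import hashlib
import os

EXPECTED_SHA256 = "bdb264469ffc423fb86c78932268a290bfbd69bd768b7ce8595c047a2dc54089"
EXPECTED_SIZE = 486348721  # bytes, from the updated LFS pointer

def sha256_of(path, chunk_size=1 << 20):
    # Hash the file in chunks to avoid loading ~486 MB into memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = "pytorch_model.bin"  # local copy of the updated weights
assert os.path.getsize(path) == EXPECTED_SIZE
assert sha256_of(path) == EXPECTED_SHA256
```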