nielsr committed
Commit: 21f83ff
Parent: a841cc4

Update README.md

Files changed (1): README.md (+3, −3)
README.md CHANGED
@@ -28,17 +28,17 @@ You can use the raw model for predicting pixel values for masked patches of a vi
 Here is how to use this model to predict pixel values for randomly masked patches:
 
 ```python
-from transformers import VideoMAEFeatureExtractor, VideoMAEForPreTraining
+from transformers import VideoMAEImageProcessor, VideoMAEForPreTraining
 import numpy as np
 import torch
 
 num_frames = 16
 video = list(np.random.randn(16, 3, 224, 224))
 
-feature_extractor = VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base-short")
+processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base-short")
 model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short")
 
-pixel_values = feature_extractor(video, return_tensors="pt").pixel_values
+pixel_values = processor(video, return_tensors="pt").pixel_values
 
 num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
 seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame
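The only functional change here is the switch from the deprecated `VideoMAEFeatureExtractor` to `VideoMAEImageProcessor`. For reference, the updated snippet assembles into the runnable example below. This is a minimal sketch: the hunk above ends at the `seq_length` computation, so the random `bool_masked_pos` mask and the forward pass at the end are an assumed continuation for illustration, not part of this commit.

```python
from transformers import VideoMAEImageProcessor, VideoMAEForPreTraining
import numpy as np
import torch

num_frames = 16
# 16 random frames of shape (channels, height, width)
video = list(np.random.randn(16, 3, 224, 224))

processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base-short")
model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short")

# Resize/normalize the frames and stack them into (batch, frames, channels, height, width)
pixel_values = processor(video, return_tensors="pt").pixel_values

num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame

# Assumed continuation (not shown in the hunk): VideoMAEForPreTraining takes a boolean
# mask of shape (batch_size, seq_length) marking which tubelet patches are masked.
bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool()

outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
loss = outputs.loss
```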