Update README.md
README.md CHANGED
@@ -27,3 +27,54 @@ Model achieves accuracy of 88.93% and macro-f1 of 89.19%
Class-wise accuracies: ECS - 91.16%, CS - 83.65%, MS - 86.2%, FS - 90.74%, LS - 94.55%

## How to use

This is how the model can be tested on a shot/clip from a video.
The same code is used to process, transform, and evaluate on the MovieNet test set.

```python
import torch

from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification
from pytorchvideo.transforms import ApplyTransformToKey
from torchvision.transforms import v2
from decord import VideoReader, cpu

## Preprocessor and model loading
image_processor = VideoMAEImageProcessor.from_pretrained("gullalc/videomae-base-finetuned-kinetics-movieshots-scale")
model = VideoMAEForVideoClassification.from_pretrained("gullalc/videomae-base-finetuned-kinetics-movieshots-scale")

img_mean = image_processor.image_mean
img_std = image_processor.image_std
height = width = image_processor.size["shortest_edge"]
resize_to = (height, width)

## Evaluation transform
transform = v2.Compose(
    [
        ApplyTransformToKey(
            key="video",
            transform=v2.Compose(
                [
                    v2.Lambda(lambda x: x.permute(0, 3, 1, 2)),  # T, H, W, C -> T, C, H, W
                    v2.UniformTemporalSubsample(16),
                    v2.Resize(resize_to),
                    v2.Lambda(lambda x: x / 255.0),
                    v2.Normalize(img_mean, img_std),
                ]
            ),
        ),
    ]
)

## Load video/clip and predict
video_path = "random_clip.mp4"
vr = VideoReader(video_path, width=480, height=270, ctx=cpu(0))
frames_tensor = torch.stack([torch.tensor(vr[i].asnumpy()) for i in range(len(vr))])  ## Shape: (T, H, W, C)

frames_tensor = transform({"video": frames_tensor})["video"]

with torch.no_grad():
    outputs = model(pixel_values=frames_tensor.unsqueeze(0))  # add batch dimension: (1, T, C, H, W)
pred = torch.argmax(outputs.logits, dim=1).cpu().numpy()

print(model.config.id2label[pred[0]])
```
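
For reference, here is a minimal sketch of how the single-clip snippet above could be looped over a set of test clips. The `movieshots_test` directory layout (one subfolder per scale class) is an assumption for illustration, not part of the MovieNet release; the loop reuses `VideoReader`, `cpu`, `transform`, and `model` from the snippet above.

```python
import os

import torch

# Hypothetical layout: one subfolder per scale class (ECS, CS, MS, FS, LS),
# each containing .mp4 clips; adjust to however the test split is stored locally.
test_root = "movieshots_test"
correct = total = 0

for label_name in sorted(os.listdir(test_root)):
    class_dir = os.path.join(test_root, label_name)
    for fname in sorted(os.listdir(class_dir)):
        if not fname.endswith(".mp4"):
            continue
        # Same loading, transform, and prediction steps as for a single clip
        vr = VideoReader(os.path.join(class_dir, fname), width=480, height=270, ctx=cpu(0))
        frames = torch.stack([torch.tensor(vr[i].asnumpy()) for i in range(len(vr))])
        frames = transform({"video": frames})["video"]
        with torch.no_grad():
            logits = model(pixel_values=frames.unsqueeze(0)).logits
        pred_label = model.config.id2label[logits.argmax(dim=1).item()]
        correct += int(pred_label == label_name)
        total += 1

print(f"Accuracy: {correct / total:.4f}")
```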