# TimeSformer-GPT2 Video Captioning

Vision Encoder Model: [timesformer-base-finetuned-k600](https://huggingface.co/facebook/timesformer-base-finetuned-k600) \
Text Decoder Model: [gpt2](https://huggingface.co/gpt2)
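
This checkpoint is a `VisionEncoderDecoderModel` that pairs the TimeSformer encoder with the GPT-2 decoder. As a rough sketch of how such a pairing can be assembled with the standard `transformers` API (not necessarily the exact recipe used to train this checkpoint; the token-ID settings below are common GPT-2 conventions, assumed here):

```python
from transformers import AutoTokenizer, VisionEncoderDecoderModel

# stitch the pretrained encoder and decoder together; cross-attention
# layers are added to the decoder and initialized randomly, so the
# combined model needs fine-tuning before it produces useful captions
model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/timesformer-base-finetuned-k600", "gpt2"
)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model.config.decoder_start_token_id = tokenizer.bos_token_id
model.config.pad_token_id = tokenizer.eos_token_id  # GPT-2 has no pad token; reusing EOS is a common stand-in (assumption)
```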

#### Example Inference Code:
```python
import av
import numpy as np
import torch
from transformers import AutoImageProcessor, AutoTokenizer, VisionEncoderDecoderModel

device = "cuda" if torch.cuda.is_available() else "cpu"

# load pretrained processor, tokenizer, and model
image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = VisionEncoderDecoderModel.from_pretrained("Neleac/timesformer-gpt2-video-captioning").to(device)

# load video
video_path = "never_gonna_give_you_up.mp4"
container = av.open(video_path)

# extract evenly spaced frames from video
seg_len = container.streams.video[0].frames
clip_len = model.config.encoder.num_frames
indices = set(np.linspace(0, seg_len, num=clip_len, endpoint=False).astype(np.int64))
frames = []
container.seek(0)
for i, frame in enumerate(container.decode(video=0)):
    if i in indices:
        frames.append(frame.to_ndarray(format="rgb24"))

# generate caption
gen_kwargs = {
    "min_length": 10,
    "max_length": 20,
    "num_beams": 8,
}
pixel_values = image_processor(frames, return_tensors="pt").pixel_values.to(device)
tokens = model.generate(pixel_values, **gen_kwargs)
caption = tokenizer.batch_decode(tokens, skip_special_tokens=True)[0]
print(caption)  # A man and a woman are dancing on a stage in front of a mirror.
```
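
One caveat on the frame-extraction step: PyAV reports `stream.frames` as `0` for containers whose metadata carries no frame count, which would leave `indices` empty. A minimal fallback sketch (the helper name `sample_frames` is ours, not part of the model card) that decodes every frame first and samples afterwards, trading memory for robustness:

```python
import av
import numpy as np

def sample_frames(video_path, clip_len):
    # decode all frames up front, then pick clip_len evenly spaced ones;
    # avoids relying on stream.frames, at the cost of holding the whole video in memory
    with av.open(video_path) as container:
        all_frames = [f.to_ndarray(format="rgb24") for f in container.decode(video=0)]
    indices = np.linspace(0, len(all_frames), num=clip_len, endpoint=False).astype(np.int64)
    return [all_frames[i] for i in indices]
```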