import torch
import clip
import cv2
import youtube_dl
from PIL import Image, ImageDraw, ImageFont
import os
from functools import partial
from multiprocessing.pool import Pool
import shutil
from pathlib import Path
import numpy as np
import datetime
import gradio as gr


# load the CLIP model and its image preprocessing transform
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
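# note: ViT-B/32 produces 512-dimensional embeddings, which is why
# run_inference below allocates an (n_frames x 512) feature buffer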


def select_video_format(url, format_note='480p', ext='mp4'):
    """Query youtube_dl for the available formats of `url` and pick one matching the
    requested resolution (falling back through `defaults`) and extension, skipping
    AV1-encoded streams."""
    defaults = ['480p', '360p', '240p', '144p']
    ydl_opts = {}
    ydl = youtube_dl.YoutubeDL(ydl_opts)
    info_dict = ydl.extract_info(url, download=False)
    formats = info_dict.get('formats', None)
    available_format_notes = set([f['format_note'] for f in formats])
    # fall back to the first default resolution that is actually available
    if format_note not in available_format_notes:
        format_note = [d for d in defaults if d in available_format_notes][0]
    formats = [f for f in formats if f['format_note'] == format_note and f['ext'] == ext and f['vcodec'].split('.')[0] != 'av01']
    format = formats[0]
    format_id = format.get('format_id', None)
    fps = format.get('fps', None)
    print(f'format selected: {format}')
    return(format, format_id, fps)


# to-do: delete saved videos    
# testing aria2c  
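# note: using aria2c as an external downloader assumes the aria2c binary is installed on the host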
def download_video(url, format_id, n_keep=10):
    # n_keep is currently unused (see the to-do above about deleting saved videos)
    ydl_opts = {
        'format': format_id,
        'external_downloader': 'aria2c',
        'external_downloader_args': ['--max-connection-per-server=16', '--dir=videos'],
        'outtmpl': "videos/%(id)s.%(ext)s"}
    # create a directory for saved videos
    video_path = Path('videos')
    try:
        video_path.mkdir(parents=True)
    except FileExistsError:
        pass
    save_location = None  # returned as None if the download fails
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.cache.remove()
            meta = ydl.extract_info(url)
            save_location = 'videos/' + meta['id'] + '.' + meta['ext']
        except youtube_dl.DownloadError as error:
            print(f'error with download_video function: {error}')
        return(save_location)

def process_video_parallel(video, skip_frames, dest_path, num_processes, process_number):
    """Worker for frame extraction: reads the contiguous block of frames assigned to
    this process and saves every skip_frames-th frame as <dest_path>/<frame_index>.jpg."""
    cap = cv2.VideoCapture(video)
    # each worker handles frame_count // num_processes frames; any remainder at the
    # end of the video (at most num_processes - 1 frames) is not processed
    frames_per_process = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) // num_processes
    count = frames_per_process * process_number
    cap.set(cv2.CAP_PROP_POS_FRAMES, count)
    print(f"worker: {process_number}, process frames {count} ~ {frames_per_process * (process_number + 1)} \n total number of frames: {cap.get(cv2.CAP_PROP_FRAME_COUNT)} \n video: {video}; isOpen? : {cap.isOpened()}")
    while count < frames_per_process * (process_number + 1):
        ret, frame = cap.read()
        if not ret:
            break
        if count % skip_frames == 0:
            filename = f"{dest_path}/{count}.jpg"
            cv2.imwrite(filename, frame)
            # print(f"saved {filename}")
        count += 1
    cap.release()


def vid2frames(url, sampling_interval=1, ext='mp4'):
    """Download the video at `url` and extract one frame every `sampling_interval` seconds.
    Returns (skip_frames, dest_path), where dest_path holds the extracted .jpg frames."""
    # create folder for extracted frames - if the folder exists, delete it and create a new one
    dest_path = Path('frames')
    try:
        dest_path.mkdir(parents=True)
    except FileExistsError:
        shutil.rmtree(dest_path)
        dest_path.mkdir(parents=True)
    # figure out the format for download, by default 480p and .mp4
    format, format_id, fps = select_video_format(url, format_note='480p', ext=ext)
    # download the video
    video = download_video(url, format_id)
    # calculate skip_frames (number of video frames between two sampled frames)
    try:
        skip_frames = int(fps * sampling_interval)
    except TypeError:
        # fps may be missing from the format metadata; assume 30 fps
        skip_frames = int(30 * sampling_interval)

    print(f'video saved at: {video}, fps: {fps}, skip_frames: {skip_frames}')
    # extract video frames at the given sampling interval with multiprocessing
    n_workers = min(os.cpu_count(), 12)

    print(f'now extracting frames with {n_workers} processes...')

    with Pool(n_workers) as pool:
        pool.map(partial(process_video_parallel, video, skip_frames, dest_path, n_workers), range(n_workers))
    return(skip_frames, dest_path)
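
# for reference, a hypothetical direct call (made-up URL), sampling one frame every 2 seconds:
# skip_frames, dest_path = vid2frames('https://youtu.be/<video_id>', sampling_interval=2)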



def captioned_strip(images, caption=None, times=None, rows=1):
    """Arrange frames in a grid of `rows` rows, with the search query drawn as a
    caption on top and each frame's timestamp drawn in its top-left corner."""
    increased_h = 0 if caption is None else 30
    w, h = images[0].size[0], images[0].size[1]
    img = Image.new("RGB", (len(images) * w // rows, h * rows + increased_h))
    for i, img_ in enumerate(images):
        img.paste(img_, (i // rows * w, increased_h + (i % rows) * h))
    if caption is not None:
        draw = ImageDraw.Draw(img)
        # hard-coded font path: assumes the Liberation 2 fonts are installed on the host
        font = ImageFont.truetype(
            "/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 16
        )
        font_small = ImageFont.truetype("/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 12)
        draw.text((60, 3), caption, (255, 255, 255), font=font)
        # place each timestamp at the top-left corner of the frame it belongs to,
        # using the same row/column arithmetic as the paste above
        for i, ts in enumerate(times):
            draw.text(
                (i // rows * w + 40,                 # column position
                 increased_h + (i % rows) * h + 3),  # row position
                ts,
                (255, 255, 255), font=font_small)
    return img

def run_inference(url, sampling_interval, search_query, bs=526):
    """Extract frames from the video at `url`, encode them with CLIP in batches of `bs`,
    and return the frames most similar to `search_query` as a captioned image strip."""
    skip_frames, path_frames = vid2frames(url, sampling_interval)
    filenames = sorted(path_frames.glob('*.jpg'),key=lambda p: int(p.stem))
    n_frames = len(filenames)
    bs = min(n_frames,bs)
    print(f"extracted {n_frames} frames, now encoding images")
    # encoding images one batch at a time, combine all batch outputs -> image_features, size n_frames x 512
    image_features = torch.empty(size=(n_frames, 512)).to(device)
    print(f"batch size :{bs} ; number of batches: {len(range(0, n_frames,bs))}")
    for b in range(0, n_frames,bs):
        images = []
        # loop through all frames in the batch -> create batch_image_input, size bs x 3 x 224 x 224
        for filename in filenames[b:b+bs]:
            image = Image.open(filename).convert("RGB")
            images.append(preprocess(image))
        batch_image_input = torch.stack(images).to(device)
        # encoding batch_image_input -> batch_image_features
        with torch.no_grad():
            batch_image_features = model.encode_image(batch_image_input)
            batch_image_features /= batch_image_features.norm(dim=-1, keepdim=True)
        # add encoded image embedding to image_features
        image_features[b:b+bs] = batch_image_features
    # encoding search query
    with torch.no_grad():
        text_features = model.encode_text(clip.tokenize(search_query).to(device))
        text_features /= text_features.norm(dim=-1, keepdim=True)
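    # both the frame and text embeddings are unit-normalized above, so the matrix
    # product below is cosine similarity (scaled by 100), with shape n_frames x 1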
    print(image_features.dtype, text_features.dtype)
    similarity = (100.0 * image_features @ text_features.T)
    # pick the 4 best-matching frames (fewer if the video yielded fewer frames)
    values, indices = similarity.topk(min(4, n_frames), dim=0)

    best_frames = [Image.open(filenames[ind]).convert("RGB") for ind in indices]
    # the i-th saved frame was sampled at frame count i * skip_frames,
    # i.e. roughly i * sampling_interval seconds into the video
    times = [f'{datetime.timedelta(seconds=ind[0].item() * sampling_interval)}' for ind in indices]
    image_output = captioned_strip(best_frames, search_query, times, 2)
    title = search_query
    return(title, image_output)
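
# for reference, a hypothetical direct call outside the Gradio UI (made-up URL and query):
# title, image_output = run_inference('https://youtu.be/<video_id>', 1, 'a dog catching a frisbee')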

inputs = [gr.inputs.Textbox(label="Give us the link to your YouTube video!"),
          gr.Number(5, label='sampling interval (seconds)'),
          gr.inputs.Textbox(label="What do you want to search?")]
outputs = [
    gr.outputs.HTML(label=""),  # To be used as title
    gr.outputs.Image(label=""),
]

gr.Interface(
    run_inference,
    inputs=inputs,
    outputs=outputs,
    title="It Happened One Frame",
    description='A CLIP-based app that searches video frames based on a text query',
    examples=[
        ['https://youtu.be/v1rkzUIL8oc', 1, "James Cagney dancing down the stairs"],
        ['https://youtu.be/k4R5wZs8cxI', 1, "James Cagney smashes a grapefruit into Mae Clarke's face"],
        ['https://youtu.be/0diCvgWv_ng', 1, "little Deborah practicing her ballet while wearing a tutu in empty restaurant"]
    ]
).launch(debug=True,enable_queue=True)