# embodied2/extract_frames.py
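"""Extract visual inputs for EgoPlan multiple-choice QA samples.

For each annotation in the QA file, this script either cuts the
[start_frame, current_observation_frame] span of the source video into a
standalone .mp4 clip (input_type == 'video') or uniformly samples
`frame_number` keyframes from that span and saves them as PNGs
(input_type == 'image').
"""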
import os
import csv
import re
import glob
import json
import random
import argparse
import io
import tempfile
import concurrent.futures

import numpy as np
import imageio
from tqdm import tqdm

# Fix random seeds for reproducibility.
seed = 0
random.seed(seed)
np.random.seed(seed)
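# Prompt template for the multiple-choice QA task: the first placeholder is the
# task goal, the remaining four are the candidate next actions (options A-D).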
QA_template = """
Select the best answer to the following multiple-choice question based on the video. Respond with only the letter (A, B, C, or D) of the correct option.
Considering the progress shown in the video and my current observation in the last frame, what action should I take next in order to {}?
A. {}
B. {}
C. {}
D. {}
"""
# from petrel_client.client import Client
# client = Client()
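# The petrel_client object-storage path above is disabled; videos are read
# directly from the local filesystem in cut_keyframes / cut_video_clip below.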
def extract_characters_regex(s):
    # https://github.com/thanku-all/parse_answer/blob/main/eval_your_results.py
    # Strip common answer prefixes, then return the first option letter (A-D) found.
    s = s.strip()
    answer_prefixes = [
        "The best answer is",
        "The correct answer is",
        "The answer is",
        "The answer",
        "The best option is",
        "The correct option is",
        "Best answer:",
        "Best option:",
        "Answer:",
        "Option:",
        "The correct answer",
        "The correct option",
    ]
    for answer_prefix in answer_prefixes:
        s = s.replace(answer_prefix, "")

    # Long free-form responses with no option letter are treated as unparseable.
    if len(s.split()) > 10 and not re.search("[ABCD]", s):
        return ""
    matches = re.search(r"[ABCD]", s)
    if matches is None:
        return ""
    return matches[0]
def cut_keyframes(video_dir, video_id, start_frame_id, end_frame_id, frame_number, keyframes_dir):
    # Uniformly sample `frame_number` frame indices from [start_frame_id, end_frame_id].
    frame_idx = np.linspace(start_frame_id, end_frame_id, frame_number, endpoint=True, dtype=int)
    print(f"start frame id: {start_frame_id}, end frame id: {end_frame_id}, sampled frames: {frame_idx}")
    # video_bytes = client.get()
    # try:
    video_path = os.path.join(video_dir, video_id.split('_')[0], video_id + '.MP4')
    if os.path.exists(video_path):
        clip = imageio.get_reader(video_path)
        out_dir = os.path.join(keyframes_dir, video_id, f"{end_frame_id}")
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # Decode and save each sampled frame as a PNG.
        for idx, frame_id in enumerate(frame_idx):
            frame = clip.get_data(frame_id)
            imageio.imwrite(os.path.join(out_dir, f'frame-{idx}_frameID-{frame_id}.png'), frame)
    # except:
    #     print(video_id)
def cut_video_clip(video_dir, qa_id, start_frame_id, end_frame_id, clip_dir):
    # Write frames [start_frame_id, end_frame_id] of the source video to a standalone clip.
    if not os.path.exists(clip_dir):
        os.makedirs(clip_dir)
    clip = imageio.get_reader(os.path.join(video_dir, qa_id.split('_')[0] + '.mp4'))
    fps = clip.get_meta_data()['fps']
    writer = imageio.get_writer(os.path.join(clip_dir, qa_id + '.mp4'), fps=fps)
    for i in range(start_frame_id, end_frame_id + 1):
        frame = clip.get_data(i)
        writer.append_data(frame)
    writer.close()
def run_inference(model, input_type, qa_anno, video_dir, output_dir, clip_dir, keyframes_dir, frame_number):
    count, correct = 0, 0  # accuracy counters (not updated in this extraction-only script)
    output_f = open(output_dir, "a")

    def extract_frames(qa_item):
        video_id = qa_item['video_id']
        qa_id = qa_item['sample_id']
        end_frame_id = qa_item['current_observation_frame']
        # The clip starts at the first recorded progress step; if there is none,
        # fall back to at most 500 frames before the current observation.
        if len(qa_item['task_progress_metadata']) > 0:
            start_frame_id = qa_item['task_progress_metadata'][0]['start_frame']
        else:
            start_frame_id = max(end_frame_id - 500, 0)
        if input_type == 'video':
            visual_input = os.path.join(clip_dir, qa_id + '.mp4')
            if not os.path.exists(visual_input):
                cut_video_clip(video_dir, qa_id, start_frame_id, end_frame_id, clip_dir)
        elif input_type == 'image':
            if not os.path.exists(os.path.join(keyframes_dir, video_id, f"{end_frame_id}")):
                cut_keyframes(video_dir, video_id, start_frame_id, end_frame_id, frame_number, keyframes_dir)

    # for qa_item in tqdm(qa_anno):
    #     extract_frames(qa_item)
    #     break

    # Process samples in parallel with a ThreadPoolExecutor;
    # adjust max_workers to match your CPU core count.
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        futures = {executor.submit(extract_frames, qa_item): qa_item for qa_item in qa_anno}
        for future in concurrent.futures.as_completed(futures):
            qa_item = futures[future]
            try:
                future.result()
            except Exception as exc:
                print(f"{qa_item} generated an exception: {exc}")
if __name__ == '__main__':
    # Machine-specific paths and settings; adjust video_dir and the output locations as needed.
    model, input_type = None, "image"
    qa_anno = json.load(open("EgoPlan_validation.json"))
    video_dir = "/mnt/petrelfs/share_data/haohaoran/Epic_Kitchen_100/extracted_video_files/3h91syskeag572hl6tvuovwv4d/videos/test"
    output_dir = "output"
    clip_dir = 'clip_dir'
    keyframes_dir = 'frames'
    frame_number = 16
    run_inference(model, input_type, qa_anno, video_dir, output_dir, clip_dir, keyframes_dir, frame_number)
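# Keyframes are saved to <keyframes_dir>/<video_id>/<end_frame_id>/frame-<idx>_frameID-<frame_id>.png;
# video clips are saved to <clip_dir>/<sample_id>.mp4.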