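"""Probe the videos listed in a CSV with OpenCV and write each clip's frame count,
resolution, and fps back into the CSV, using a thread pool for concurrency."""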
import pandas as pd
import cv2
import os
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock
import time


class VideoProcessor:

    def __init__(self, max_workers=4):
        self.max_workers = max_workers
        self.progress_lock = Lock()
        self.processed_count = 0
        self.total_count = 0

    def get_video_properties(self, video_path):
        """
        Read basic properties of a video: frame count, height, width, and fps.

        Args:
            video_path (str): Path to the video file.

        Returns:
            tuple: (num_frame, height, width, fps), or (None, None, None, None) if reading fails.
        """
        try:
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                return None, None, None, None

            # The frame count is derived from the filename, which is assumed to
            # end in "_<startFrame>_<endFrame>" before the extension.
            filename = os.path.splitext(os.path.basename(video_path))[0]
            parts = filename.split('_')
            num_frame = int(parts[-1]) - int(parts[-2])

            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)

            cap.release()

            return num_frame, height, width, fps

        except Exception as e:
            print(f"Error reading video {video_path}: {str(e)}")
            return None, None, None, None

    def process_single_video(self, args):
        """
        Process a single video file.

        Args:
            args: (idx, video_file, video_dir)

        Returns:
            tuple: (idx, num_frame, height, width, fps, success, message)
        """
        idx, video_file, video_dir = args
        video_path = os.path.join(video_dir, video_file)

        if not os.path.exists(video_path):
            message = f"Video file not found: {video_path}"
            return idx, None, None, None, None, False, message

        num_frame, height, width, fps = self.get_video_properties(video_path)

        # Update the shared progress counters under the lock so progress output
        # stays consistent across worker threads.
        with self.progress_lock:
            self.processed_count += 1
            progress = (self.processed_count / self.total_count) * 100

            if height is not None:
                message = f"[{self.processed_count}/{self.total_count}] ({progress:.1f}%) {video_file} → {num_frame}, {width}x{height}, {fps:.2f}fps"
                success = True
                fps = round(fps, 2)
            else:
                message = f"[{self.processed_count}/{self.total_count}] ({progress:.1f}%) {video_file} → failed to read video info"
                success = False

            print(message)

        return idx, num_frame, height, width, fps, success, message

    def process_video_csv(self, csv_path, video_dir="./", output_csv_path=None, max_workers=None):
        """
        Process a CSV file with multiple threads, adding num_frame, height, width,
        and fps columns for each video.

        Args:
            csv_path (str): Path to the input CSV file (must contain a 'videoFile' column).
            video_dir (str): Directory containing the video files.
            output_csv_path (str): Path to the output CSV file; if None, the input file is overwritten.
            max_workers (int): Maximum number of threads; if None, the value from __init__ is used.
        """
        if max_workers is None:
            max_workers = self.max_workers

        try:
            df = pd.read_csv(csv_path)
            self.total_count = len(df)
            self.processed_count = 0

            print(f"Loaded CSV file with {len(df)} rows")
            print(f"Processing with {max_workers} threads...")

            # New columns, filled in as videos are processed
            df['num_frame'] = None
            df['height'] = None
            df['width'] = None
            df['fps'] = None

            tasks = [(idx, row['videoFile'], video_dir) for idx, row in df.iterrows()]

            start_time = time.time()

            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                future_to_task = {executor.submit(self.process_single_video, task): task for task in tasks}

                for future in as_completed(future_to_task):
                    idx, num_frame, height, width, fps, success, message = future.result()

                    if success and height is not None:
                        df.at[idx, 'num_frame'] = num_frame
                        df.at[idx, 'height'] = height
                        df.at[idx, 'width'] = width
                        df.at[idx, 'fps'] = fps

            end_time = time.time()
            processing_time = end_time - start_time

            if output_csv_path is None:
                output_csv_path = csv_path

            df.to_csv(output_csv_path, index=False)

            valid_videos = df['height'].notna().sum()
            print(f"\n{'='*60}")
            print("Processing complete!")
            print(f"Total processing time: {processing_time:.2f}s")
            print(f"Average per video: {processing_time/len(df):.2f}s")
            print(f"Videos processed successfully: {valid_videos}/{len(df)}")
            print(f"Results saved to: {output_csv_path}")
            print(f"{'='*60}")

            return df

        except Exception as e:
            print(f"Error during processing: {str(e)}")
            return None

def process_video_csv_multithread(csv_path, video_dir="./", output_csv_path=None, max_workers=4):
    """
    Convenience wrapper for multithreaded video processing.

    Args:
        csv_path (str): Path to the input CSV file.
        video_dir (str): Directory containing the video files.
        output_csv_path (str): Path to the output CSV file.
        max_workers (int): Maximum number of threads.
    """
    processor = VideoProcessor(max_workers=max_workers)
    return processor.process_video_csv(csv_path, video_dir, output_csv_path, max_workers)

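# Illustrative sketch of the expected input (the filenames below are hypothetical):
# the CSV needs a 'videoFile' column, and each filename is assumed to end in
# "_<startFrame>_<endFrame>" before the extension so the frame count can be
# parsed from it.
#
#   videoFile
#   walk_000120_000360.mp4
#   walk_000360_000600.mp4
#
# df = process_video_csv_multithread("videos.csv", video_dir="./clips", max_workers=8)
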
if __name__ == "__main__":
    base_names = ["sekai-game-walking-386"]

    for base_name in base_names:
        csv_file_path = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/{base_name}.csv"
        video_directory = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/{base_name}"
        output_file_path = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/{base_name}_updated.csv"
        thread_count = 32

        result_df = process_video_csv_multithread(
            csv_path=csv_file_path,
            video_dir=video_directory,
            output_csv_path=output_file_path,
            max_workers=thread_count
        )

        # Equivalent usage via the VideoProcessor class directly:
        # processor = VideoProcessor(max_workers=thread_count)
        # result_df = processor.process_video_csv(
        #     csv_path=csv_file_path,
        #     video_dir=video_directory,
        #     output_csv_path=output_file_path
        # )

        if result_df is not None:
            print("\nPreview of the processed data:")
            print(result_df[['videoFile', 'num_frame', 'height', 'width', 'fps']].head())

            print("\nVideo resolution statistics:")
            resolution_stats = result_df.groupby(['width', 'height']).size().reset_index(name='count')
            print(resolution_stats.head(10))