{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "ename": "FileNotFoundError", "evalue": "[Errno 2] No such file or directory: 'path/to/your/images'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[1], line 31\u001b[0m\n\u001b[1;32m 28\u001b[0m output_video \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124moutput_video.mp4\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 29\u001b[0m frame_rate \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m30\u001b[39m \u001b[38;5;66;03m# Frames per second\u001b[39;00m\n\u001b[0;32m---> 31\u001b[0m \u001b[43mcreate_video_from_frames\u001b[49m\u001b[43m(\u001b[49m\u001b[43minput_directory\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moutput_video\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mframe_rate\u001b[49m\u001b[43m)\u001b[49m\n", "Cell \u001b[0;32mIn[1], line 6\u001b[0m, in \u001b[0;36mcreate_video_from_frames\u001b[0;34m(input_dir, output_file, frame_rate)\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcreate_video_from_frames\u001b[39m(input_dir, output_file, frame_rate):\n\u001b[1;32m 5\u001b[0m \u001b[38;5;66;03m# Get all image files from the input directory\u001b[39;00m\n\u001b[0;32m----> 6\u001b[0m images \u001b[38;5;241m=\u001b[39m [img \u001b[38;5;28;01mfor\u001b[39;00m img \u001b[38;5;129;01min\u001b[39;00m \u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlistdir\u001b[49m\u001b[43m(\u001b[49m\u001b[43minput_dir\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m img\u001b[38;5;241m.\u001b[39mendswith(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m.png\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;129;01mor\u001b[39;00m img\u001b[38;5;241m.\u001b[39mendswith(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m.jpg\u001b[39m\u001b[38;5;124m\"\u001b[39m)]\n\u001b[1;32m 7\u001b[0m images\u001b[38;5;241m.\u001b[39msort() \u001b[38;5;66;03m# Ensure the images are sorted\u001b[39;00m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;66;03m# Read the first image to get the dimensions\u001b[39;00m\n", "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'path/to/your/images'" ] } ], "source": [ "import cv2\n", "import os\n", "\n", "def create_video_from_frames(input_dir, output_file, frame_rate):\n", " # Get all image files from the input directory\n", " images = [img for img in os.listdir(input_dir) if img.endswith(\".png\") or img.endswith(\".jpg\")]\n", " images.sort() # Ensure the images are sorted\n", "\n", " # Read the first image to get the dimensions\n", " frame = cv2.imread(os.path.join(input_dir, images[0]))\n", " height, width, layers = frame.shape\n", "\n", " # Initialize the video writer\n", " fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Codec for .mp4 file\n", " video = cv2.VideoWriter(output_file, fourcc, frame_rate, (width, height))\n", "\n", " # Write each image to the video\n", " for image in images:\n", " img_path = os.path.join(input_dir, image)\n", " frame = cv2.imread(img_path)\n", " video.write(frame)\n", "\n", " # Release the video writer\n", " video.release()\n", "\n", "# Example usage\n", "input_directory = 'path/to/your/images'\n", "output_video = 'output_video.mp4'\n", "frame_rate = 30 # Frames per second\n", "\n", "create_video_from_frames(input_directory, output_video, 
frame_rate)" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import json\n", "with open(\"./all_subsets.json\", 'r') as f:\n", " examples = json.load(f)" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "15" ] }, "execution_count": 33, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(examples)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'r003679_00.jpg'" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "examples[0]['images'][0]" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "dog rolls over and falls off couch\n", "Created video for r003679\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "[ERROR:0@119.424] global cap_ffmpeg_impl.hpp:3130 open Could not find encoder for codec_id=27, error: Encoder not found\n", "[ERROR:0@119.424] global cap_ffmpeg_impl.hpp:3208 open VIDEOIO/FFMPEG: Failed to initialize VideoWriter\n" ] } ], "source": [ "\n", "import os\n", "import cv2\n", "import regex as re\n", "import shutil\n", "for item in examples:\n", " video_id = item['images'][0].split(\"_\")[0]\n", " images = [os.path.join(video_id, x) for x in item['images']]\n", " output_file = f\"{video_id}.mp4\"\n", " frame = cv2.imread(images[0])\n", " if frame is None:\n", " print(f\"Skipping {video_id}\")\n", " continue\n", " height, width, layers = frame.shape\n", " item['video'] = output_file\n", " \n", " pattern = r'(?<=For this item, the text prompt is ).+(?=,)'\n", " prompt = re.findall(pattern, item['conversations'][0]['value'])[0]\n", " print(prompt)\n", " item['prompt'] = prompt.strip()\n", " \n", " # Initialize the video writer\n", " # fourcc = cv2.VideoWriter_fourcc(*'H264') # Codec for .mp4 file|\n", " fourcc = cv2.VideoWriter_fourcc(*'avc3') # Codec for .mp4 file|\n", " frame_rate = 8\n", " video = cv2.VideoWriter(output_file, fourcc, frame_rate, (width, height))\n", "\n", " # Write each image to the video\n", " for image in images:\n", " frame = cv2.imread(image)\n", " video.write(frame)\n", " print(f\"Created video for {video_id}\")\n", " \n", " # shutil.rmtree(video_id)\n", "\n", " # Release the video writer\n", " video.release()\n", " break\n", " " ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [], "source": [ "with open(\"./all_subsets.json\", 'w') as f:\n", " json.dump(examples, f, indent=4)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 'real_r003679',\n", " 'images': ['r003679_00.jpg',\n", " 'r003679_01.jpg',\n", " 'r003679_02.jpg',\n", " 'r003679_03.jpg',\n", " 'r003679_04.jpg',\n", " 'r003679_05.jpg',\n", " 'r003679_06.jpg',\n", " 'r003679_07.jpg',\n", " 'r003679_08.jpg',\n", " 'r003679_09.jpg',\n", " 'r003679_10.jpg',\n", " 'r003679_11.jpg',\n", " 'r003679_12.jpg',\n", " 'r003679_13.jpg',\n", " 'r003679_14.jpg',\n", " 'r003679_15.jpg',\n", " 'r003679_16.jpg',\n", " 'r003679_17.jpg',\n", " 'r003679_18.jpg',\n", " 'r003679_19.jpg',\n", " 'r003679_20.jpg',\n", " 'r003679_21.jpg',\n", " 'r003679_22.jpg',\n", " 'r003679_23.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for 
generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is dog rolls over and falls off couch,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 3\\n object consistency: 3\\n dynamic degree: 3\\n motion smoothness: 3\\n text-to-video alignment: 3\\n factual consistency: 3\\n overall score: 3\\n'}]},\n", " {'id': 'real_r100916',\n", " 'images': ['r100916_00.jpg',\n", " 'r100916_01.jpg',\n", " 'r100916_02.jpg',\n", " 'r100916_03.jpg',\n", " 'r100916_04.jpg',\n", " 'r100916_05.jpg',\n", " 'r100916_06.jpg',\n", " 'r100916_07.jpg',\n", " 'r100916_08.jpg',\n", " 'r100916_09.jpg',\n", " 'r100916_10.jpg',\n", " 'r100916_11.jpg',\n", " 'r100916_12.jpg',\n", " 'r100916_13.jpg',\n", " 'r100916_14.jpg',\n", " 'r100916_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is A young man is standing in front of a glacier with mountains in the background.,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 3\\n object consistency: 3\\n dynamic degree: 3\\n motion smoothness: 3\\n text-to-video alignment: 3\\n factual consistency: 3\\n overall score: 3\\n'}]},\n", " {'id': 'real_r004061',\n", " 'images': ['r004061_00.jpg',\n", " 'r004061_01.jpg',\n", " 'r004061_02.jpg',\n", " 'r004061_03.jpg',\n", " 'r004061_04.jpg',\n", " 'r004061_05.jpg',\n", " 'r004061_06.jpg',\n", " 'r004061_07.jpg',\n", " 'r004061_08.jpg',\n", " 'r004061_09.jpg',\n", " 'r004061_10.jpg',\n", " 'r004061_11.jpg',\n", " 'r004061_12.jpg',\n", " 'r004061_13.jpg',\n", " 'r004061_14.jpg',\n", " 'r004061_15.jpg',\n", " 'r004061_16.jpg',\n", " 'r004061_17.jpg',\n", " 'r004061_18.jpg',\n", " 'r004061_19.jpg',\n", " 'r004061_20.jpg',\n", " 'r004061_21.jpg',\n", " 'r004061_22.jpg',\n", " 'r004061_23.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) 
text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is a man wearing a striped blanket over his backpack runs behind two other men.,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 3\\n object consistency: 3\\n dynamic degree: 3\\n motion smoothness: 3\\n text-to-video alignment: 3\\n factual consistency: 3\\n overall score: 3\\n'}]},\n", " {'id': 'worsen_gen_b404675',\n", " 'images': ['b404675_00.jpg',\n", " 'b404675_01.jpg',\n", " 'b404675_02.jpg',\n", " 'b404675_03.jpg',\n", " 'b404675_04.jpg',\n", " 'b404675_05.jpg',\n", " 'b404675_06.jpg',\n", " 'b404675_07.jpg',\n", " 'b404675_08.jpg',\n", " 'b404675_09.jpg',\n", " 'b404675_10.jpg',\n", " 'b404675_11.jpg',\n", " 'b404675_12.jpg',\n", " 'b404675_13.jpg',\n", " 'b404675_14.jpg',\n", " 'b404675_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is After a few years, Ghulam Muhammad became the owner of his business ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 1\\n object consistency: 1\\n dynamic degree: 3\\n motion smoothness: 3\\n text-to-video alignment: 2\\n factual consistency: 3\\n overall score: 1\\n'}]},\n", " {'id': 'worsen_gen_b402727',\n", " 'images': ['b402727_00.jpg',\n", " 'b402727_01.jpg',\n", " 'b402727_02.jpg',\n", " 'b402727_03.jpg',\n", " 'b402727_04.jpg',\n", " 'b402727_05.jpg',\n", " 'b402727_06.jpg',\n", " 'b402727_07.jpg',\n", " 'b402727_08.jpg',\n", " 'b402727_09.jpg',\n", " 'b402727_10.jpg',\n", " 'b402727_11.jpg',\n", " 'b402727_12.jpg',\n", " 'b402727_13.jpg',\n", " 'b402727_14.jpg',\n", " 'b402727_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 
1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is view down to the path in the field, moving forward, night, dark. ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 1\\n object consistency: 1\\n dynamic degree: 3\\n motion smoothness: 3\\n text-to-video alignment: 3\\n factual consistency: 3\\n overall score: 1\\n'}]},\n", " {'id': 'worsen_gen_b304986',\n", " 'images': ['b304986_00.jpg',\n", " 'b304986_01.jpg',\n", " 'b304986_02.jpg',\n", " 'b304986_03.jpg',\n", " 'b304986_04.jpg',\n", " 'b304986_05.jpg',\n", " 'b304986_06.jpg',\n", " 'b304986_07.jpg',\n", " 'b304986_08.jpg',\n", " 'b304986_09.jpg',\n", " 'b304986_10.jpg',\n", " 'b304986_11.jpg',\n", " 'b304986_12.jpg',\n", " 'b304986_13.jpg',\n", " 'b304986_14.jpg',\n", " 'b304986_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is Old Philippino man, close up, subtle smile, teats of joy, in old wood room ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 3\\n object consistency: 1\\n dynamic degree: 3\\n motion smoothness: 1\\n text-to-video alignment: 2\\n factual consistency: 3\\n overall score: 1\\n'}]},\n", " {'id': 'static_d500937',\n", " 'images': ['d500937_00.jpg',\n", " 'd500937_01.jpg',\n", " 'd500937_02.jpg',\n", " 'd500937_03.jpg',\n", " 'd500937_04.jpg',\n", " 'd500937_05.jpg',\n", " 'd500937_06.jpg',\n", " 'd500937_07.jpg',\n", " 'd500937_08.jpg',\n", " 'd500937_09.jpg',\n", " 'd500937_10.jpg',\n", " 'd500937_11.jpg',\n", " 'd500937_12.jpg',\n", " 'd500937_13.jpg',\n", " 'd500937_14.jpg',\n", " 'd500937_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is write y in the garden with flowers ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 1\\n object consistency: 3\\n dynamic degree: 1\\n motion smoothness: 1\\n 
text-to-video alignment: 1\\n factual consistency: 1\\n overall score: 1\\n'}]},\n", " {'id': 'static_d500506',\n", " 'images': ['d500506_00.jpg',\n", " 'd500506_01.jpg',\n", " 'd500506_02.jpg',\n", " 'd500506_03.jpg',\n", " 'd500506_04.jpg',\n", " 'd500506_05.jpg',\n", " 'd500506_06.jpg',\n", " 'd500506_07.jpg',\n", " 'd500506_08.jpg',\n", " 'd500506_09.jpg',\n", " 'd500506_10.jpg',\n", " 'd500506_11.jpg',\n", " 'd500506_12.jpg',\n", " 'd500506_13.jpg',\n", " 'd500506_14.jpg',\n", " 'd500506_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is a boy paiting a beautiful landscape ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 1\\n object consistency: 3\\n dynamic degree: 1\\n motion smoothness: 1\\n text-to-video alignment: 1\\n factual consistency: 1\\n overall score: 1\\n'}]},\n", " {'id': 'static_d401950',\n", " 'images': ['d401950_00.jpg',\n", " 'd401950_01.jpg',\n", " 'd401950_02.jpg',\n", " 'd401950_03.jpg',\n", " 'd401950_04.jpg',\n", " 'd401950_05.jpg',\n", " 'd401950_06.jpg',\n", " 'd401950_07.jpg',\n", " 'd401950_08.jpg',\n", " 'd401950_09.jpg',\n", " 'd401950_10.jpg',\n", " 'd401950_11.jpg',\n", " 'd401950_12.jpg',\n", " 'd401950_13.jpg',\n", " 'd401950_14.jpg',\n", " 'd401950_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is sneaker with exclusive design worth 1 billion ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 1\\n object consistency: 3\\n dynamic degree: 1\\n motion smoothness: 1\\n text-to-video alignment: 1\\n factual consistency: 1\\n overall score: 1\\n'}]},\n", " {'id': 'insf_a500251',\n", " 'images': ['a500251_00.jpg',\n", " 'a500251_01.jpg',\n", " 'a500251_02.jpg',\n", " 'a500251_03.jpg',\n", " 'a500251_04.jpg',\n", " 'a500251_05.jpg',\n", " 'a500251_06.jpg',\n", " 'a500251_07.jpg',\n", " 'a500251_08.jpg',\n", " 'a500251_09.jpg',\n", " 'a500251_10.jpg',\n", " 
'a500251_11.jpg',\n", " 'a500251_12.jpg',\n", " 'a500251_13.jpg',\n", " 'a500251_14.jpg',\n", " 'a500251_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is :sci fi war in the moon ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 1\\n object consistency: 1\\n dynamic degree: 3\\n motion smoothness: 1\\n text-to-video alignment: 1\\n factual consistency: 1\\n overall score: 1\\n'}]},\n", " {'id': 'insf_a400480',\n", " 'images': ['a400480_00.jpg',\n", " 'a400480_01.jpg',\n", " 'a400480_02.jpg',\n", " 'a400480_03.jpg',\n", " 'a400480_04.jpg',\n", " 'a400480_05.jpg',\n", " 'a400480_06.jpg',\n", " 'a400480_07.jpg',\n", " 'a400480_08.jpg',\n", " 'a400480_09.jpg',\n", " 'a400480_10.jpg',\n", " 'a400480_11.jpg',\n", " 'a400480_12.jpg',\n", " 'a400480_13.jpg',\n", " 'a400480_14.jpg',\n", " 'a400480_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is for a dog accessories brand a welcome image: brand name: hot doggies ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 1\\n object consistency: 1\\n dynamic degree: 3\\n motion smoothness: 1\\n text-to-video alignment: 1\\n factual consistency: 1\\n overall score: 1\\n'}]},\n", " {'id': 'insf_a500010',\n", " 'images': ['a500010_00.jpg',\n", " 'a500010_01.jpg',\n", " 'a500010_02.jpg',\n", " 'a500010_03.jpg',\n", " 'a500010_04.jpg',\n", " 'a500010_05.jpg',\n", " 'a500010_06.jpg',\n", " 'a500010_07.jpg',\n", " 'a500010_08.jpg',\n", " 'a500010_09.jpg',\n", " 'a500010_10.jpg',\n", " 'a500010_11.jpg',\n", " 'a500010_12.jpg',\n", " 'a500010_13.jpg',\n", " 'a500010_14.jpg',\n", " 'a500010_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 
7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is a druid in the forest cinematic ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 1\\n object consistency: 1\\n dynamic degree: 3\\n motion smoothness: 1\\n text-to-video alignment: 1\\n factual consistency: 1\\n overall score: 1\\n'}]},\n", " {'id': 'lab_3005033',\n", " 'images': ['3005033_00.jpg',\n", " '3005033_01.jpg',\n", " '3005033_02.jpg',\n", " '3005033_03.jpg',\n", " '3005033_04.jpg',\n", " '3005033_05.jpg',\n", " '3005033_06.jpg',\n", " '3005033_07.jpg',\n", " '3005033_08.jpg',\n", " '3005033_09.jpg',\n", " '3005033_10.jpg',\n", " '3005033_11.jpg',\n", " '3005033_12.jpg',\n", " '3005033_13.jpg',\n", " '3005033_14.jpg',\n", " '3005033_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is a robot vacuum traveling through southeast asian countries ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 3\\n object consistency: 2\\n dynamic degree: 3\\n motion smoothness: 2\\n text-to-video alignment: 2\\n factual consistency: 3\\n overall score: 2\\n'}]},\n", " {'id': 'lab_7004180',\n", " 'images': ['7004180_00.jpg',\n", " '7004180_01.jpg',\n", " '7004180_02.jpg',\n", " '7004180_03.jpg',\n", " '7004180_04.jpg',\n", " '7004180_05.jpg',\n", " '7004180_06.jpg',\n", " '7004180_07.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is jiaxuan is the most 
beautiful ,elegant,charming girl in the world. billions of boys fell in love with her ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 3\\n object consistency: 3\\n dynamic degree: 3\\n motion smoothness: 3\\n text-to-video alignment: 3\\n factual consistency: 3\\n overall score: 3\\n'}]},\n", " {'id': 'lab_1006309',\n", " 'images': ['1006309_00.jpg',\n", " '1006309_01.jpg',\n", " '1006309_02.jpg',\n", " '1006309_03.jpg',\n", " '1006309_04.jpg',\n", " '1006309_05.jpg',\n", " '1006309_06.jpg',\n", " '1006309_07.jpg',\n", " '1006309_08.jpg',\n", " '1006309_09.jpg',\n", " '1006309_10.jpg',\n", " '1006309_11.jpg',\n", " '1006309_12.jpg',\n", " '1006309_13.jpg',\n", " '1006309_14.jpg',\n", " '1006309_15.jpg'],\n", " 'conversations': [{'from': 'human',\n", " 'value': \"\\nSuppose you are an expert in judging and evaluating the quality of AI-generated videos, \\nplease watch the following frames of a given video and see the text prompt for generating the video, \\nthen give scores from 7 different dimensions:\\n(1) visual quality, \\n(2) object consistency,\\n(3) dynamic degree,\\n(4) motion smoothness,\\n(5) text-to-video alignment,\\n(6) factual consistency, \\n(7) overall score\\nfor each dimension, output a number from [1,2,3], in which '1' stands for 'Bad', '2' stands for 'Average', '3' stands for 'Good'.\\nHere is an output example: \\nvisual quality: 3\\nobject consistency: 2 \\ndynamic degree: 2\\nmotion smoothness: 1\\ntext-to-video alignment: 1\\nfactual consistency: 2\\noverall score: 1\\n\\nFor this item, the text prompt is scary halloween skeletons are having a conversation with each other ,\\nall the frames of video are as follows: \\n\\n \\n\"},\n", " {'from': 'gpt',\n", " 'value': 'visual quality: 3\\n object consistency: 2\\n dynamic degree: 3\\n motion smoothness: 2\\n text-to-video alignment: 3\\n factual consistency: 2\\n overall score: 3\\n'}]}]" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "examples" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "e" ] } ], "metadata": { "kernelspec": { "display_name": "mantis", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.19" } }, "nbformat": 4, "nbformat_minor": 2 }