Fang Yunhao committed
Commit 5c96ebf · 1 Parent(s): 6f3d269

Upgrade evaluation script.

Files changed (1)
  1. evaluation.py +180 -131
evaluation.py CHANGED
@@ -1,157 +1,206 @@
-import os, llava, argparse
 import numpy as np
 from mmengine import load, dump
 from tqdm import tqdm
 from collections import defaultdict


-PROMPT_TEMPLATES = {
-    "instruction": "Evaluate if this video follows the instruction: '{instruction}'. Use the following scoring criteria:\n\n- 0: The video does not follow the instruction at all.\n- 1: The video includes the correct object but performs the wrong action, or vice versa.\n- 2: The video follows the instruction and shows a tendency toward the intended action but does not fully achieve the goal.\n- 3: The video follows the instruction precisely and successfully achieves the intended goal.\n\nLet's analyze step-by-step and conclude with 'Score: [score]'.",
-    "physical_laws": 'Watch the video and determine if it shows any \'{physical_laws}\' Let\'s think step-by-step and conclude with "Yes" or "No".',
-    "common_sense": 'Does the video exhibit \'{common_sense}\'? Let\'s think step-by-step and conclude with "Yes" or "No".',
-}
-
-QUESTION_POOL = {
-    "instruction": None,
-    "physical_laws": [
-        "Violation of Newton's Law: Objects move without any external force.",
-        "Violation of the Law of Conservation of Mass or Solid Constitutive Law: Objects deform or distort irregularly.",
-        "Violation of Fluid Constitutive Law: Liquids flow in an unnatural or irregular manner.",
-        "Violation of Non-physical Penetration: Objects unnaturally pass through each other.",
-        "Violation of Gravity: Objects behave inconsistently with gravity, such as floating in the air.",
-    ],
-    "common_sense": [
-        "Poor Aesthetics: Visually unappealing or low-quality content.",
-        "Temporal Inconsistency: Noticeable flickering, choppy motion, or abrupt appearance/disappearance of irrelevant objects.",
-    ],
-}

-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description="Script for evaluating the WorldModelBenchmark."
-    )
-    parser.add_argument(
-        "--judge",
-        type=str,
-        help="Path to judge model checkpoint.",
-    )
-    parser.add_argument(
-        "--video_dir",
-        type=str,
-        help="Path to the generated video directory.",
-    )
-    parser.add_argument(
-        "--save_name",
-        type=str,
-        help="Path to save evaluation results.",
-    )
-    parser.add_argument(
-        "--cot", action="store_true", help="Enable or disable Chain-of-Thought output."
-    )
     args = parser.parse_args()

     validation_set = load("./worldmodelbench.json")
-    if args.cot:
-        args.save_name += "_cot"
-    results = None
-    if os.path.exists(args.save_name):
-        results = load(args.save_name)
         try:
-            preds = results["preds"]
-            accs = results["accs"]
-        except:
-            raise "Expected keys are not found in the results."
     else:
-        model = llava.load(args.judge)
-
-        preds = dict()
         accs = defaultdict(list)
     for vid, v_i in tqdm(enumerate(validation_set), total=len(validation_set)):
-        ## Load video
-        video_name = v_i["first_frame"].split("/")[-1].split(".")[0]
-        video = os.path.join(args.video_dir, video_name + ".mp4")
-        if not os.path.exists(video):
             continue
-        video = llava.Video(video)
-        ## Traverse criterions
-        for k in ["instruction", "physical_laws", "common_sense"]:
             preds_i = []
-            prompt_template = PROMPT_TEMPLATES[k]
-            qs = QUESTION_POOL[k]
-            if qs is not None:
                 accs_i = []
-                for q in qs:
-                    if k == "physical_laws":
-                        text_prompt = prompt_template.format(
-                            physical_laws=q.lower()
-                        )
-                    else:
-                        text_prompt = prompt_template.format(common_sense=q.lower())
-                    if not args.cot:
-                        text_prompt = text_prompt.replace(
-                            "Let's think step-by-step and conclude with",
-                            "Answer with",
-                        ).replace(
-                            "Let's analyze step-by-step and conclude with",
-                            "Answer with",
-                        )
-                    pred = model.generate_content([video, text_prompt])
                     preds_i.append(pred)
-                    ## Always ask for violations, so a "No" is preferred!
                     accs_i.append("no" in pred.lower())
-                accs[k].extend(accs_i)
             else:
-                text_prompt = prompt_template.format(
-                    instruction=v_i["text_instruction"]
-                )
-                if not args.cot:
-                    text_prompt = text_prompt.replace(
-                        "Let's think step-by-step and conclude with", "Answer with"
-                    ).replace(
-                        "Let's analyze step-by-step and conclude with",
-                        "Answer with",
-                    )
-                pred = model.generate_content([video, text_prompt])
                 preds_i.append(pred)
                 try:
                     score = float(pred.split(":")[-1].strip(" ."))
-                except:
                     score = 0
-                accs[k].append(score)
             if video_name not in preds:
-                preds[video_name] = dict()
-            preds[video_name][k] = preds_i
-    ## Save results
-    # if results is None:
-    #     results = {"preds": preds, "accs": accs}
-    #     dump(results, f"./{args.save_name}.json", indent=4)
-    ## Print results
-    num_insts = len(preds)
-    total_score = 0
-    for k, v in accs.items():
-        print(k + " details:")
-        num_sub = len(v) // num_insts
-        if num_sub == 1:
-            print(f"-- overall score: {np.mean(v):.2f}.")
-            total_score += np.mean(v)
-        elif num_sub == 2:
-            sub_scores = []
-            for i, sub in enumerate(["framewise", "temporal"]):
-                print(f"-- {sub} score: {np.mean(v):.2f}.")
-                sub_scores.append(np.mean(v))
-            print(f"-- overall score: {np.mean(sub_scores):.2f}.")
-            total_score += np.mean(sub_scores)
-        elif num_sub == 5:
-            sub_scores = []
-            for i, sub in enumerate(
-                ["newton", "mass", "fluid", "penetration", "gravity"]
-            ):
-                print(f"-- {sub} score: {np.mean(v):.2f}.")
-                sub_scores.append(np.mean(v))
-            print(f"-- overall score: {np.mean(sub_scores):.2f}.")
-            total_score += np.mean(sub_scores)
-        else:
-            raise ValueError("Unexpected number of subcategories!")
-
-    print(f"\ntotal score: {total_score:.2f}.")

+from dataclasses import dataclass
+from enum import Enum
+from pathlib import Path
+from typing import Dict, List, Optional, Union
+import logging
+import os
+
 import numpy as np
 from mmengine import load, dump
 from tqdm import tqdm
 from collections import defaultdict


+class EvaluationType(Enum):
+    INSTRUCTION = "instruction"
+    PHYSICAL_LAWS = "physical_laws"
+    COMMON_SENSE = "common_sense"

+
+@dataclass
+class EvaluationConfig:
+    """Configuration for evaluation prompts and scoring criteria."""
+
+    PROMPT_TEMPLATES: Dict[str, str] = {
+        EvaluationType.INSTRUCTION.value: """
+        Evaluate if this video follows the instruction: '{instruction}'.
+        Use the following scoring criteria:
+
+        - 0: The video does not follow the instruction at all.
+        - 1: The video includes the correct object but performs the wrong action, or vice versa.
+        - 2: The video follows the instruction and shows a tendency toward the intended goal.
+        - 3: The video follows the instruction precisely and successfully achieves the goal.
+
+        Let's analyze step-by-step and conclude with 'Score: [score]'.
+        """.strip(),
+
+        EvaluationType.PHYSICAL_LAWS.value: """
+        Watch the video and determine if it shows any '{physical_laws}'
+        Let's think step-by-step and conclude with "Yes" or "No".
+        """.strip(),
+
+        EvaluationType.COMMON_SENSE.value: """
+        Does the video exhibit '{common_sense}'?
+        Let's think step-by-step and conclude with "Yes" or "No".
+        """.strip(),
+    }
+
+    QUESTION_POOL: Dict[str, Optional[List[str]]] = {
+        EvaluationType.INSTRUCTION.value: None,
+        EvaluationType.PHYSICAL_LAWS.value: [
+            "Violation of Newton's Law: Objects move without any external force.",
+            "Violation of the Law of Conservation of Mass or Solid Constitutive Law: Objects deform irregularly.",
+            "Violation of Fluid Constitutive Law: Liquids flow in an unnatural manner.",
+            "Violation of Non-physical Penetration: Objects unnaturally pass through each other.",
+            "Violation of Gravity: Objects behave inconsistently with gravity.",
+        ],
+        EvaluationType.COMMON_SENSE.value: [
+            "Poor Aesthetics: Visually unappealing or low-quality content.",
+            "Temporal Inconsistency: Noticeable flickering or abrupt changes.",
+        ],
+    }
+
+
+class WorldModelEvaluator:
+    """Evaluates world model benchmark videos using LLaVA model."""
+
+    def __init__(self, judge_path: str, video_dir: str, config: EvaluationConfig):
+        self.judge = self._load_judge(judge_path)
+        self.video_dir = Path(video_dir)
+        self.config = config
+        self.logger = logging.getLogger(__name__)
+
+    @staticmethod
+    def _load_judge(judge_path: str):
+        """Load the LLaVA judge model."""
+        import llava
+        return llava.load(judge_path)
+
+    def _load_video(self, video_name: str) -> Optional['llava.Video']:
+        """Load a video file for evaluation."""
+        video_path = self.video_dir / f"{video_name}.mp4"
+        if not video_path.exists():
+            self.logger.warning(f"Video not found: {video_path}")
+            return None
+        import llava
+        return llava.Video(str(video_path))
+
+    def evaluate_video(self, video: 'llava.Video', prompt: str, cot: bool = True) -> str:
+        """Generate evaluation content for a video."""
+        if not cot:
+            prompt = prompt.replace(
+                "Let's think step-by-step and conclude with", "Answer with"
+            ).replace(
+                "Let's analyze step-by-step and conclude with", "Answer with"
+            )
+        return self.judge.generate_content([video, prompt])
+
+    def process_results(self, preds: Dict, accs: defaultdict) -> float:
+        """Process and print evaluation results."""
+        num_insts = len(preds)
+        total_score = 0
+
+        category_mapping = {
+            2: [("framewise", "temporal")],
+            5: [("newton", "mass", "fluid", "penetration", "gravity")],
+        }
+
+        for category, scores in accs.items():
+            print(f"\n{category} details:")
+            num_sub = len(scores) // num_insts
+
+            if num_sub == 1:
+                mean_score = np.mean(scores)
+                print(f"-- overall score: {mean_score:.2f}")
+                total_score += mean_score
+            elif num_sub in category_mapping:
+                sub_scores = []
+                for i, sub in enumerate(category_mapping[num_sub][0]):
+                    sub_mean = np.mean(scores[i::num_sub])
+                    print(f"-- {sub} score: {sub_mean:.2f}")
+                    sub_scores.append(sub_mean)
+                overall_mean = np.mean(sub_scores)
+                print(f"-- overall score: {overall_mean:.2f}")
+                total_score += overall_mean
+            else:
+                raise ValueError(f"Unexpected number of subcategories: {num_sub}")
+
+        return total_score
+
+def main():
+    import argparse
+
+    parser = argparse.ArgumentParser(description="Evaluate World Model Benchmark")
+    parser.add_argument("--judge", type=str, required=True, help="Path to judge model checkpoint")
+    parser.add_argument("--video_dir", type=str, required=True, help="Path to generated video directory")
+    parser.add_argument("--save_name", type=str, required=True, help="Path to save evaluation results")
+    parser.add_argument("--cot", action="store_true", help="Enable Chain-of-Thought output")
+
     args = parser.parse_args()
+
+    # Setup logging
+    logging.basicConfig(level=logging.INFO)
+    logger = logging.getLogger(__name__)

+    # Initialize evaluator
+    config = EvaluationConfig()
+    evaluator = WorldModelEvaluator(args.judge, args.video_dir, config)
+
+    # Load validation set
     validation_set = load("./worldmodelbench.json")
+
+    # Check for existing results
+    save_path = f"{args.save_name}_cot" if args.cot else args.save_name
+    if os.path.exists(save_path):
+        results = load(save_path)
         try:
+            preds, accs = results["preds"], results["accs"]
+        except KeyError:
+            raise KeyError("Expected keys not found in results file")
     else:
+        preds = {}
         accs = defaultdict(list)
+
     for vid, v_i in tqdm(enumerate(validation_set), total=len(validation_set)):
+        video_name = Path(v_i["first_frame"]).stem
+        video = evaluator._load_video(video_name)
+        if not video:
             continue
+
+        for eval_type in EvaluationType:
             preds_i = []
+            prompt_template = config.PROMPT_TEMPLATES[eval_type.value]
+            questions = config.QUESTION_POOL[eval_type.value]
+
+            if questions:
                 accs_i = []
+                for question in questions:
+                    format_kwargs = {
+                        f"{eval_type.value}": question.lower()
+                    }
+                    prompt = prompt_template.format(**format_kwargs)
+                    pred = evaluator.evaluate_video(video, prompt, args.cot)
                     preds_i.append(pred)
                     accs_i.append("no" in pred.lower())
+                accs[eval_type.value].extend(accs_i)
             else:
+                prompt = prompt_template.format(instruction=v_i["text_instruction"])
+                pred = evaluator.evaluate_video(video, prompt, args.cot)
                 preds_i.append(pred)
                 try:
                     score = float(pred.split(":")[-1].strip(" ."))
+                except ValueError:
+                    logger.warning(f"Could not parse score from prediction: {pred}")
                     score = 0
+                accs[eval_type.value].append(score)
+
             if video_name not in preds:
+                preds[video_name] = {}
+            preds[video_name][eval_type.value] = preds_i

+    # Process and display results
+    total_score = evaluator.process_results(preds, accs)
+    print(f"\nTotal score: {total_score:.2f}")
+
+if __name__ == "__main__":
+    main()
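
For reference, a minimal standalone sketch of the two answer-parsing conventions both the old and new versions of the script rely on: the instruction prompt expects the judge's response to end in "Score: N", and the physical-laws/common-sense prompts count a video as passing when the judge answers "No" (the questions always ask about violations). The sample responses below are hypothetical, not output from this commit.

# Standalone sketch (illustrative only): parsing conventions used by evaluation.py,
# applied to made-up judge responses.
instruction_pred = "The arm reaches the cup and lifts it as instructed. Score: 3."  # hypothetical judge output
try:
    instruction_score = float(instruction_pred.split(":")[-1].strip(" ."))
except ValueError:
    instruction_score = 0
print(instruction_score)  # 3.0

violation_pred = "No, the video shows no penetration artifacts."  # hypothetical judge output
print("no" in violation_pred.lower())  # True -> counted as a pass, since the prompt asks about a violation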