Fang Yunhao committed
Commit 5561cce · 1 Parent(s): 5c96ebf

Formatted.

Files changed (1)
  1. evaluation.py +35 -27
evaluation.py CHANGED
@@ -20,7 +20,7 @@ class EvaluationType(Enum):
 @dataclass
 class EvaluationConfig:
     """Configuration for evaluation prompts and scoring criteria."""
-
+
     PROMPT_TEMPLATES: Dict[str, str] = {
         EvaluationType.INSTRUCTION.value: """
         Evaluate if this video follows the instruction: '{instruction}'.
@@ -33,12 +33,10 @@ class EvaluationConfig:
 
         Let's analyze step-by-step and conclude with 'Score: [score]'.
         """.strip(),
-
         EvaluationType.PHYSICAL_LAWS.value: """
         Watch the video and determine if it shows any '{physical_laws}'
         Let's think step-by-step and conclude with "Yes" or "No".
         """.strip(),
-
         EvaluationType.COMMON_SENSE.value: """
         Does the video exhibit '{common_sense}'?
         Let's think step-by-step and conclude with "Yes" or "No".
@@ -63,7 +61,7 @@ class EvaluationConfig:
 
 class WorldModelEvaluator:
     """Evaluates world model benchmark videos using LLaVA model."""
-
+
     def __init__(self, judge_path: str, video_dir: str, config: EvaluationConfig):
         self.judge = self._load_judge(judge_path)
         self.video_dir = Path(video_dir)
@@ -74,32 +72,34 @@ class WorldModelEvaluator:
     def _load_judge(judge_path: str):
         """Load the LLaVA judge model."""
         import llava
+
         return llava.load(judge_path)
 
-    def _load_video(self, video_name: str) -> Optional['llava.Video']:
+    def _load_video(self, video_name: str) -> Optional["llava.Video"]:
         """Load a video file for evaluation."""
         video_path = self.video_dir / f"{video_name}.mp4"
         if not video_path.exists():
             self.logger.warning(f"Video not found: {video_path}")
             return None
         import llava
+
         return llava.Video(str(video_path))
 
-    def evaluate_video(self, video: 'llava.Video', prompt: str, cot: bool = True) -> str:
+    def evaluate_video(
+        self, video: "llava.Video", prompt: str, cot: bool = True
+    ) -> str:
         """Generate evaluation content for a video."""
         if not cot:
             prompt = prompt.replace(
                 "Let's think step-by-step and conclude with", "Answer with"
-            ).replace(
-                "Let's analyze step-by-step and conclude with", "Answer with"
-            )
+            ).replace("Let's analyze step-by-step and conclude with", "Answer with")
         return self.judge.generate_content([video, prompt])
 
     def process_results(self, preds: Dict, accs: defaultdict) -> float:
         """Process and print evaluation results."""
         num_insts = len(preds)
         total_score = 0
-
+
         category_mapping = {
             2: [("framewise", "temporal")],
             5: [("newton", "mass", "fluid", "penetration", "gravity")],
@@ -108,7 +108,7 @@ class WorldModelEvaluator:
         for category, scores in accs.items():
             print(f"\n{category} details:")
             num_sub = len(scores) // num_insts
-
+
             if num_sub == 1:
                 mean_score = np.mean(scores)
                 print(f"-- overall score: {mean_score:.2f}")
@@ -127,17 +127,26 @@ class WorldModelEvaluator:
 
         return total_score
 
+
 def main():
     import argparse
-
+
     parser = argparse.ArgumentParser(description="Evaluate World Model Benchmark")
-    parser.add_argument("--judge", type=str, required=True, help="Path to judge model checkpoint")
-    parser.add_argument("--video_dir", type=str, required=True, help="Path to generated video directory")
-    parser.add_argument("--save_name", type=str, required=True, help="Path to save evaluation results")
-    parser.add_argument("--cot", action="store_true", help="Enable Chain-of-Thought output")
-
+    parser.add_argument(
+        "--judge", type=str, required=True, help="Path to judge model checkpoint"
+    )
+    parser.add_argument(
+        "--video_dir", type=str, required=True, help="Path to generated video directory"
+    )
+    parser.add_argument(
+        "--save_name", type=str, required=True, help="Path to save evaluation results"
+    )
+    parser.add_argument(
+        "--cot", action="store_true", help="Enable Chain-of-Thought output"
+    )
+
     args = parser.parse_args()
-
+
     # Setup logging
     logging.basicConfig(level=logging.INFO)
     logger = logging.getLogger(__name__)
@@ -145,10 +154,10 @@ def main():
     # Initialize evaluator
     config = EvaluationConfig()
     evaluator = WorldModelEvaluator(args.judge, args.video_dir, config)
-
+
     # Load validation set
     validation_set = load("./worldmodelbench.json")
-
+
     # Check for existing results
     save_path = f"{args.save_name}_cot" if args.cot else args.save_name
     if os.path.exists(save_path):
@@ -160,24 +169,22 @@ def main():
     else:
         preds = {}
     accs = defaultdict(list)
-
+
     for vid, v_i in tqdm(enumerate(validation_set), total=len(validation_set)):
         video_name = Path(v_i["first_frame"]).stem
         video = evaluator._load_video(video_name)
         if not video:
             continue
-
+
         for eval_type in EvaluationType:
             preds_i = []
             prompt_template = config.PROMPT_TEMPLATES[eval_type.value]
             questions = config.QUESTION_POOL[eval_type.value]
-
+
             if questions:
                 accs_i = []
                 for question in questions:
-                    format_kwargs = {
-                        f"{eval_type.value}": question.lower()
-                    }
+                    format_kwargs = {f"{eval_type.value}": question.lower()}
                     prompt = prompt_template.format(**format_kwargs)
                     pred = evaluator.evaluate_video(video, prompt, args.cot)
                     preds_i.append(pred)
@@ -193,7 +200,7 @@ def main():
                    logger.warning(f"Could not parse score from prediction: {pred}")
                    score = 0
                accs[eval_type.value].append(score)
-
+
            if video_name not in preds:
                preds[video_name] = {}
            preds[video_name][eval_type.value] = preds_i
@@ -202,5 +209,6 @@ def main():
     total_score = evaluator.process_results(preds, accs)
     print(f"\nTotal score: {total_score:.2f}")
 
+
 if __name__ == "__main__":
     main()
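
For context, here is a minimal usage sketch of the interfaces touched by this commit. It is not part of the diff: the checkpoint path, video directory, video name, and question text are placeholders, and running it requires the llava dependency plus a judge checkpoint; only the class, method, and template names come from evaluation.py above.

# Hypothetical single-video run against the formatted evaluate_video() signature.
from evaluation import EvaluationConfig, EvaluationType, WorldModelEvaluator

config = EvaluationConfig()
evaluator = WorldModelEvaluator(
    judge_path="/path/to/judge/checkpoint",  # placeholder, same value as --judge
    video_dir="/path/to/generated/videos",   # placeholder, same value as --video_dir
    config=config,
)

# _load_video() looks for <video_dir>/<name>.mp4 and returns None if it is missing.
video = evaluator._load_video("example_clip")  # placeholder video name
if video is not None:
    # Build a physical-laws prompt the same way the main loop does: the template
    # key and the format kwarg are both derived from EvaluationType.
    prompt = config.PROMPT_TEMPLATES[EvaluationType.PHYSICAL_LAWS.value].format(
        physical_laws="violation of gravity"  # placeholder question text
    )
    # cot=False exercises the reformatted replace() chain, swapping the
    # step-by-step instruction for a direct "Answer with ..." request.
    print(evaluator.evaluate_video(video, prompt, cot=False))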