mshuaibi committed
Commit 5433f8c · 1 Parent(s): ed02609

space for time

Files changed (2)
  1. app.py +2 -1
  2. evaluator.py +13 -40
app.py CHANGED
@@ -461,7 +461,8 @@ def create_evaluation_tabs(results_dfs: Dict[str, pd.DataFrame]) -> None:
     """
     # Create Overview tab first
     overview_df = create_overview_dataframe(results_dfs)
-    create_dataframe_tab("Overview", overview_df, widths=["10%"])
+    n_overview_columns = len(overview_df.columns)
+    create_dataframe_tab("Overview", overview_df, widths=["20%"]+["10%"]*(n_overview_columns-1))
 
     # Create individual evaluation tabs
     for eval_type in OTHER_EVAL_TYPES:
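
As a quick illustration of the new width logic (a minimal sketch; the column names below are hypothetical and `create_dataframe_tab` itself is not reproduced), the first Overview column now gets 20% of the width and every remaining column 10%:

```python
import pandas as pd

# Hypothetical 5-column overview table; only the column count matters here.
overview_df = pd.DataFrame(columns=["Model", "Metric A", "Metric B", "Metric C", "Metric D"])

n_overview_columns = len(overview_df.columns)
# Wider first column for the row label, 10% for each remaining column.
widths = ["20%"] + ["10%"] * (n_overview_columns - 1)
print(widths)  # ['20%', '10%', '10%', '10%', '10%']
```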
evaluator.py CHANGED
@@ -5,7 +5,6 @@ from typing import Dict, List, Tuple
 import numpy as np
 import torch
 import json
-from fairchem.core.modules.evaluator import Evaluator
 
 from fairchem.data.omol.modules.evaluator import (
     ligand_pocket,
@@ -95,21 +94,12 @@ def s2ef_metrics(
     submission_filename: Path,
     subsets: list = ["all"],
 ) -> Dict[str, float]:
-    eval_metrics = {
-        "energy": ["mae"],
-        "forces": ["mae"],
-    }
-    evaluator = Evaluator(eval_metrics=eval_metrics)
-
-    # Get order once for all subsets
     order = get_order(submission_filename, annotations_path)
 
     with np.load(submission_filename) as data:
        forces = data["forces"]
        energy = data["energy"][order]
-       natoms = data["natoms"]
-       forces = np.array(np.split(forces, np.cumsum(natoms)[:-1]), dtype=object)[order]
-       natoms = natoms[order]
+       forces = np.array(np.split(forces, np.cumsum(data["natoms"])[:-1]), dtype=object)[order]
 
     if len(set(np.where(np.isinf(energy))[0])) != 0:
         inf_energy_ids = list(set(np.where(np.isinf(energy))[0]))
@@ -132,36 +122,19 @@
            [data_id in allowed_ids for data_id in target_data_ids]
        )
 
-        sub_energy = torch.from_numpy(energy[subset_mask])
-        sub_forces = torch.from_numpy(np.concatenate(forces[subset_mask]))
-        sub_natoms = torch.from_numpy(natoms[subset_mask])
-
-        submission_data = {
-            "energy": sub_energy,
-            "forces": sub_forces,
-            "natoms": sub_natoms,
-        }
+        sub_energy = energy[subset_mask]
+        sub_target_energy = target_energy[subset_mask]
+        energy_mae = np.mean(np.abs(sub_target_energy - sub_energy))
+        metrics[f"{subset}_energy_mae"] = energy_mae
 
-        target_energy_tensor = torch.from_numpy(target_energy[subset_mask])
-        target_force_tensors = torch.from_numpy(
-            np.concatenate(target_forces[subset_mask])
-        )
-        target_natoms_tensor = torch.tensor(
-            [force_array.shape[0] for force_array in target_forces[subset_mask]],
-            dtype=torch.long,
-        )
+        forces_mae = 0
+        natoms = 0
+        for sub_forces, sub_target_forces in zip(forces[subset_mask], target_forces[subset_mask]):
+            forces_mae += np.sum(np.abs(sub_target_forces - sub_forces))
+            natoms += sub_forces.shape[0]
+        forces_mae /= (3*natoms)
 
-        annotations_data = {
-            "energy": target_energy_tensor,
-            "forces": target_force_tensors,
-            "natoms": target_natoms_tensor,
-        }
-
-        subset_metrics = evaluator.eval(
-            submission_data, annotations_data, prev_metrics={}
-        )
-        for key in ["energy_mae", "forces_mae"]:
-            metrics[f"{subset}_{key}"] = subset_metrics[key]["metric"]
+        metrics[f"{subset}_forces_mae"] = forces_mae
 
     return metrics
 
@@ -217,4 +190,4 @@ def evaluate(
     else:
         raise ValueError(f"Unknown eval_type: {eval_type}")
 
-    return metrics
+    return metrics
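
For reference, a self-contained sketch of the arithmetic the rewritten `s2ef_metrics` now does in plain NumPy instead of building torch tensors for the fairchem `Evaluator`: energy MAE is a mean over structures, and forces MAE is the total absolute error divided by 3 × the number of atoms, i.e. a per-component MAE. The toy arrays and sizes below are made up for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)

# Toy data: 3 structures with 2, 3, and 1 atoms (values are made up).
energy = np.array([-1.20, 0.35, 2.10])
target_energy = np.array([-1.00, 0.30, 2.00])
natoms = np.array([2, 3, 1])

# Flat (total_atoms, 3) force arrays, split per structure into an object array
# the same way the diff does with np.split + np.cumsum.
flat_forces = rng.random((natoms.sum(), 3))
flat_target_forces = rng.random((natoms.sum(), 3))
forces = np.array(np.split(flat_forces, np.cumsum(natoms)[:-1]), dtype=object)
target_forces = np.array(np.split(flat_target_forces, np.cumsum(natoms)[:-1]), dtype=object)

# Energy MAE: mean absolute error over structures.
energy_mae = np.mean(np.abs(target_energy - energy))

# Forces MAE: sum of absolute errors over all atoms and components,
# divided by 3 * total atoms, i.e. the mean absolute error per force component.
forces_mae = 0.0
total_atoms = 0
for sub_forces, sub_target_forces in zip(forces, target_forces):
    forces_mae += np.sum(np.abs(sub_target_forces - sub_forces))
    total_atoms += sub_forces.shape[0]
forces_mae /= 3 * total_atoms

# Same number as flattening everything and taking one big MAE.
assert np.isclose(forces_mae, np.mean(np.abs(flat_target_forces - flat_forces)))
print(energy_mae, forces_mae)
```

Dropping the per-subset torch tensor copies trades a small Python loop for lower memory use, which is presumably the "space for time" referred to in the commit message.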