kpfadnis committed
Commit 329f495 · 1 Parent(s): 53257e4

chore (UX): Create dedicated worker to handle filtering in model behavior view.

src/types.ts CHANGED
@@ -234,3 +234,26 @@ export interface Data extends TileData {
   tasks: Task[];
   readonly evaluations: TaskEvaluation[];
 }
+
+// ===================================================================================
+// WORKERS
+// ===================================================================================
+export interface RequestMessage {
+  evaluationsPerMetric: { [key: string]: TaskEvaluation[] };
+  filters: { [key: string]: string[] };
+  expression: object;
+  models: Model[];
+  agreementLevels: { [key: string]: number | string }[];
+  metric?: Metric;
+  allowedValues?: string[];
+  annotator?: string;
+}
+
+export interface ResponseMessage {
+  records: {
+    taskId: string;
+    modelName: string;
+    [key: string]: string | number;
+  }[];
+  evaluations: TaskEvaluation[];
+}
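
For orientation (not part of the commit itself): a minimal sketch of how these two interfaces pair up across the worker boundary. All payload values below are illustrative placeholders, including the `answer_relevance` metric name.

```ts
import { RequestMessage, ResponseMessage } from '@/src/types';

// Hypothetical request: field names follow RequestMessage above;
// the values are placeholders, not real application data.
const request: RequestMessage = {
  evaluationsPerMetric: { answer_relevance: [] },
  filters: {},
  expression: {},
  models: [],
  agreementLevels: [{ label: 'High agreement', value: 3 }],
  annotator: 'annotator-1',
};

// The worker answers with the matching ResponseMessage shape.
function onResult({ records, evaluations }: ResponseMessage) {
  console.log(`${records.length} records, ${evaluations.length} evaluations`);
}
```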
src/views/model-behavior/ModelBehavior.tsx CHANGED
@@ -27,25 +27,18 @@ import {
   FilterableMultiSelect,
   Select,
   SelectItem,
-  Tooltip,
-  Button,
   Toggletip,
   ToggletipButton,
   ToggletipContent,
   ToggletipActions,
+  Loading,
 } from '@carbon/react';
-import {
-  Information,
-  WarningAlt,
-  Filter,
-  ChevronDown,
-  ChevronUp,
-} from '@carbon/icons-react';
+import { Information, WarningAlt } from '@carbon/icons-react';
 import { GroupedBarChart } from '@carbon/charts-react';
 import { ScaleTypes } from '@carbon/charts';
 
 import { useTheme } from '@/src/theme';
-import { TaskEvaluation, Model, Metric } from '@/src/types';
+import { TaskEvaluation, Model, Metric, ResponseMessage } from '@/src/types';
 import {
   AgreementLevels,
   AgreementLevelDefinitions,
@@ -54,9 +47,7 @@ import {
   bin,
   compareMetricAggregatedValues,
 } from '@/src/utilities/metrics';
-import { areObjectsIntersecting } from '@/src/utilities/objects';
 import { getModelColorPalette } from '@/src/utilities/colors';
-import { evaluate } from '@/src/utilities/expressions';
 import TasksTable from '@/src/views/tasks-table/TasksTable';
 import MetricSelector from '@/src/components/selectors/MetricSelector';
 import Filters from '@/src/components/filters/Filters';
@@ -151,186 +142,6 @@ function prepareGroupBarChartData(
   });
 }
 
-function process(
-  evaluationsPerMetric: { [key: string]: TaskEvaluation[] },
-  selectedAgreementLevels: { [key: string]: number | string }[],
-  selectedModels: Model[],
-  selectedMetric: Metric | undefined,
-  selectedAllowedValues: string[],
-  selectedAnnotator: string | undefined,
-  filters: { [key: string]: string[] },
-  expression?: object,
-): [(record & { [key: string]: string | number })[], TaskEvaluation[]] {
-  // Step 1: Initialize necessary variables
-  const models = selectedModels.reduce(
-    (obj, item) => ((obj[item.modelId] = item), obj),
-    {},
-  );
-  const records: (record & { [key: string]: string | number })[] = [];
-  const visibleEvaluations: TaskEvaluation[] = [];
-
-  // Step 2: If filters are specified
-  const filteredEvaluationsPerMetric: { [key: string]: TaskEvaluation[] } = {};
-  for (const [metric, evals] of Object.entries(evaluationsPerMetric)) {
-    filteredEvaluationsPerMetric[metric] = !isEmpty(filters)
-      ? evals.filter((e) => areObjectsIntersecting(filters, e))
-      : evals;
-  }
-
-  // Step 3: If a metric is selected
-  if (selectedMetric) {
-    // Step 3.a: If an expression is specified
-    if (expression && !isEmpty(expression)) {
-      // Step 3.a.ii: Build an object containing evaluations per model for every task
-      const evaluationsPerTaskPerModel: {
-        [key: string]: { [key: string]: TaskEvaluation };
-      } = {};
-      filteredEvaluationsPerMetric[selectedMetric.name].forEach(
-        (evaluation) => {
-          if (evaluationsPerTaskPerModel.hasOwnProperty(evaluation.taskId)) {
-            evaluationsPerTaskPerModel[evaluation.taskId][evaluation.modelId] =
-              evaluation;
-          } else {
-            evaluationsPerTaskPerModel[evaluation.taskId] = {
-              [evaluation.modelId]: evaluation,
-            };
-          }
-        },
-      );
-
-      // Step 3.a.iii: Find evaluations meeting expression criteria
-      evaluate(
-        evaluationsPerTaskPerModel,
-        expression,
-        selectedMetric,
-        selectedAnnotator,
-      ).forEach((evaluation) => {
-        // Step 3.a.iii.*: Create and add record
-        records.push({
-          taskId: evaluation.taskId,
-          modelName: models[evaluation.modelId].name,
-          [`${selectedMetric.name}_value`]:
-            evaluation[`${selectedMetric.name}_agg`].value,
-          [`${selectedMetric.name}_aggLevel`]:
-            evaluation[`${selectedMetric.name}_agg`].level,
-        });
-
-        // Step 3.a.iii.**: Add evaluation
-        visibleEvaluations.push(evaluation);
-      });
-    } else {
-      // Step 3.b: Filter evaluations for the selected metric
-      filteredEvaluationsPerMetric[selectedMetric.name].forEach(
-        (evaluation) => {
-          // Step 3.b.i: If individual annotator is selected, verify against annotator's value
-          if (selectedAnnotator) {
-            /**
-             * Evaluation's model id fall within selected models
-             * OR
-             * Evaluation's selected metric's value fall within allowed values
-             */
-            if (
-              evaluation.modelId in models &&
-              evaluation[selectedMetric.name].hasOwnProperty(
-                selectedAnnotator,
-              ) &&
-              (!selectedAllowedValues.length ||
-                selectedAllowedValues.includes(
-                  evaluation[selectedMetric.name][selectedAnnotator].value,
-                ))
-            ) {
-              // Step 3.b.i.*: Create and add record
-              records.push({
-                taskId: evaluation.taskId,
-                modelName: models[evaluation.modelId].name,
-                [`${selectedMetric.name}_value`]:
-                  evaluation[selectedMetric.name][selectedAnnotator].value,
-              });
-
-              // Step 3.b.i.**: Add evaluation
-              visibleEvaluations.push(evaluation);
-            }
-          } else {
-            // Step 3.b.ii: Verify against aggregate value
-            if (
-              evaluation.modelId in models &&
-              selectedAgreementLevels
-                .map((level) => level.value)
-                .includes(evaluation[`${selectedMetric.name}_agg`].level) &&
-              (!selectedAllowedValues.length ||
-                selectedAllowedValues.includes(
-                  evaluation[`${selectedMetric.name}_agg`].value,
-                ))
-            ) {
-              // Step 3.b.ii.*: Create and add record
-              records.push({
-                taskId: evaluation.taskId,
-                modelName: models[evaluation.modelId].name,
-                [`${selectedMetric.name}_value`]:
-                  evaluation[`${selectedMetric.name}_agg`].value,
-                [`${selectedMetric.name}_aggLevel`]:
-                  evaluation[`${selectedMetric.name}_agg`].level,
-              });
-
-              // Step 3.b.ii.**: Add evaluation
-              visibleEvaluations.push(evaluation);
-            }
-          }
-        },
-      );
-    }
-  } else {
-    // Step 3: For every metric
-    for (const [metric, evaluations] of Object.entries(
-      filteredEvaluationsPerMetric,
-    )) {
-      evaluations.forEach((evaluation) => {
-        // Step 3.a: If invidiual annotator is selected, verify against annotator's value
-        if (selectedAnnotator) {
-          /**
-           * Evaluation's model id fall within selected models
-           * OR
-           * Evaluation's selected metric's value fall within allowed values
-           */
-          if (
-            evaluation.modelId in models &&
-            evaluation[metric].hasOwnProperty(selectedAnnotator) &&
-            (!selectedAllowedValues.length ||
-              selectedAllowedValues.includes(
-                evaluation[metric][selectedAnnotator].value,
-              ))
-          ) {
-            records.push({
-              taskId: evaluation.taskId,
-              modelName: models[evaluation.modelId].name,
-              [`${metric}_value`]: evaluation[metric][selectedAnnotator].value,
-            });
-          }
-        } else {
-          // Step 3.a: Verify against aggregate value
-          if (
-            evaluation.modelId in models &&
-            selectedAgreementLevels
-              .map((level) => level.value)
-              .includes(evaluation[`${metric}_agg`].level) &&
-            (!selectedAllowedValues.length ||
-              selectedAllowedValues.includes(evaluation[`${metric}_agg`].value))
-          ) {
-            records.push({
-              taskId: evaluation.taskId,
-              modelName: models[evaluation.modelId].name,
-              [`${metric}_value`]: evaluation[`${metric}_agg`].value,
-              [`${metric}_aggLevel`]: evaluation[`${metric}_agg`].level,
-            });
-          }
-        }
-      });
-    }
-  }
-
-  return [records, visibleEvaluations];
-}
-
 // ===================================================================================
 // MAIN FUNCTION
 // ===================================================================================
@@ -342,6 +153,7 @@ export default function ModelBehavior({
   onTaskSelection,
 }: Props) {
   // Step 1: Initialize state and necessary variables
+  const [loading, setLoading] = useState<boolean>(false);
   const [WindowWidth, setWindowWidth] = useState<number>(
     global?.window && window.innerWidth,
   );
@@ -379,6 +191,7 @@ export default function ModelBehavior({
   const [visibleEvaluations, setVisibleEvaluations] = useState<
     TaskEvaluation[]
   >([]);
+  const [filterationWorker, setFilterationWorker] = useState<Worker>();
 
   // Step 2: Run effects
   // Step 2.a: Adjust graph width & heigh based on window size
@@ -400,7 +213,36 @@ export default function ModelBehavior({
   // Step 2.b: Fetch theme
   const { theme } = useTheme();
 
-  // Step 2.c: Identify all annotators
+  // Step 2.c: Set up a worker to perform data filtering
+  useEffect(() => {
+    // Step 2.c.i: Create a new web worker
+    const worker = new Worker(
+      new URL('../../workers/filter.ts', import.meta.url),
+    );
+
+    // Step 2.c.ii: Set up event listener for messages from the worker
+    worker.onmessage = function (event: MessageEvent<ResponseMessage>) {
+      // Step 2.c.ii.*: Copy over response data
+      const { records, evaluations } = event.data;
+
+      // Step 2.c.ii.**: Update graph records and visible evaluations
+      setGraphRecords(records);
+      setVisibleEvaluations(evaluations);
+
+      // Step 2.c.ii.***: Set loading to false
+      setLoading(false);
+    };
+
+    // Step 2.c.iii: Save the worker instance to state
+    setFilterationWorker(worker);
+
+    // Step 2.c.iv: Clean up the worker when the component unmounts
+    return () => {
+      worker.terminate();
+    };
+  }, []);
+
+  // Step 2.d: Identify all annotators
   const annotators = useMemo(() => {
     const annotatorsSet = new Set();
     const humanMetricNames = metrics
@@ -419,12 +261,12 @@ export default function ModelBehavior({
     return annotatorsSet;
   }, [evaluationsPerMetric, metrics]);
 
-  // Step 2.d: Reset expression, if selected metric changes
+  // Step 2.e: Reset expression, if selected metric changes
   useEffect(() => {
     setExpression({});
   }, [selectedMetric]);
 
-  // Step 2.e: Configure available majority values, if metric is selected
+  // Step 2.f: Configure available majority values, if metric is selected
   const availableAllowedValues = useMemo(() => {
     if (selectedMetric && selectedMetric.type === 'categorical') {
       if (selectedAnnotator) {
@@ -474,12 +316,12 @@ export default function ModelBehavior({
     selectedAgreementLevels,
   ]);
 
-  // Step 2.f: Update selected values list
+  // Step 2.g: Update selected values list
   useEffect(() => {
     setSelectedAllowedValues(availableAllowedValues);
   }, [availableAllowedValues]);
 
-  // Step 2.g: Calculate graph data and prepare visible evaluations list
+  // Step 2.h: Calculate graph data and prepare visible evaluations list
   /**
    * Adjust graph records based on selected agreement levels, models and annotator
    * visibleEvaluations : [{taskId: <>, modelId: <>, [metric]_score: <>}]
@@ -487,20 +329,22 @@ export default function ModelBehavior({
   * * score field could be either majority score or individual annotator's score (based on selected annotator)
   */
   useEffect(() => {
-    const [records, evaluations] = process(
-      evaluationsPerMetric,
-      selectedAgreementLevels,
-      selectedModels,
-      selectedMetric,
-      selectedAllowedValues,
-      selectedAnnotator,
-      selectedFilters,
-      expression,
-    );
-
-    // Set graph records and visible evaluations
-    setGraphRecords(records);
-    setVisibleEvaluations(evaluations);
+    // Step 1: Set loading to true
+    setLoading(true);
+
+    // Step 2: Post message to worker to unblock main thread
+    if (filterationWorker) {
+      filterationWorker.postMessage({
+        evaluationsPerMetric: evaluationsPerMetric,
+        filters: selectedFilters,
+        expression: expression,
+        models: selectedModels,
+        agreementLevels: selectedAgreementLevels,
+        metric: selectedMetric,
+        allowedValues: selectedAllowedValues,
+        annotator: selectedAnnotator,
+      });
+    }
   }, [
     evaluationsPerMetric,
     selectedAgreementLevels,
@@ -512,7 +356,7 @@ export default function ModelBehavior({
     expression,
   ]);
 
-  // Step 2.h: Calculate visible tasks per metric
+  // Step 2.i: Calculate visible tasks per metric
   const visibleTasksPerMetric = useMemo(() => {
     const data = {};
     metrics.forEach((metric) => {
@@ -528,7 +372,7 @@ export default function ModelBehavior({
     return data;
   }, [graphRecords, metrics]);
 
-  // Step 2.i: Buckets human and algoritmic metrics into individual buckets
+  // Step 2.j: Buckets human and algoritmic metrics into individual buckets
   const [humanMetrics, algorithmMetrics] = useMemo(() => {
     const hMetrics: Metric[] = [];
     const aMetrics: Metric[] = [];
@@ -546,6 +390,7 @@ export default function ModelBehavior({
   // Step 3: Render
   return (
     <div className={classes.page}>
+      {loading ? <Loading /> : null}
       <div className={classes.selectors}>
         <div className={classes.modelSelector}>
           <FilterableMultiSelect
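
Condensed, the component-side flow this diff introduces is a single round trip (a sketch, assuming a bundler such as webpack 5 / Next.js that resolves `new Worker(new URL(...), import.meta.url)`; `request` stands in for any `RequestMessage`-shaped payload):

```ts
import { RequestMessage, ResponseMessage } from '@/src/types';

// Spawn the worker once, on mount.
const worker = new Worker(new URL('../../workers/filter.ts', import.meta.url));

// Results arrive asynchronously, so the render thread never blocks.
worker.onmessage = (event: MessageEvent<ResponseMessage>) => {
  const { records, evaluations } = event.data;
  // ...push records/evaluations into React state and clear the spinner
};

// Whenever a selector changes, structured-clone the filter state across.
declare const request: RequestMessage; // placeholder for the real payload
worker.postMessage(request);

// On unmount, free the thread.
worker.terminate();
```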
src/workers/filter.ts ADDED
@@ -0,0 +1,197 @@
+/**
+ *
+ * Copyright 2023-2024 InspectorRAGet Team
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ **/
+
+import { isEmpty } from 'lodash';
+
+import { RequestMessage, TaskEvaluation } from '@/src/types';
+import { areObjectsIntersecting } from '@/src/utilities/objects';
+import { evaluate } from '@/src/utilities/expressions';
+
+onmessage = function (event: MessageEvent<RequestMessage>) {
+  // Step 1: Initialize necessary variables
+  const {
+    evaluationsPerMetric,
+    filters,
+    expression,
+    agreementLevels,
+    metric,
+    allowedValues,
+    annotator,
+  } = event.data;
+  const models = event.data.models.reduce(
+    (obj, item) => ((obj[item.modelId] = item), obj),
+    {},
+  );
+  const records: {
+    taskId: string;
+    modelName: string;
+    [key: string]: string | number;
+  }[] = [];
+  const visibleEvaluations: TaskEvaluation[] = [];
+
+  // Step 2: If filters are specified
+  const filteredEvaluationsPerMetric: { [key: string]: TaskEvaluation[] } = {};
+  for (const [metric, evals] of Object.entries(evaluationsPerMetric)) {
+    filteredEvaluationsPerMetric[metric] = !isEmpty(filters)
+      ? evals.filter((e) => areObjectsIntersecting(filters, e))
+      : evals;
+  }
+
+  // Step 3: If a metric is selected
+  if (metric) {
+    // Step 3.a: If an expression is specified
+    if (event.data.expression && !isEmpty(event.data.expression)) {
+      // Step 3.a.ii: Build an object containing evaluations per model for every task
+      const evaluationsPerTaskPerModel: {
+        [key: string]: { [key: string]: TaskEvaluation };
+      } = {};
+      filteredEvaluationsPerMetric[metric.name].forEach((evaluation) => {
+        if (evaluationsPerTaskPerModel.hasOwnProperty(evaluation.taskId)) {
+          evaluationsPerTaskPerModel[evaluation.taskId][evaluation.modelId] =
+            evaluation;
+        } else {
+          evaluationsPerTaskPerModel[evaluation.taskId] = {
+            [evaluation.modelId]: evaluation,
+          };
+        }
+      });
+
+      // Step 3.a.iii: Find evaluations meeting expression criteria
+      evaluate(
+        evaluationsPerTaskPerModel,
+        expression,
+        metric,
+        annotator,
+      ).forEach((evaluation) => {
+        // Step 3.a.iii.*: Create and add record
+        records.push({
+          taskId: evaluation.taskId,
+          modelName: models[evaluation.modelId].name,
+          [`${metric.name}_value`]: evaluation[`${metric.name}_agg`].value,
+          [`${metric.name}_aggLevel`]: evaluation[`${metric.name}_agg`].level,
+        });
+
+        // Step 3.a.iii.**: Add evaluation
+        visibleEvaluations.push(evaluation);
+      });
+    } else {
+      // Step 3.b: Filter evaluations for the selected metric
+      filteredEvaluationsPerMetric[metric.name].forEach((evaluation) => {
+        // Step 3.b.i: If individual annotator is selected, verify against annotator's value
+        if (annotator) {
+          /**
+           * Evaluation's model id fall within selected models
+           * OR
+           * Evaluation's selected metric's value fall within allowed values
+           */
+          if (
+            evaluation.modelId in models &&
+            evaluation[metric.name].hasOwnProperty(annotator) &&
+            (!allowedValues ||
+              isEmpty(allowedValues) ||
+              allowedValues.includes(evaluation[metric.name][annotator].value))
+          ) {
+            // Step 3.b.i.*: Create and add record
+            records.push({
+              taskId: evaluation.taskId,
+              modelName: models[evaluation.modelId].name,
+              [`${metric.name}_value`]:
+                evaluation[metric.name][annotator].value,
+            });
+
+            // Step 3.b.i.**: Add evaluation
+            visibleEvaluations.push(evaluation);
+          }
+        } else {
+          // Step 3.b.ii: Verify against aggregate value
+          if (
+            evaluation.modelId in models &&
+            event.data.agreementLevels
+              .map((level) => level.value)
+              .includes(evaluation[`${metric.name}_agg`].level) &&
+            (!allowedValues ||
+              isEmpty(allowedValues) ||
+              allowedValues.includes(evaluation[`${metric.name}_agg`].value))
+          ) {
+            // Step 3.b.ii.*: Create and add record
+            records.push({
+              taskId: evaluation.taskId,
+              modelName: models[evaluation.modelId].name,
+              [`${metric.name}_value`]: evaluation[`${metric.name}_agg`].value,
+              [`${metric.name}_aggLevel`]:
+                evaluation[`${metric.name}_agg`].level,
+            });
+
+            // Step 3.b.ii.**: Add evaluation
+            visibleEvaluations.push(evaluation);
+          }
+        }
+      });
+    }
+  } else {
+    // Step 3: For every metric
+    for (const [metric, evaluations] of Object.entries(
+      filteredEvaluationsPerMetric,
+    )) {
+      evaluations.forEach((evaluation) => {
+        // Step 3.a: If invidiual annotator is selected, verify against annotator's value
+        if (annotator) {
+          /**
+           * Evaluation's model id fall within selected models
+           * OR
+           * Evaluation's selected metric's value fall within allowed values
+           */
+          if (
+            evaluation.modelId in models &&
+            evaluation[metric].hasOwnProperty(annotator) &&
+            (!allowedValues ||
+              isEmpty(allowedValues) ||
+              allowedValues.includes(evaluation[metric][annotator].value))
+          ) {
+            records.push({
+              taskId: evaluation.taskId,
+              modelName: models[evaluation.modelId].name,
+              [`${metric}_value`]: evaluation[metric][annotator].value,
+            });
+          }
+        } else {
+          // Step 3.a: Verify against aggregate value
+          if (
+            evaluation.modelId in models &&
+            agreementLevels
+              .map((level) => level.value)
+              .includes(evaluation[`${metric}_agg`].level) &&
+            (!allowedValues ||
+              isEmpty(allowedValues) ||
+              allowedValues.includes(evaluation[`${metric}_agg`].value))
+          ) {
+            records.push({
+              taskId: evaluation.taskId,
+              modelName: models[evaluation.modelId].name,
+              [`${metric}_value`]: evaluation[`${metric}_agg`].value,
+              [`${metric}_aggLevel`]: evaluation[`${metric}_agg`].level,
+            });
+          }
+        }
+      });
+    }
+  }
+
+  // Step 4: Return results
+  postMessage({ records: records, evaluations: visibleEvaluations });
+};
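
One note on the worker's first step: the comma-operator `reduce` builds a modelId → Model lookup in a single pass. An equivalent, arguably more explicit form (illustrative only, not what the commit uses) would be:

```ts
import { Model } from '@/src/types';

// Same lookup the worker builds with reduce, via Object.fromEntries.
function indexByModelId(models: Model[]): Record<string, Model> {
  return Object.fromEntries(models.map((m) => [m.modelId, m]));
}
```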