CultriX committed on
Commit
5b7b13a
1 Parent(s): 6afe180

Upload folder using huggingface_hub

__pycache__/agg_functions.cpython-311.pyc ADDED
Binary file (2.88 kB)
 
agg_functions.py ADDED
@@ -0,0 +1,54 @@
+ from typing import List
+
+ import numpy as np
+
+
+ try:
+     import tinyBenchmarks as tb
+ except ModuleNotFoundError:
+     raise ModuleNotFoundError(
+         "`tinyBenchmarks` is required for tinyBenchmarks task metric calculation, install via \
+ `pip install git+https://github.com/felipemaiapolo/tinyBenchmarks`"
+     )
+
+
+ def agg_pirt(items: List[float], benchmark: str) -> float:
+     items = np.array(items)
+     predictions = tb.evaluate(items, benchmark)
+     return predictions[benchmark]["pirt"]
+
+
+ def agg_gpirt_arc(items: List[float], benchmark: str = "arc") -> float:
+     items = np.array(items)
+     predictions = tb.evaluate(items, benchmark)
+     return predictions[benchmark]["gpirt"]
+
+
+ def agg_gpirt_gsm8k(items: List[float], benchmark: str = "gsm8k") -> float:
+     items = np.array(items)
+     predictions = tb.evaluate(items, benchmark)
+     return predictions[benchmark]["gpirt"]
+
+
+ def agg_gpirt_hellaswag(items: List[float], benchmark: str = "hellaswag") -> float:
+     items = np.array(items)
+     predictions = tb.evaluate(items, benchmark)
+     return predictions[benchmark]["gpirt"]
+
+
+ def agg_gpirt_mmlu(items: List[float], benchmark: str = "mmlu") -> float:
+     items = np.array(items)
+     predictions = tb.evaluate(items, benchmark)
+     return predictions[benchmark]["gpirt"]
+
+
+ def agg_gpirt_truthfulqa(items: List[float], benchmark: str = "truthfulqa") -> float:
+     items = np.array(items)
+     predictions = tb.evaluate(items, benchmark)
+     return predictions[benchmark]["gpirt"]
+
+
+ def agg_gpirt_winogrande(items: List[float], benchmark: str = "winogrande") -> float:
+     items = np.array(items)
+     predictions = tb.evaluate(items, benchmark)
+     return predictions[benchmark]["gpirt"]
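Note on the functions above: tinyBenchmarks fits an IRT model to the per-example correctness pattern, so each `agg_*` function takes the harness's list of 0/1 scores for the tiny task and returns an extrapolated score for the full benchmark ("pirt" is the performance-IRT estimate; "gpirt" is, roughly, a weighted blend of the IRT prediction and the sample mean). A minimal direct-call sketch, assuming `tinyBenchmarks` is installed; the random 100-item vector is illustration data standing in for the real per-example acc_norm values:

import numpy as np

from agg_functions import agg_gpirt_arc

# Stand-in correctness vector: one 0/1 entry per tinyArc example, in dataset
# order. In a real run the harness supplies these from the model's answers.
rng = np.random.default_rng(0)
items = rng.integers(0, 2, size=100).astype(float).tolist()

print(agg_gpirt_arc(items))  # extrapolated full ARC-Challenge accuracy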
arc_commonsense.yaml CHANGED
@@ -1,4 +1,4 @@
- task: arc_commonsense
+ task: arc_challenge
  dataset_path: CultriX/arc-challenge-train-100
  task: arc_challenge
  dataset_path: allenai/ai2_arc
tinyArc.yaml ADDED
@@ -0,0 +1,22 @@
+
+ task: tinyArc
+ dataset_path: tinyBenchmarks/tinyAI2_arc
+ dataset_name: ARC-Challenge
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ num_fewshot: 25
+ doc_to_text: "Question: {{question}}
+ Answer:"
+ doc_to_target: "{{choices.label.index(answerKey)}}"
+ doc_to_choice: "{{choices.text}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: "Question: {{question}}
+ Answer:"
+ metric_list:
+   - metric: acc_norm
+     aggregation: !function agg_functions.agg_gpirt_arc
+     higher_is_better: true
+ metadata:
+   version: 0.0
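The `aggregation: !function agg_functions.agg_gpirt_arc` line is what ties this YAML to the Python module above: instead of averaging `acc_norm` over the 100 examples, lm-eval hands the full list of per-example values to that function. Conceptually the tag resolves a dotted path to a callable, roughly like the simplified sketch below (the harness's actual loader also handles modules placed next to the YAML, which this simplification does not):

import importlib

# Resolve the dotted path from the `!function` tag to a Python callable.
module_name, func_name = "agg_functions.agg_gpirt_arc".rsplit(".", 1)
module = importlib.import_module(module_name)  # agg_functions.py must be on sys.path
aggregate = getattr(module, func_name)

# The harness then calls aggregate(per_item_scores) with one acc_norm value
# per tinyArc example to produce the reported benchmark score.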