CultriX committed on
Commit 6afe180 · verified · 1 Parent(s): c763d7a

Upload folder using huggingface_hub

__pycache__/preprocess_multiwoz.cpython-311.pyc ADDED
Binary file (553 Bytes).
 
arc_commonsense.yaml ADDED
@@ -0,0 +1,20 @@
+ task: arc_commonsense
+ dataset_path: CultriX/arc-challenge-train-100
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: "Question: {{question}}\nAnswer:"
+ doc_to_target: "{{choices.label.index(answerKey)}}"
+ doc_to_choice: "{{choices.text}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: "Question: {{question}}\nAnswer:"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
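For reference, a minimal sketch of how these templates resolve for one record, assuming the standard ai2_arc schema (question, choices.text, choices.label, answerKey); the sample question is invented for illustration.

# Hypothetical record in the ai2_arc schema; values are made up.
doc = {
    "question": "Which gas do plants take in during photosynthesis?",
    "choices": {"text": ["Oxygen", "Carbon dioxide", "Nitrogen", "Helium"],
                "label": ["A", "B", "C", "D"]},
    "answerKey": "B",
}
prompt = f"Question: {doc['question']}\nAnswer:"            # doc_to_text
options = doc["choices"]["text"]                            # doc_to_choice
target = doc["choices"]["label"].index(doc["answerKey"])    # doc_to_target -> 1
print(prompt)
print(options[target])  # "Carbon dioxide"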
mathqa.py ADDED
@@ -0,0 +1,5 @@
+
+ import re
+ def doc_to_text(doc) -> str:
+     problem = doc['question']
+     return f"Problem: {problem}\nAnswer:"
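A quick usage sketch of this helper with an invented record, assuming mathqa.py is importable from the working directory and the dataset exposes a plain 'question' field.

from mathqa import doc_to_text  # helper defined above

# Invented example record; only the 'question' field is read.
sample = {"question": "A train travels 60 km in 1.5 hours. What is its average speed in km/h?"}
print(doc_to_text(sample))
# Problem: A train travels 60 km in 1.5 hours. What is its average speed in km/h?
# Answer: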
mathqa.yaml ADDED
@@ -0,0 +1,40 @@
+ task: mathqa
+ dataset_path: CultriX/mathqa-train-100
+ training_split: train
+ validation_split: train
+ tag:
+   - math_word_problems
+ dataset_name: main
+ output_type: generate_until
+ fewshot_split: train
+ test_split: test
+ doc_to_text: |-
+   Question: {{question}}
+   Answer:
+ doc_to_target: "{{answer}}"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: false
+     regexes_to_ignore:
+       - ','
+       - '(?s).*#### '
+ generation_kwargs:
+   until:
+     - 'Question:'
+     - </s>
+     - <|im_end|>
+   do_sample: false
+   temperature: 0
+ repeats: 1
+ num_fewshot: 5
+ filter_list:
+   - name: strict-match
+     filter:
+       - function: regex
+         regex_pattern: '#### (\-?[0-9\.\,]+)'
+       - function: take_first
+ metadata:
+   version: 3
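To illustrate the strict-match filter above, a small sketch that applies the same regex and take_first step to a hypothetical model completion.

import re

# Hypothetical completion in the GSM8K-style "#### <number>" format the filter targets.
completion = "The speed is 60 / 1.5 = 40 km/h.\n#### 40"
matches = re.findall(r'#### (\-?[0-9\.\,]+)', completion)  # function: regex
prediction = matches[0] if matches else "[invalid]"        # function: take_first
print(prediction)  # -> "40"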
mmlu-pro.yaml ADDED
@@ -0,0 +1,36 @@
+ task: mmlu-pro
+ dataset_path: CultriX/mmlu-pro-train-100
+ training_split: test
+ validation_split: test
+ test_split: test
+ fewshot_split: validation
+ fewshot_config:
+   sampler: first_n
+   doc_to_text: !function utils.fewshot_to_text
+   doc_to_target: ""
+ output_type: generate_until
+ doc_to_text: !function utils.doc_to_text
+ doc_to_target: answer
+ filter_list:
+   - name: "custom-extract"
+     filter:
+       - function: "regex"
+         regex_pattern: 'answer is \(?([ABCDEFGHIJ])\)?'
+         # regex_pattern: r".*[aA]nswer:\s*([A-J])",
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+     - "Q:"
+     - "<|im_end|>"
+   do_sample: false
+   temperature: 0.0
+ num_fewshot: 5
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 1.0
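Likewise, a sketch of the custom-extract filter above; the chain-of-thought completion is invented.

import re

# Invented completion ending in the "answer is (X)" form the filter expects.
completion = "Let's reason step by step. Options A and B fail the constraint, so the answer is (C)."
m = re.search(r'answer is \(?([ABCDEFGHIJ])\)?', completion)
prediction = m.group(1) if m else "[invalid]"  # take_first keeps the first extracted letter
print(prediction)  # -> "C"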
multiwoz_dialogue.yaml ADDED
@@ -0,0 +1,15 @@
+ task: multiwoz_dialogue
+ dataset_path: CultriX/multiwoz-2turn-train-100
+ output_type: generate_until
+ training_split: train
+ validation_split: train
+ test_split: train
+ doc_to_text: !function preprocess_multiwoz.doc_to_text
+ doc_to_choice: []
+ doc_to_target: "{{turns[1].utterance}}"
+ metric_list:
+   - metric: bleu
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
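A sketch of how prompt and reference pair up for BLEU under this config, assuming each record holds a two-turn 'turns' list as the preprocess_multiwoz helper below expects; the dialogue is invented.

# Invented two-turn record in the assumed schema of CultriX/multiwoz-2turn-train-100.
doc = {"turns": [
    {"utterance": "I need a cheap hotel in the city centre."},
    {"utterance": "Alexander Bed and Breakfast is a cheap guesthouse in the centre."},
]}
prompt = f"User: {doc['turns'][0]['utterance']}\nSystem: "  # what doc_to_text builds
reference = doc["turns"][1]["utterance"]                    # doc_to_target, scored with BLEU
print(prompt)
print(reference)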
preprocess_arc.py ADDED
@@ -0,0 +1,7 @@
+
+ def doc_to_text(doc) -> str:
+     choices = ""
+     for idx, choice in enumerate(doc["choices"]["text"]):
+         letter = "ABCDE"[idx]
+         choices += f"{letter}. {choice}\n"
+     return f"Question: {doc['question']}\n{choices}\nAnswer:"
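Usage sketch with an invented ARC-style record, assuming preprocess_arc.py is importable from the working directory.

from preprocess_arc import doc_to_text  # helper defined above

# Invented record following the ai2_arc choices layout.
sample = {"question": "What force pulls objects toward Earth?",
          "choices": {"text": ["Magnetism", "Gravity", "Friction"], "label": ["A", "B", "C"]}}
print(doc_to_text(sample))
# Question: What force pulls objects toward Earth?
# A. Magnetism
# B. Gravity
# C. Friction
#
# Answer: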
preprocess_mmlu.py ADDED
@@ -0,0 +1,7 @@
+
+ def doc_to_text(doc) -> str:
+     choices = ""
+     for idx, choice in enumerate(doc["choices"]):
+         letter = "ABCD"[idx]
+         choices += f"{letter}. {choice}\n"
+     return f"Question: {doc['question']}\n{choices}\nAnswer:"
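Same idea for the MMLU-style helper, which assumes 'choices' is a flat list of four strings; the record is invented.

from preprocess_mmlu import doc_to_text  # helper defined above

sample = {"question": "Which layer of the OSI model handles routing?",
          "choices": ["Physical", "Data link", "Network", "Transport"]}
print(doc_to_text(sample))
# Question: Which layer of the OSI model handles routing?
# A. Physical
# B. Data link
# C. Network
# D. Transport
#
# Answer: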
preprocess_multiwoz.py ADDED
@@ -0,0 +1,7 @@
+
+ def doc_to_text(doc) -> str:
+     history = ""
+     if len(doc['turns']) > 0:
+         history += f"User: {doc['turns'][0]['utterance']}\n"
+     history += "System: "
+     return history
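Direct usage of the helper, including the empty-history edge case; both records are invented.

from preprocess_multiwoz import doc_to_text  # helper defined above

# Invented record; only turns[0] is read by the helper.
sample = {"turns": [{"utterance": "I need a cheap hotel in the city centre."},
                    {"utterance": "Alexander Bed and Breakfast fits that."}]}
print(repr(doc_to_text(sample)))          # 'User: I need a cheap hotel in the city centre.\nSystem: '
print(repr(doc_to_text({"turns": []})))   # 'System: ' when no history is present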
preprocess_triviaqa.py ADDED
@@ -0,0 +1,10 @@
+
+ def doc_to_text(doc) -> str:
+     question = doc['question']
+     choices = ""
+     for idx, choice in enumerate(doc['answer']['aliases']):
+         if idx >= 5:
+             break
+         letter = "ABCDE"[idx]
+         choices += f"{letter}. {choice}\n"
+     return f"Question: {question}\n{choices}Answer:"
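Usage sketch with an invented TriviaQA-style record; only the first five answer aliases become options.

from preprocess_triviaqa import doc_to_text  # helper defined above

sample = {"question": "Who wrote the novel '1984'?",
          "answer": {"aliases": ["George Orwell", "Orwell", "Eric Arthur Blair",
                                 "Eric Blair", "G. Orwell", "Orwell, George"]}}
print(doc_to_text(sample))  # lists options A-E only; the sixth alias is dropped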
qnli.py ADDED
@@ -0,0 +1,3 @@
+
+ def doc_to_text(doc) -> str:
+     return f"Premise: {doc['premise']}\nHypothesis: {doc['hypothesis']}\nRelation:"
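Usage sketch with an invented record; this helper reads 'premise'/'hypothesis' columns, while the qnli.yaml below templates over 'question'/'sentence' directly and does not reference it.

from qnli import doc_to_text  # helper defined above

# Invented record with the columns this helper expects.
sample = {"premise": "The committee met on Tuesday to review the budget.",
          "hypothesis": "The budget was reviewed on Tuesday."}
print(doc_to_text(sample))
# Premise: The committee met on Tuesday to review the budget.
# Hypothesis: The budget was reviewed on Tuesday.
# Relation: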
qnli.yaml ADDED
@@ -0,0 +1,17 @@
+ task: qnli
+ dataset_path: CultriX/qnli-train-100
+ validation_split: validation
+ test_split: test
+ tag:
+   - qnli
+   - glue
+ dataset_name: qnli-train-100
+ output_type: multiple_choice
+ training_split: train
+ doc_to_text: "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: ["yes", "no"]
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
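A sketch of how the label selects a choice under this config; the record is invented, and the GLUE QNLI label convention (0 = entailment, 1 = not_entailment) is assumed to line up with doc_to_choice ["yes", "no"].

# Invented record; label 0 is assumed to mean the sentence answers the question.
doc = {"question": "When was the bridge completed?",
       "sentence": "The bridge was completed in 1937 after four years of construction.",
       "label": 0}
choices = ["yes", "no"]                        # doc_to_choice
prompt = (f"{doc['question']}\n{doc['sentence']}\n"
          "Question: Does this response answer the question?\nAnswer:")  # doc_to_text
target = choices[doc["label"]]                 # doc_to_target -> "yes"
print(prompt)
print(target)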
triviaqa_qa.yaml ADDED
@@ -0,0 +1,31 @@
+ task: triviaqa_qa
+ dataset_path: CultriX/triviaqa-rc-train-100
+ output_type: generate_until
+ training_split: train
+ validation_split: train
+ test_split: train
+ doc_to_text: "Question: {{question}}?\nAnswer:"
+ doc_to_target: "{{answer.aliases}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: question
+ generation_kwargs:
+   until:
+     - "\n"
+     - "."
+     - ","
+   do_sample: false
+   temperature: 0.0
+ filter_list:
+   - name: remove_whitespace
+     filter:
+       - function: remove_whitespace
+       - function: take_first
+ target_delimiter: " "
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 3.0
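Finally, a hedged sketch of evaluating these local task files with lm-evaluation-harness; it assumes a recent (v0.4+) install where lm_eval.tasks.TaskManager accepts include_path and simple_evaluate accepts a task_manager, and the model name and path are placeholders.

import lm_eval
from lm_eval.tasks import TaskManager

# Point the harness at the folder containing the YAML files from this commit.
task_manager = TaskManager(include_path=".")

results = lm_eval.simple_evaluate(
    model="hf",                                   # Hugging Face backend
    model_args="pretrained=gpt2",                 # placeholder model, swap in your own
    tasks=["arc_commonsense", "mathqa", "qnli"],  # task names defined in the YAMLs above
    task_manager=task_manager,
)
print(results["results"])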