Muennighoff committed on
Commit
e1bb252
1 Parent(s): 7785f02

Create val.sh

Browse files
Files changed (1) hide show
  1. val.sh +345 -0
val.sh ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --job-name=ckpts
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --account=ajs@cpu
10
+ #SBATCH --partition=cpu_p1
11
+
12
+ set -x -e
13
+
14
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
15
+ export HF_DATASETS_OFFLINE=1
16
+ export TRANSFORMERS_OFFLINE=1
17
+ conda activate muennighoffmodelconv
18
+
19
+ CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0/checkpoints/tasky
20
+ #CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0/checkpoints/p31lossseq
21
+
22
+ CKPTS=(
23
+ global_step250
24
+ global_step500
25
+ global_step750
26
+ global_step1000
27
+ global_step1250
28
+ )
29
+ EXAMPLE_CKPT=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6b3-ml-t0-lmtoks341b-t0toks13b-xp3capmixnewcodelonglossseq
30
+ DUMP_PATH=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/6b3t0
31
+ OUT_PREFIX=tasky_
32
+ #OUT_PREFIX=p31lossseq
33
+
34
+ TP=1
35
+
36
+ ### CONVERT ###
37
+
38
+
39
+ for i in {0..6}; do
40
+ CKPT=${CKPTS[$i]}
41
+ echo "$i"
42
+ echo "Running $CKPT"
43
+
44
+ OUTPUTCKPT=$DUMP_PATH/"$OUT_PREFIX$CKPT"
45
+ python $six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/transformers_clone/src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py --pytorch_dump_folder_path $OUTPUTCKPT --bloom_checkpoint_path $CKPT_PATH/$CKPT --pretraining_tp $TP --bloom_config_file $EXAMPLE_CKPT/config.json
46
+
47
+ # Copy tokenizer.json etc
48
+ cp -r $EXAMPLE_CKPT/*.json $OUTPUTCKPT/
49
+
50
+ eval_script="./eval_$i.slurm"
51
+ cat <<EOT > $eval_script
52
+ #!/bin/bash
53
+ #SBATCH --job-name=evaluate_t0
54
+ #SBATCH --nodes=1
55
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
56
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
57
+ #SBATCH --hint=nomultithread # we get physical cores not logical
58
+ #SBATCH --gres=gpu:1 # number of gpus
59
+ #SBATCH --constraint=a100
60
+ #SBATCH --time 5:00:00 # maximum execution time (HH:MM:SS)
61
+ #SBATCH --output=%x-%j.out # output file name
62
+ #SBATCH --account=ajs@a100
63
+ #SBATCH --array=0-168
64
+
65
+ set -x -e
66
+
67
+ source $six_ALL_CCFRWORK/start-py38-pt111
68
+ conda activate thomas_t_zero_evaluation
69
+
70
+ CHECKPOINT_PATH=$OUTPUTCKPT
71
+
72
+ WORKDIR=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0
73
+ pushd "\$WORKDIR"
74
+ OUTPUT_DIR="\$CHECKPOINT_PATH/evaluation"
75
+ mkdir -p "\$OUTPUT_DIR"
76
+
77
+ # Validation
78
+ DATASETS_AND_CONFIGS_VAL=(
79
+ head_qa,en,en,"multiple_choice_q_and_a_index_with_context_en",validation
80
+ head_qa,en,en,"multiple_choice_q_and_a_en",validation
81
+ head_qa,en,en,"multiple_choice_q_and_a_index_en",validation
82
+ head_qa,en,en,"multiple_choice_a_and_q_with_context_en",validation
83
+ head_qa,en,en,"multiple_choice_a_and_q_en",validation
84
+ head_qa,es,en,"multiple_choice_q_and_a_index_with_context_en",validation
85
+ head_qa,es,en,"multiple_choice_q_and_a_en",validation
86
+ head_qa,es,en,"multiple_choice_q_and_a_index_en",validation
87
+ head_qa,es,en,"multiple_choice_a_and_q_with_context_en",validation
88
+ head_qa,es,en,"multiple_choice_a_and_q_en",validation
89
+ climate_fever,None,None,"first_evidence_and_claim_itemization",test
90
+ climate_fever,None,None,"claim_and_all_supporting_evidences",test
91
+ climate_fever,None,None,"fifth_evidence_and_claim_itemization",test
92
+ climate_fever,None,None,"third_evidence_claim_pair",test
93
+ climate_fever,None,None,"second_evidence_and_claim_itemization",test
94
+ codah,codah,None,"interrogative_instruction_after_sentence_and_choices",train
95
+ codah,codah,None,"affirmative_instruction_before_sentence_and_choices",train
96
+ codah,codah,None,"affirmative_instruction_after_sentence_and_choices",train
97
+ aqua_rat,raw,None,"select_the_best_option",validation
98
+ aqua_rat,raw,None,"answer_quiz",validation
99
+ aqua_rat,raw,None,"Answer questions from options",validation
100
+ commonsense_qa,None,None,"answer_given_question_without_options",validation
101
+ commonsense_qa,None,None,"question_answering",validation
102
+ commonsense_qa,None,None,"most_suitable_answer",validation
103
+ amazon_reviews_multi,en,en,"prompt_title_to_star",validation
104
+ amazon_reviews_multi,en,en,"prompt_review_to_star",validation
105
+ amazon_reviews_multi,en,en,"prompt_body_title_to_star",validation
106
+ amazon_reviews_multi,zh,en,"prompt_title_to_star",validation
107
+ amazon_reviews_multi,zh,en,"prompt_review_to_star",validation
108
+ amazon_reviews_multi,zh,en,"prompt_body_title_to_star",validation
109
+ amazon_reviews_multi,fr,en,"prompt_title_to_star",validation
110
+ amazon_reviews_multi,fr,en,"prompt_review_to_star",validation
111
+ amazon_reviews_multi,fr,en,"prompt_body_title_to_star",validation
112
+ amazon_reviews_multi,es,en,"prompt_title_to_star",validation
113
+ amazon_reviews_multi,es,en,"prompt_review_to_star",validation
114
+ amazon_reviews_multi,es,en,"prompt_body_title_to_star",validation
115
+ art,None,None,"choose_hypothesis_options",validation
116
+ art,None,None,"choose_hypothesis_believable",validation
117
+ art,None,None,"choose_hypothesis",validation
118
+ art,None,None,"choose_hypothesis_desc",validation
119
+ art,None,None,"choose_hypothesis_likely",validation
120
+ banking77,None,None,"help_page_topic",test
121
+ banking77,None,None,"direct_to_which_department",test
122
+ banking77,None,None,"rephrase_as_banking_term",test
123
+ blbooksgenre,title_genre_classifiction,None,"multi-choice",train
124
+ blbooksgenre,title_genre_classifiction,None,"premise_context_first",train
125
+ blbooksgenre,title_genre_classifiction,None,"classify",train
126
+ blimp,adjunct_island,None,"grammatical_between_1_2",train
127
+ blimp,adjunct_island,None,"grammatical_between_A_B",train
128
+ blimp,adjunct_island,None,"grammatical_which_one_1_2",train
129
+ blimp,adjunct_island,None,"single_sentence_bad_yes_no",train
130
+ blimp,adjunct_island,None,"single_sentence_good_yes_no",train
131
+ conv_ai_3,None,None,"clarification_needed",validation
132
+ conv_ai_3,None,None,"score_give_number",validation
133
+ conv_ai_3,None,None,"ambiguous",validation
134
+ conv_ai_3,None,None,"directly_answer",validation
135
+ conv_ai_3,None,None,"score_how_much",validation
136
+ craigslist_bargains,None,None,"good deal for seller no list price implicit",validation
137
+ craigslist_bargains,None,None,"good deal for seller no list price",validation
138
+ craigslist_bargains,None,None,"good deal for seller",validation
139
+ craigslist_bargains,None,None,"best deal",validation
140
+ ecthr_cases,alleged-violation-prediction,None,"implicit_advice_number",validation
141
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_declaration_at_end",validation
142
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_question_at_start",validation
143
+ ecthr_cases,alleged-violation-prediction,None,"implicit_judgment_paragraph",validation
144
+ ecthr_cases,alleged-violation-prediction,None,"confirm number of violated articles",validation
145
+ emo,None,None,"persons_describe",validation
146
+ emo,None,None,"final_message",validation
147
+ emo,None,None,"what_emotion_do_you_think",validation
148
+ emo,None,None,"emotional_state",validation
149
+ emo,None,None,"dialogue_between",validation
150
+ emotion,None,None,"choose_the_best_emotion_label",test
151
+ emotion,None,None,"reply_with_emoation_label",test
152
+ emotion,None,None,"answer_with_class_label",test
153
+ emotion,None,None,"answer_question_with_emotion_label",test
154
+ financial_phrasebank,sentences_allagree,None,"share_price_option",train
155
+ financial_phrasebank,sentences_allagree,None,"sentiment",train
156
+ financial_phrasebank,sentences_allagree,None,"word_comes_to_mind",train
157
+ financial_phrasebank,sentences_allagree,None,"complementary_industries",train
158
+ financial_phrasebank,sentences_allagree,None,"bullish_neutral_bearish",train
159
+ glue,cola,None,"Make sense yes no",validation
160
+ glue,cola,None,"is_this_correct",validation
161
+ glue,cola,None,"editing",validation
162
+ glue,cola,None,"Following sentence acceptable",validation
163
+ glue,cola,None,"Previous sentence acceptable",validation
164
+ glue,sst2,None,"positive negative after",validation
165
+ glue,sst2,None,"review",validation
166
+ glue,sst2,None,"said",validation
167
+ glue,sst2,None,"following positive negative",validation
168
+ glue,sst2,None,"happy or mad",validation
169
+ health_fact,None,None,"claim_veracity_classification_after_reading_I_believe",validation
170
+ health_fact,None,None,"claim_explanation_classification",validation
171
+ health_fact,None,None,"claim_veracity_classification_tell_me",validation
172
+ hlgd,None,None,"is_same_event_with_time_interrogative_related",validation
173
+ hlgd,None,None,"is_same_event_interrogative_talk",validation
174
+ hlgd,None,None,"is_same_event_with_time_interrogative_talk",validation
175
+ hlgd,None,None,"is_same_event_refer",validation
176
+ hlgd,None,None,"is_same_event_editor_asks",validation
177
+ hyperpartisan_news_detection,byarticle,None,"consider_does_it_follow_a_hyperpartisan_argumentation",train
178
+ hyperpartisan_news_detection,byarticle,None,"follows_hyperpartisan_argumentation",train
179
+ hyperpartisan_news_detection,byarticle,None,"consume_with_caution",train
180
+ hyperpartisan_news_detection,byarticle,None,"extreme_left_wing_or_right_wing",train
181
+ hyperpartisan_news_detection,byarticle,None,"consider_it_exhibits_extreme_one_sidedness",train
182
+ liar,None,None,"Given statement guess category",validation
183
+ lince,sa_spaeng,None,"original poster expressed sentiment",validation
184
+ lince,sa_spaeng,None,"sentiment trying to express",validation
185
+ lince,sa_spaeng,None,"express sentiment",validation
186
+ lince,sa_spaeng,None,"negation template",validation
187
+ lince,sa_spaeng,None,"the author seem",validation
188
+ math_qa,None,None,"choose_correct_og",test
189
+ math_qa,None,None,"pick_the_correct",test
190
+ math_qa,None,None,"first_choice_then_problem",test
191
+ math_qa,None,None,"problem_set_type",test
192
+ math_qa,None,None,"gre_problem",test
193
+ movie_rationales,None,None,"Standard binary sentiment analysis",validation
194
+ movie_rationales,None,None,"Evidences sentiment classification",validation
195
+ movie_rationales,None,None,"Evidences + review",validation
196
+ movie_rationales,None,None,"Generate evidences and sentiment",validation
197
+ mwsc,None,None,"in-the-sentence-question-first",validation
198
+ mwsc,None,None,"what-think",validation
199
+ mwsc,None,None,"in-the-sentence",validation
200
+ mwsc,None,None,"options-or",validation
201
+ mwsc,None,None,"is-correct",validation
202
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_2",validation
203
+ poem_sentiment,None,None,"question_answer_format",validation
204
+ poem_sentiment,None,None,"guess_sentiment_without_options_variation_1",validation
205
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_1",validation
206
+ poem_sentiment,None,None,"most_appropriate_sentiment",validation
207
+ onestop_english,None,None,"esl_context",train
208
+ onestop_english,None,None,"ara_context",train
209
+ onestop_english,None,None,"determine_reading_level_from_the_first_three_sentences",train
210
+ onestop_english,None,None,"esl_variation",train
211
+ onestop_english,None,None,"assess",train
212
+ pubmed_qa,pqa_labeled,None,"Long Answer to Final Decision",train
213
+ pubmed_qa,pqa_labeled,None,"Question Answering (Short)",train
214
+ riddle_sense,None,None,"most_suitable_answer",validation
215
+ riddle_sense,None,None,"answer_given_question_without_options",validation
216
+ riddle_sense,None,None,"question_to_answer_index",validation
217
+ riddle_sense,None,None,"question_answering",validation
218
+ scicite,None,None,"Classify intent w/section (select choice)",validation
219
+ scicite,None,None,"Classify intent (choices first)",validation
220
+ scicite,None,None,"Classify intent (select choice)",validation
221
+ scicite,None,None,"Classify intent",validation
222
+ scicite,None,None,"can_describe",validation
223
+ selqa,answer_selection_analysis,None,"is-he-talking-about",validation
224
+ selqa,answer_selection_analysis,None,"would-make-sense-qu-rand",validation
225
+ selqa,answer_selection_analysis,None,"make-sense-rand",validation
226
+ selqa,answer_selection_analysis,None,"which-answer-1st-vs-random",validation
227
+ snips_built_in_intents,None,None,"voice_intent",train
228
+ snips_built_in_intents,None,None,"categorize_query",train
229
+ snips_built_in_intents,None,None,"intent_query",train
230
+ snips_built_in_intents,None,None,"categorize_query_brief",train
231
+ snips_built_in_intents,None,None,"query_intent",train
232
+ )
233
+
234
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS_VAL[\$SLURM_ARRAY_TASK_ID]}"
235
+ echo "\$ARGUMENT"
236
+
237
+ # Run T0 evaluation
238
+ # For PrefixLM add --prefixlm
239
+ IFS=',' read dataset_name dataset_config_name template_config_name template_name split <<< "\${DATASET_AND_CONFIG}"
240
+ python t-zero/evaluation/run_eval.py \
241
+ --dataset_name "\$dataset_name" \
242
+ --dataset_config_name "\$dataset_config_name" \
243
+ --template_config_name "\$template_config_name" \
244
+ --template_name "\$template_name" \
245
+ --split "\$split" \
246
+ --model_name_or_path "\$CHECKPOINT_PATH" \
247
+ --output_dir "\$OUTPUT_DIR" \
248
+ --per_device_eval_batch_size 4 \
249
+ --max_length 2048 \
250
+ --dtype float16
251
+ EOT
252
+
253
+ sbatch $eval_script
254
+
255
+
256
+ lm_eval_script="./lm_eval_$i.slurm"
257
+ cat <<EOT > $lm_eval_script
258
+ #!/bin/bash
259
+ #SBATCH --job-name=lmeval
260
+ #SBATCH --nodes=1
261
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
262
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
263
+ #SBATCH --hint=nomultithread # we get physical cores not logical
264
+ #SBATCH --gres=gpu:1 # number of gpus
265
+ #SBATCH --constraint=a100
266
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
267
+ #SBATCH --output=%x-%j.out # output file name
268
+ #SBATCH --account=ajs@a100
269
+ #SBATCH --array=0-12
270
+
271
+ set -x -e
272
+
273
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
274
+ conda activate muennighofflmevalgen
275
+
276
+ echo "START TIME: $(date)"
277
+
278
+ # defining the right environment variables
279
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
280
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
281
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
282
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
283
+ export HF_DATASETS_OFFLINE=1
284
+ export TRANSFORMERS_OFFLINE=1
285
+ export TOKENIZERS_PARALLELISM=false
286
+
287
+ # Converted transformer checkpoint
288
+ MODEL_CKPT=$OUTPUTCKPT
289
+
290
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness
291
+
292
+
293
+ DATASETS_AND_CONFIGS=(
294
+ wmt14_fr_en,fr-en,"version-en-fr-target"
295
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
296
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
297
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
298
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
299
+ wmt14_fr_en,fr-en,"version-fr-en-target"
300
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
301
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
302
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
303
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
304
+ wmt14_hi_en,hi-en,"version-en-hi-target"
305
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-target"
306
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-source+target"
307
+ wmt14_hi_en,hi-en,"xglm-en-hi-target"
308
+ wmt14_hi_en,hi-en,"gpt-3-en-hi-target"
309
+ wmt14_hi_en,hi-en,"version-hi-en-target"
310
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-target"
311
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-source+target"
312
+ wmt14_hi_en,hi-en,"xglm-hi-en-target"
313
+ wmt14_hi_en,hi-en,"gpt-3-hi-en-target"
314
+ mlsum_es,"es","layman_summ_es"
315
+ mlsum_es,"es","palm_prompt"
316
+ mlsum_es,"es","summarise_this_in_es_few_sentences"
317
+ )
318
+
319
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS[\$SLURM_ARRAY_TASK_ID]}"
320
+ echo "\$ARGUMENT"
321
+
322
+ IFS=',' read dataset_name lang template_name <<< "\${DATASET_AND_CONFIG}"
323
+
324
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
325
+ python main.py \
326
+ --model_api_name 'hf-causal' \
327
+ --model_args "pretrained=\$MODEL_CKPT,use_accelerate=True,tokenizer=\$MODEL_CKPT,dtype=float16" \
328
+ --device cuda \
329
+ --batch_size 16 \
330
+ --no_tracking \
331
+ --task_name "\$dataset_name" \
332
+ --template_names "\$template_name" \
333
+ --bootstrap_iters 10 \
334
+ --limit 3000
335
+
336
+ mkdir -p "$OUTPUTCKPT/evaluation/\$dataset_name"
337
+ mv "outputs/*$CKPT*\$dataset_name*" "$OUTPUTCKPT/evaluation/\$dataset_name/"
338
+
339
+ echo "END TIME: $(date)"
340
+ EOT
341
+
342
+ sbatch $lm_eval_script
343
+
344
+
345
+ done