Muennighoff committed on
Commit
72a7d56
1 parent: e1bb252

Create evalharness.sh

Files changed (1)
  1. evalharness.sh +119 -0
evalharness.sh ADDED
@@ -0,0 +1,119 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11f-6b3-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1     # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8       # number of cores per task
+ #SBATCH --hint=nomultithread    # we get physical cores not logical
+ #SBATCH --gres=gpu:1            # number of gpus
+ #SBATCH --time 20:00:00         # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out      # output file name
+ #SBATCH --account=ajs@a100
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11f-6b3-ml-evalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0/checkpoints/tasky/global_step1000
+ MEGATRON_DEEPSPEED_REPO=/gpfsssd/worksf/projects/rech/six/commun/code/eval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - there is no optimizer memory, so a bigger BS fits
+ # make it as big as fits into the GPU w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make Megatron happy
+ MEGATRON_REQUIRED_ARGS=" \
+     --num-layers -1 \
+     --hidden-size -1 \
+     --num-attention-heads -1 \
+     --seq-length -1 \
+     --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # DeepSpeed figures out GAS dynamically from the dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+     "train_micro_batch_size_per_gpu": 1,
+     "train_batch_size": 1,
+     "gradient_clipping": 1.0,
+     "zero_optimization": {
+         "stage": $ZERO_STAGE
+     },
+     "bf16": {
+         "enabled": false
+     },
+     "steps_per_print": 2000,
+     "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+     --load $CHECKPOINT_PATH \
+     --results_path $VARIANT-results.json \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     --tokenizer-type PretrainedFromHF \
+     --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+     --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+     --no-load-optim \
+     --no-load-rng \
+     --eval_fp32 \
+     --inference \
+     --seq-length $SEQ_LEN \
+     --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+     --deepspeed \
+     --deepspeed_config ds_config.json \
+     --intermed_results \
+     --adaptive_seq_len \
+     --micro_bs_multiplier 4 \
+     $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+     --nproc_per_node $GPUS_PER_NODE \
+     --nnodes $NNODES \
+     --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log