#!/usr/bin/env bash

# Load directory variables (BASE_DIR, DATASET_DIR) from the local config file.
. cfg_.config
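# cfg_.config is assumed to export the directories used below; hypothetical example:
#   BASE_DIR=/path/to/project
#   DATASET_DIR=/path/to/datasets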

#MODEL_DIR=/home/diiogofernands/hub/portuguese-roberta-base/portuguese-roberta-base-config
#MODEL_OUT=/home/diiogofernands/hub/portuguese-roberta-base/model_pretrained
#DATA=/home/diiogofernands/extracted/brwac

MODEL_DIR=$BASE_DIR/portuguese-roberta-base-config
MODEL_OUT=$BASE_DIR/models-output/training-id-roberta-base-brwac-oscar-merged
DATA=$DATASET_DIR/merged_brwac-all_oscarpt

DEVICES=8           # TPU chips
PER_DEVICE_BATCH=256

TOTAL_BATCH=$(( DEVICES * PER_DEVICE_BATCH ))
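# Global batch size: 8 chips * 256 sequences/chip = 2048 sequences per optimizer step.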
# Count the examples in the saved dataset (datasets.Dataset.num_rows).
DATASET_SIZE=$(python -c "
from datasets import load_from_disk
dataset = load_from_disk('${DATA}')
print(dataset.num_rows)")

TOTAL_STEPS=100000
# DATASET_SIZE // TOTAL_BATCH is the number of optimizer steps per epoch; EPOCHS is
# the (fractional) number of passes needed to reach TOTAL_STEPS, and
# NUM_TRAINING_STEPS rounds that product back to a whole number of steps (~TOTAL_STEPS).
EPOCHS=$(python -c "print(${TOTAL_STEPS} / (${DATASET_SIZE} // ${TOTAL_BATCH}))")
NUM_TRAINING_STEPS=$(python -c "print(round((${DATASET_SIZE} // ${TOTAL_BATCH}) * ${EPOCHS}))")
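# Worked example with a hypothetical dataset of 20,480,000 rows:
#   steps/epoch = 20480000 // 2048 = 10000
#   EPOCHS      = 100000 / 10000  = 10.0
#   NUM_TRAINING_STEPS = round(10000 * 10.0) = 100000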

echo "MAX_STEPS = ${NUM_TRAINING_STEPS}"
#--model_config_name $MODEL_DIR \
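# The Adam settings below (beta1=0.9, beta2=0.98, epsilon=1e-6) follow the RoBERTa
# pretraining recipe; the peak LR of 6e-4 is warmed up over the first 24k steps.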

python ./src/run_mlm_flax_stream.py \
        --output_dir ${MODEL_OUT} \
        --model_type roberta \
        --config_name ${MODEL_DIR} \
        --tokenizer_name ${MODEL_DIR} \
        --model_name_or_path ${BASE_DIR}/roberta-base-config \
        --dataset_name brwac_oscar_pt \
        --dataset_path ${DATA} \
        --max_seq_length 128 \
        --pad_to_max_length \
        --per_device_train_batch_size ${PER_DEVICE_BATCH} \
        --per_device_eval_batch_size ${PER_DEVICE_BATCH} \
        --weight_decay 0.01 \
        --warmup_steps 24000 \
        --overwrite_output_dir \
        --adam_beta1 0.9 \
        --adam_beta2 0.98 \
        --adam_epsilon 1e-6 \
        --learning_rate 6e-4 \
        --num_train_steps ${NUM_TRAINING_STEPS} \
        --num_eval_samples 5000 \
        --save_steps 1000 \
        --logging_steps 500 \
        --eval_steps 1000 \
        --dtype bfloat16
        #--push_to_hub