pere committed
Commit 6655b30
1 parent: 4c4e486

updated training scripts for norbench

__pycache__/my_metrics.cpython-39.pyc ADDED
Binary file (498 Bytes).
 
__pycache__/tasks.cpython-39.pyc ADDED
Binary file (5.31 kB).
 
norbench_document_sentiment_bs128.gin ADDED
@@ -0,0 +1,52 @@
+ from __gin__ import dynamic_registration
+ import tasks
+ import seqio
+ import optax
+
+ import __main__ as train_script
+ from t5.data import mixtures
+ from t5x import models
+ from t5x import partitioning
+ from t5x import utils
+
+ include 't5x/examples/t5/mt5/base.gin'
+ include 't5x/configs/runs/finetune.gin'
+
+ MIXTURE_OR_TASK_NAME = %gin.REQUIRED
+ TASK_FEATURE_LENGTHS = {"inputs": 256, "targets": 256}
+ INITIAL_CHECKPOINT_PATH = %gin.REQUIRED
+ LR = %gin.REQUIRED
+ TRAIN_STEPS = %gin.REQUIRED  # Absolute count: pre-training steps + fine-tuning steps.
+ USE_CACHED_TASKS = False
+ DROPOUT_RATE = 0.1
+ RANDOM_SEED = 0
+ BATCH_SIZE = 128
+ EVAL_PERIOD = 1000
+
+ # Use the training feature lengths for inference evaluation as well.
+ infer_eval/utils.DatasetConfig:
+   task_feature_lengths = %TASK_FEATURE_LENGTHS
+
+ # Save a checkpoint every 1000 steps, keeping only the most recent one.
+ utils.SaveCheckpointConfig:
+   period = 1000
+   keep = 1  # number of checkpoints to keep
+
+
+ # Optional AdamW optimizer, left disabled for this run:
+ # import t5x.optimizers
+ # OPTIMIZER = @optax.adamw
+ # optax.adamw.learning_rate = %LR
+ # optax.adamw.weight_decay = 0.1
+
+
+ utils.create_learning_rate_scheduler:
+   factors = 'constant'
+   base_learning_rate = %LR
+   warmup_steps = 1000
+
+ # Might have to be changed based on the architecture:
+ # partitioning.PjitPartitioner.num_partitions = 1
+
+
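Note on TRAIN_STEPS: t5x treats it as an absolute target step, not a number of additional steps, so fine-tuning for 20,000 steps from a checkpoint saved at step 1,500,000 means binding TRAIN_STEPS=1520000. The launcher scripts below compute it the same way:

    # bash: absolute target step = checkpoint step + fine-tuning steps
    TRAIN_STEPS=$((1500000 + 20000))   # 1520000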
norbench_document_sentiment_bs128.sh ADDED
@@ -0,0 +1,54 @@
+ #!/bin/bash
+ PROJECT_DIR="${HOME}/models/t5-nynorsk-norbench"
+ export PYTHONPATH=${PROJECT_DIR}
+ echo "PROJECT_DIR is set to: ${PROJECT_DIR}"
+
+
+ FINETUNE_STEPS=20000
+ EVAL_PREFIX="norbench/norbench_document_sentiment_bs128"
+ MODEL_BUCKET_DIR="gs://pere-north-t5x/finetuned/"
+
+ CHECKPOINT_LIST=(
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+ )
+
+ NAME_LIST=(
+   "north_t5_base_NCC_LR_0_0006"
+   "north_t5_base_NCC_LR_0_0008"
+   "north_t5_base_NCC_LR_0_001"
+   "north_t5_base_NCC_LR_0_002"
+ )
+
+ TASK_LIST=("document_sentiment" "document_sentiment" "document_sentiment" "document_sentiment")
+ LR_LIST=(0.0006 0.0008 0.001 0.002)
+
+
+ GIN_LIST=(
+   "norbench_document_sentiment_bs128.gin"
+   "norbench_document_sentiment_bs128.gin"
+   "norbench_document_sentiment_bs128.gin"
+   "norbench_document_sentiment_bs128.gin"
+ )
+
+ START_LIST=(1500000 1500000 1500000 1500000)
+ EXP_LIST=(1 2 3 4)
+
+
+ # Loop through each experiment in EXP_LIST
+ for i in "${!EXP_LIST[@]}"; do
+   INITIAL_CHECKPOINT_PATH="gs://${CHECKPOINT_LIST[i]}"
+   TRAIN_STEPS=$((START_LIST[i] + FINETUNE_STEPS))
+   GIN_FILE=${GIN_LIST[i]}
+   MIXTURE_OR_TASK_NAME=${TASK_LIST[i]}
+   LR=${LR_LIST[i]}
+   EXP=${EXP_LIST[i]}
+   MODEL_DIR="${MODEL_BUCKET_DIR}${EVAL_PREFIX}_exp${EXP}_${NAME_LIST[i]}"
+
+   command="python3 ../../t5x/t5x/train.py --gin_search_paths=\"./\" --gin.TRAIN_STEPS=${TRAIN_STEPS} --gin.LR=${LR} --gin_file=${GIN_FILE} --gin.INITIAL_CHECKPOINT_PATH=\\\"${INITIAL_CHECKPOINT_PATH}\\\" --gin.MIXTURE_OR_TASK_NAME=\\\"${MIXTURE_OR_TASK_NAME}\\\" --gin.MODEL_DIR=\\\"${MODEL_DIR}\\\""
+   echo "${command}"
+   # Run the command:
+   eval "${command}"
+ done
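For reference, the first loop iteration (EXP=1) echoes the command below, reflowed here with line continuations; all values are taken directly from the arrays above. The \" escapes survive echo and are consumed by eval, so gin receives quoted string values:

    python3 ../../t5x/t5x/train.py \
      --gin_search_paths="./" \
      --gin.TRAIN_STEPS=1520000 \
      --gin.LR=0.0006 \
      --gin_file=norbench_document_sentiment_bs128.gin \
      --gin.INITIAL_CHECKPOINT_PATH=\"gs://pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000\" \
      --gin.MIXTURE_OR_TASK_NAME=\"document_sentiment\" \
      --gin.MODEL_DIR=\"gs://pere-north-t5x/finetuned/norbench/norbench_document_sentiment_bs128_exp1_north_t5_base_NCC_LR_0_0006\"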
norbench_norcola_bs128.gin ADDED
@@ -0,0 +1,52 @@
+ from __gin__ import dynamic_registration
+ import tasks
+ import seqio
+ import optax
+
+ import __main__ as train_script
+ from t5.data import mixtures
+ from t5x import models
+ from t5x import partitioning
+ from t5x import utils
+
+ include 't5x/examples/t5/mt5/base.gin'
+ include 't5x/configs/runs/finetune.gin'
+
+ MIXTURE_OR_TASK_NAME = %gin.REQUIRED
+ TASK_FEATURE_LENGTHS = {"inputs": 256, "targets": 256}
+ INITIAL_CHECKPOINT_PATH = %gin.REQUIRED
+ LR = %gin.REQUIRED
+ TRAIN_STEPS = %gin.REQUIRED  # Absolute count: pre-training steps + fine-tuning steps.
+ USE_CACHED_TASKS = False
+ DROPOUT_RATE = 0.1
+ RANDOM_SEED = 0
+ BATCH_SIZE = 128
+ EVAL_PERIOD = 1000
+
+ # Use the training feature lengths for inference evaluation as well.
+ infer_eval/utils.DatasetConfig:
+   task_feature_lengths = %TASK_FEATURE_LENGTHS
+
+ # Save a checkpoint every 1000 steps, keeping only the most recent one.
+ utils.SaveCheckpointConfig:
+   period = 1000
+   keep = 1  # number of checkpoints to keep
+
+
+ # Optional AdamW optimizer, left disabled for this run:
+ # import t5x.optimizers
+ # OPTIMIZER = @optax.adamw
+ # optax.adamw.learning_rate = %LR
+ # optax.adamw.weight_decay = 0.1
+
+
+ utils.create_learning_rate_scheduler:
+   factors = 'constant'
+   base_learning_rate = %LR
+   warmup_steps = 1000
+
+ # Might have to be changed based on the architecture:
+ # partitioning.PjitPartitioner.num_partitions = 1
+
+
norbench_norcola_bs128.sh ADDED
@@ -0,0 +1,54 @@
+ #!/bin/bash
+ PROJECT_DIR="${HOME}/models/t5-nynorsk-norbench"
+ export PYTHONPATH=${PROJECT_DIR}
+ echo "PROJECT_DIR is set to: ${PROJECT_DIR}"
+
+
+ FINETUNE_STEPS=5000
+ EVAL_PREFIX="norbench/norbench_norcola_bs128"
+ MODEL_BUCKET_DIR="gs://pere-north-t5x/finetuned/"
+
+ CHECKPOINT_LIST=(
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+ )
+
+ NAME_LIST=(
+   "north_t5_base_NCC_LR_0_0006"
+   "north_t5_base_NCC_LR_0_0008"
+   "north_t5_base_NCC_LR_0_001"
+   "north_t5_base_NCC_LR_0_002"
+ )
+
+ TASK_LIST=("norcola" "norcola" "norcola" "norcola")
+ LR_LIST=(0.0006 0.0008 0.001 0.002)
+
+
+ GIN_LIST=(
+   "norbench_norcola_bs128.gin"
+   "norbench_norcola_bs128.gin"
+   "norbench_norcola_bs128.gin"
+   "norbench_norcola_bs128.gin"
+ )
+
+ START_LIST=(1500000 1500000 1500000 1500000)
+ EXP_LIST=(1 2 3 4)
+
+
+ # Loop through each experiment in EXP_LIST
+ for i in "${!EXP_LIST[@]}"; do
+   INITIAL_CHECKPOINT_PATH="gs://${CHECKPOINT_LIST[i]}"
+   TRAIN_STEPS=$((START_LIST[i] + FINETUNE_STEPS))
+   GIN_FILE=${GIN_LIST[i]}
+   MIXTURE_OR_TASK_NAME=${TASK_LIST[i]}
+   LR=${LR_LIST[i]}
+   EXP=${EXP_LIST[i]}
+   MODEL_DIR="${MODEL_BUCKET_DIR}${EVAL_PREFIX}_exp${EXP}_${NAME_LIST[i]}"
+
+   command="python3 ../../t5x/t5x/train.py --gin_search_paths=\"./\" --gin.TRAIN_STEPS=${TRAIN_STEPS} --gin.LR=${LR} --gin_file=${GIN_FILE} --gin.INITIAL_CHECKPOINT_PATH=\\\"${INITIAL_CHECKPOINT_PATH}\\\" --gin.MIXTURE_OR_TASK_NAME=\\\"${MIXTURE_OR_TASK_NAME}\\\" --gin.MODEL_DIR=\\\"${MODEL_DIR}\\\""
+   echo "${command}"
+   # Run the command:
+   eval "${command}"
+ done
norbench_translate_bs128.gin ADDED
@@ -0,0 +1,52 @@
+ from __gin__ import dynamic_registration
+ import tasks
+ import seqio
+ import optax
+
+ import __main__ as train_script
+ from t5.data import mixtures
+ from t5x import models
+ from t5x import partitioning
+ from t5x import utils
+
+ include 't5x/examples/t5/mt5/base.gin'
+ include 't5x/configs/runs/finetune.gin'
+
+ MIXTURE_OR_TASK_NAME = %gin.REQUIRED
+ TASK_FEATURE_LENGTHS = {"inputs": 256, "targets": 256}
+ INITIAL_CHECKPOINT_PATH = %gin.REQUIRED
+ LR = %gin.REQUIRED
+ TRAIN_STEPS = %gin.REQUIRED  # Absolute count: pre-training steps + fine-tuning steps.
+ USE_CACHED_TASKS = False
+ DROPOUT_RATE = 0.1
+ RANDOM_SEED = 0
+ BATCH_SIZE = 128
+ EVAL_PERIOD = 1000
+
+ # Use the training feature lengths for inference evaluation as well.
+ infer_eval/utils.DatasetConfig:
+   task_feature_lengths = %TASK_FEATURE_LENGTHS
+
+ # Save a checkpoint every 1000 steps, keeping only the most recent one.
+ utils.SaveCheckpointConfig:
+   period = 1000
+   keep = 1  # number of checkpoints to keep
+
+
+ # Optional AdamW optimizer, left disabled for this run:
+ # import t5x.optimizers
+ # OPTIMIZER = @optax.adamw
+ # optax.adamw.learning_rate = %LR
+ # optax.adamw.weight_decay = 0.1
+
+
+ utils.create_learning_rate_scheduler:
+   factors = 'constant'
+   base_learning_rate = %LR
+   warmup_steps = 1000
+
+ # Might have to be changed based on the architecture:
+ # partitioning.PjitPartitioner.num_partitions = 1
+
+
norbench_translate_bs128.sh ADDED
@@ -0,0 +1,54 @@
+ #!/bin/bash
+ PROJECT_DIR="${HOME}/models/t5-nynorsk-norbench"
+ export PYTHONPATH=${PROJECT_DIR}
+ echo "PROJECT_DIR is set to: ${PROJECT_DIR}"
+
+
+ FINETUNE_STEPS=20000
+ EVAL_PREFIX="norbench/norbench_translate_bs128"
+ MODEL_BUCKET_DIR="gs://pere-north-t5x/finetuned/"
+
+ CHECKPOINT_LIST=(
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+   "pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
+ )
+
+ NAME_LIST=(
+   "north_t5_base_NCC_LR_0_0006"
+   "north_t5_base_NCC_LR_0_0008"
+   "north_t5_base_NCC_LR_0_001"
+   "north_t5_base_NCC_LR_0_002"
+ )
+
+ TASK_LIST=("translate_mt5" "translate_mt5" "translate_mt5" "translate_mt5")
+ LR_LIST=(0.0006 0.0008 0.001 0.002)
+
+
+ GIN_LIST=(
+   "norbench_translate_bs128.gin"
+   "norbench_translate_bs128.gin"
+   "norbench_translate_bs128.gin"
+   "norbench_translate_bs128.gin"
+ )
+
+ START_LIST=(1500000 1500000 1500000 1500000)
+ EXP_LIST=(1 2 3 4)
+
+
+ # Loop through each experiment in EXP_LIST
+ for i in "${!EXP_LIST[@]}"; do
+   INITIAL_CHECKPOINT_PATH="gs://${CHECKPOINT_LIST[i]}"
+   TRAIN_STEPS=$((START_LIST[i] + FINETUNE_STEPS))
+   GIN_FILE=${GIN_LIST[i]}
+   MIXTURE_OR_TASK_NAME=${TASK_LIST[i]}
+   LR=${LR_LIST[i]}
+   EXP=${EXP_LIST[i]}
+   MODEL_DIR="${MODEL_BUCKET_DIR}${EVAL_PREFIX}_exp${EXP}_${NAME_LIST[i]}"
+
+   command="python3 ../../t5x/t5x/train.py --gin_search_paths=\"./\" --gin.TRAIN_STEPS=${TRAIN_STEPS} --gin.LR=${LR} --gin_file=${GIN_FILE} --gin.INITIAL_CHECKPOINT_PATH=\\\"${INITIAL_CHECKPOINT_PATH}\\\" --gin.MIXTURE_OR_TASK_NAME=\\\"${MIXTURE_OR_TASK_NAME}\\\" --gin.MODEL_DIR=\\\"${MODEL_DIR}\\\""
+   echo "${command}"
+   # Run the command:
+   eval "${command}"
+ done
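The three launcher scripts differ only in FINETUNE_STEPS, EVAL_PREFIX, TASK_LIST, and GIN_LIST, and within each script only the learning rate actually varies across the four experiments. A possible simplification, a sketch only and not part of this commit (it reuses the MODEL_BUCKET_DIR, EVAL_PREFIX, and FINETUNE_STEPS variables defined above), would derive the run name from the learning rate and drop the parallel arrays:

    # Sketch: one loop over learning rates; checkpoint, gin file, and task are constant.
    CHECKPOINT="gs://pere-north-t5x/pretrained_models/base/norwegian_NCC_plus_English_t5x_base/checkpoint_1500000"
    LR_LIST=(0.0006 0.0008 0.001 0.002)
    for i in "${!LR_LIST[@]}"; do
      LR=${LR_LIST[i]}
      EXP=$((i + 1))
      NAME="north_t5_base_NCC_LR_${LR//./_}"   # 0.0006 -> north_t5_base_NCC_LR_0_0006
      MODEL_DIR="${MODEL_BUCKET_DIR}${EVAL_PREFIX}_exp${EXP}_${NAME}"
      TRAIN_STEPS=$((1500000 + FINETUNE_STEPS))
      # build and eval the train.py command exactly as in the loops above
    done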