Merge branch 'main' of https://huggingface.co/pere/eu-jav-categorisation into main
batch_finetune_eu_jav_base_exp_dropout_0.sh
ADDED
@@ -0,0 +1,7 @@
+PROJECT_DIR=${HOME}"/models/eu-jav-categorisation"
+export PYTHONPATH=${PROJECT_DIR}
+INITIAL_CHECKPOINT_PATH=\"gs://t5-data/pretrained_models/t5x/mt5_base/checkpoint_1000000\"
+TRAIN_STEPS=1010000
+
+python3 ../../t5x/t5x/train.py --gin_search_paths="./" --gin.TRAIN_STEPS=${TRAIN_STEPS} --gin_file="finetune_classification_base_exp_dropout_0.gin" --gin.INITIAL_CHECKPOINT_PATH=${INITIAL_CHECKPOINT_PATH} --gin.MIXTURE_OR_TASK_NAME=\"classify_tweets\" --gin.MODEL_DIR=\"gs://eu-jav-t5x/finetuned/italian_tweets/exp_dropout_0_classify_tweets_base_v1\"
+
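A note on the quoting: the backslash-escaped quotes in INITIAL_CHECKPOINT_PATH and in the --gin.MIXTURE_OR_TASK_NAME and --gin.MODEL_DIR flags are deliberate. Gin parses string-valued bindings itself, so the literal double quotes must survive shell expansion and reach train.py as part of the argument (e.g. --gin.MIXTURE_OR_TASK_NAME="classify_tweets"); without them, gin would try to interpret the value as a configurable reference rather than a string.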
finetune_classification_base_exp_dropout_0.gin
ADDED
@@ -0,0 +1,39 @@
+from __gin__ import dynamic_registration
+import tasks
+
+import __main__ as train_script
+from t5.data import mixtures
+from t5x import models
+from t5x import partitioning
+from t5x import utils
+
+include "t5x/examples/t5/mt5/base.gin"
+include "t5x/configs/runs/finetune.gin"
+
+MIXTURE_OR_TASK_NAME = %gin.REQUIRED
+TASK_FEATURE_LENGTHS = {"inputs": 256, "targets": 2}
+INITIAL_CHECKPOINT_PATH = %gin.REQUIRED
+TRAIN_STEPS = %gin.REQUIRED  # 1000000 pre-trained steps + 10000 fine-tuning steps.
+USE_CACHED_TASKS = False
+DROPOUT_RATE = 0
+RANDOM_SEED = 0
+
+# Fixing a small error: infer_eval must use the same task feature lengths.
+infer_eval/utils.DatasetConfig:
+  task_feature_lengths = %TASK_FEATURE_LENGTHS
+
+# Saving every 500 steps
+utils.SaveCheckpointConfig:
+  period = 500
+
+
+# Pere: Only necessary if we load a T5 model. We can start with a t5x model here.
+# `LOSS_NORMALIZING_FACTOR`: When fine-tuning a model that was pre-trained
+# using Mesh Tensorflow (e.g. the public T5 / mT5 / ByT5 models), this should be
+# set to `pretraining batch_size` * `target_token_length`. For T5 and T5.1.1:
+# `2048 * 114`. For mT5: `1024 * 229`. For ByT5: `1024 * 189`.
+# LOSS_NORMALIZING_FACTOR = 234496
+
+# Might have to be changed based on architecture
+# partitioning.PjitPartitioner.num_partitions = 1
+
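The gin file's `import tasks` pulls in a local tasks.py (not part of this diff) that registers the `classify_tweets` task the launch script selects; the `targets` length of 2 suggests a single label token plus EOS. As a rough illustration only, a seqio registration for such a task could look like the sketch below, where the TSV paths, field names, and label format are assumptions, not taken from this repository:

import functools

import seqio
from t5.data import preprocessors

# mT5's SentencePiece vocabulary, matching the mt5_base checkpoint above.
VOCABULARY = seqio.SentencePieceVocabulary(
    "gs://t5-data/vocabs/mc4.250000.100extra/sentencepiece.model")

seqio.TaskRegistry.add(
    "classify_tweets",
    # Hypothetical TSV files with one "tweet<TAB>label" pair per line.
    source=seqio.TextLineDataSource(
        split_to_filepattern={
            "train": "gs://eu-jav-t5x/corpus/train.tsv",
            "validation": "gs://eu-jav-t5x/corpus/validation.tsv",
        }),
    preprocessors=[
        # Split each line into "inputs" (tweet text) and "targets" (label).
        functools.partial(
            preprocessors.parse_tsv, field_names=["inputs", "targets"]),
        seqio.preprocessors.tokenize_and_append_eos,
    ],
    output_features={
        "inputs": seqio.Feature(vocabulary=VOCABULARY, add_eos=True),
        "targets": seqio.Feature(vocabulary=VOCABULARY, add_eos=True),
    },
)

Since the gin sets USE_CACHED_TASKS = False, the data is tokenized on the fly and no seqio.CacheDatasetPlaceholder() should be needed in the preprocessor list.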