Chen YiJia committed on
Commit 5cedadf
1 Parent(s): 69918e5

first epoch
first_epoch/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "_name_or_path": "outputs",
+ "architectures": [
+ "MT5ForConditionalGeneration"
+ ],
+ "d_ff": 1024,
+ "d_kv": 64,
+ "d_model": 512,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "gelu_new",
+ "dropout_rate": 0.1,
+ "eos_token_id": 1,
+ "feed_forward_proj": "gated-gelu",
+ "initializer_factor": 1.0,
+ "is_encoder_decoder": true,
+ "is_gated_act": true,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "mt5",
+ "num_decoder_layers": 8,
+ "num_heads": 6,
+ "num_layers": 8,
+ "pad_token_id": 0,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 32,
+ "tie_word_embeddings": false,
+ "tokenizer_class": "T5Tokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.27.4",
+ "use_cache": true,
+ "vocab_size": 250112
+ }
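Aside: the config names both the architecture and tokenizer class, so a checkpoint directory with this layout should load directly with transformers. A minimal sketch, assuming the repo has been cloned with its LFS files pulled and the first_epoch/ path is used as-is:

```python
from transformers import MT5ForConditionalGeneration, T5Tokenizer

# "architectures" and "tokenizer_class" above point at these classes;
# from_pretrained() reads config.json, pytorch_model.bin, and spiece.model
# from the checkpoint directory committed here.
model = MT5ForConditionalGeneration.from_pretrained("first_epoch")
tokenizer = T5Tokenizer.from_pretrained("first_epoch")
```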
first_epoch/eval_results.txt ADDED
@@ -0,0 +1 @@
+ eval_loss = 2.8873880844224584
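Assuming eval_loss is the mean per-token cross-entropy, this corresponds to a validation perplexity of exp(2.887) ≈ 17.9 after the first epoch.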
first_epoch/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "decoder_start_token_id": 0,
+ "eos_token_id": 1,
+ "pad_token_id": 0,
+ "transformers_version": "4.27.4"
+ }
first_epoch/model_args.json ADDED
@@ -0,0 +1 @@
+ {"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": false, "adafactor_scale_parameter": false, "adafactor_warmup_init": false, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/best_model", "cache_dir": "cache_dir/", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "eval_batch_size": 20, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 200, "evaluate_during_training_verbose": false, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.001, "local_rank": -1, "logging_steps": 50, "loss_type": null, "loss_args": {}, "manual_seed": null, "max_grad_norm": 1.0, "max_seq_length": 96, "model_name": "outputs", "model_type": "mt5", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": true, "no_save": false, "not_saved_args": [], "num_train_epochs": 4, "optimizer": "Adafactor", "output_dir": "outputs/", "overwrite_output_dir": false, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 2, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": false, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": -1, "scheduler": "constant_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 20, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": true, "wandb_kwargs": {}, "wandb_project": "MT5 English-Chinese Translation", "warmup_ratio": 0.06, "warmup_steps": 5551, "weight_decay": 0.0, "model_class": "T5Model", "dataset_class": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": false, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "preprocess_inputs": false, "repetition_penalty": 1.0, "special_tokens_list": [], "top_k": null, "top_p": null, "use_multiprocessed_decoding": true}
first_epoch/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc41bbe83da72953f4a81cb7c890d2e72173ffbf78b53c99bb921f8de97089e8
+ size 2879429
first_epoch/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b70817e39971bcc7b3fa896c7ccb218f90d4ab826f6c22d91e6acccd6da7501d
+ size 1200772485
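The .pt/.bin entries in this commit are Git LFS pointer files (spec version, sha256 oid, byte size), not the weights themselves, so a plain clone fetches only these small stubs. Assuming git-lfs is installed, the ~1.2 GB weights can be pulled selectively, for example:

```bash
git lfs install
git lfs pull --include="first_epoch/pytorch_model.bin"
```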
first_epoch/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:439a6cd8a3b103eaafbd3c0ef26fb99265c72def63ccb9ca9b250e82a1d9d11c
+ size 627
first_epoch/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "eos_token": "</s>",
+ "pad_token": "<pad>",
+ "unk_token": "<unk>"
+ }
first_epoch/spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+ size 4309802
first_epoch/tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "additional_special_tokens": null,
+ "eos_token": "</s>",
+ "extra_ids": 0,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<pad>",
+ "sp_model_kwargs": {},
+ "special_tokens_map_file": "/home/ec2-user/.cache/huggingface/hub/models--google--mt5-small/snapshots/38f23af8ec210eb6c376d40e9c56bd25a80f195d/special_tokens_map.json",
+ "tokenizer_class": "T5Tokenizer",
+ "truncate": true,
+ "unk_token": "<unk>"
+ }
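The oversized model_max_length is transformers' int(1e30) sentinel, meaning no length limit was recorded for this tokenizer; the effective limit for this model is the max_seq_length of 96 set in model_args.json.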
first_epoch/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98a8c70371bd2f02f5ec3c8e670ec3c404c1635958b16e861bc0a61c11beefc5
+ size 3195
test.py ADDED
@@ -0,0 +1,38 @@
+ from datasets.load import load_dataset
+ import logging
+ import sacrebleu
+ import pandas as pd
+ from simpletransformers.t5 import T5Model, T5Args
+
+ raw_datasets = load_dataset('iwslt2017', 'iwslt2017-zh-en')
+
+ logging.basicConfig(level=logging.INFO)
+ transformers_logger = logging.getLogger("transformers")
+ transformers_logger.setLevel(logging.WARNING)
+
+
+ model_args = T5Args()
+ model_args.max_length = 512
+ model_args.length_penalty = 1
+ model_args.num_beams = 10
+
+ model = T5Model("mt5", "outputs", args=model_args)
+
+ en_zh_test = pd.DataFrame(raw_datasets['test']['translation'])
+ zh_truth = en_zh_test['zh'].tolist()
+ en_input = en_zh_test['en'].tolist()
+
+ # The model was trained with task prefixes and predict() does not add
+ # them, so prepend the prefix here (the "prefix: text" join is assumed;
+ # it must mirror whatever format training preprocessing used).
+ zh_preds = model.predict(["translate english to chinese: " + s for s in en_input])
+ # corpus_bleu expects a list of reference streams, so the single
+ # reference list is wrapped; tokenize="zh" scores Chinese at character level.
+ en_zh_bleu = sacrebleu.corpus_bleu(zh_preds, [zh_truth], tokenize="zh")
+ print("----------------------------------------------")
+ print("English to Chinese: ", en_zh_bleu.score)
+
+ en_preds = model.predict(["translate chinese to english: " + s for s in zh_truth])
+ zh_en_bleu = sacrebleu.corpus_bleu(en_preds, [en_input])
+ print("----------------------------------------------")
+ print("Chinese to English: ", zh_en_bleu.score)
train.py ADDED
@@ -0,0 +1,54 @@
+ from datasets.load import load_dataset
+ import pandas as pd
+ import logging
+ from simpletransformers.t5 import T5Args, T5Model
+
+ logging.basicConfig(level=logging.INFO)
+ transformers_logger = logging.getLogger("transformers")
+ transformers_logger.setLevel(logging.WARNING)
+
+ raw_datasets = load_dataset('iwslt2017', 'iwslt2017-zh-en')
+
+ # Build a bidirectional dataset: each sentence pair is used once per
+ # direction, with a task prefix marking the requested direction. The
+ # positional rename assumes the translation columns arrive as [en, zh].
+ train_df = pd.DataFrame(raw_datasets['train']['translation'])
+ train_df.columns = ['input_text', 'target_text']
+ reverse_df = train_df.copy()
+ reverse_df.columns = ['target_text', 'input_text']
+ train_df['prefix'] = 'translate english to chinese'
+ reverse_df['prefix'] = 'translate chinese to english'
+ train_df = pd.concat([train_df, reverse_df])
+
+ eval_df = pd.DataFrame(raw_datasets['validation']['translation'])
+ eval_df.columns = ['input_text', 'target_text']
+ reverse_df = eval_df.copy()
+ reverse_df.columns = ['target_text', 'input_text']
+ eval_df['prefix'] = 'translate english to chinese'
+ reverse_df['prefix'] = 'translate chinese to english'
+ eval_df = pd.concat([eval_df, reverse_df])
+
+ model_args = T5Args()
+ model_args.max_seq_length = 96
+ model_args.train_batch_size = 20
+ model_args.eval_batch_size = 20
+ model_args.num_train_epochs = 4
+ model_args.evaluate_during_training = True
+ model_args.evaluate_during_training_steps = 5000
+ model_args.use_multiprocessing = False
+ model_args.fp16 = False
+ model_args.save_steps = -1
+ model_args.save_model_every_epoch = True
+ model_args.save_eval_checkpoints = False
+ model_args.no_cache = True
+ model_args.reprocess_input_data = True
+ model_args.overwrite_output_dir = False
+ model_args.preprocess_inputs = False
+ model_args.num_return_sequences = 1
+ model_args.wandb_project = "MT5 English-Chinese Translation"
+
+ # Continue training from the checkpoint in outputs/; new checkpoints are
+ # written to mt5_more_epochs/.
+ model = T5Model("mt5", "outputs", args=model_args)
+
+ model.train_model(train_df, eval_data=eval_df, output_dir='mt5_more_epochs')
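As a rough consistency check on the saved arguments: warmup_steps = 5551 with warmup_ratio = 0.06 implies about 5551 / 0.06 ≈ 92,500 total optimizer steps; over 4 epochs at batch size 20 that is roughly 462,000 examples per epoch, consistent with the ~231k IWSLT 2017 zh-en training pairs used once in each direction.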