Training in progress, step 500
- .ipynb_checkpoints/lora_orpo-checkpoint.yaml +43 -0
- adapter_config.json +34 -0
- adapter_model.safetensors +3 -0
- lora_orpo.yaml +43 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +45 -0
- trainer_log.jsonl +51 -0
- training_args.bin +3 -0
.ipynb_checkpoints/lora_orpo-checkpoint.yaml
ADDED
@@ -0,0 +1,43 @@
+### model
+model_name_or_path: lmsys/vicuna-7b-v1.5
+
+### method
+stage: orpo
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: dpo_mix_en,bct_non_cot_dpo_1000
+dataset_dir: data_private
+template: vicuna
+cutoff_len: 1024
+# max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: saves/Vicuna-7B-v1.5/lora/orpo-salt
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+save_total_limit: 3
+load_best_model_at_end: true
+push_to_hub: true
+hub_model_id: chchen/Vicuna-7B-v1.5-ORPO-SALT
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 0.000005
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_steps: 0.1
+bf16: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+evaluation_strategy: steps
+eval_steps: 500
adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "lmsys/vicuna-7b-v1.5",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "o_proj",
+    "up_proj",
+    "down_proj",
+    "gate_proj",
+    "v_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
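
A minimal sketch (not part of this commit) of loading the adapter described above with PEFT, using the base model and hub id recorded in this config; merge_and_unload is optional and folds the LoRA weights into the base model:

from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model named in adapter_config.json, then attach the adapter.
base = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.5")
model = PeftModel.from_pretrained(base, "chchen/Vicuna-7B-v1.5-ORPO-SALT")
tokenizer = AutoTokenizer.from_pretrained("chchen/Vicuna-7B-v1.5-ORPO-SALT")

model = model.merge_and_unload()  # optional: returns a plain CausalLM with LoRA merged in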
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51605f9cca2a85cc2ed9c4069609caa3141db7c79b539dbff89d1aa1e5842f63
+size 80013120
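
The weights themselves live in Git LFS; the pointer's oid field is the SHA-256 of the real file content, so a download can be verified like this (a sketch, with the local path assumed):

import hashlib

def lfs_oid(path: str) -> str:
    # Stream the file so large checkpoints need not fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

assert lfs_oid("adapter_model.safetensors") == (
    "51605f9cca2a85cc2ed9c4069609caa3141db7c79b539dbff89d1aa1e5842f63"
)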
lora_orpo.yaml
ADDED
@@ -0,0 +1,43 @@
+### model
+model_name_or_path: lmsys/vicuna-7b-v1.5
+
+### method
+stage: orpo
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: dpo_mix_en,bct_non_cot_dpo_1000
+dataset_dir: data_private
+template: vicuna
+cutoff_len: 1024
+# max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: saves/Vicuna-7B-v1.5/lora/orpo-salt
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+save_total_limit: 3
+load_best_model_at_end: true
+push_to_hub: true
+hub_model_id: chchen/Vicuna-7B-v1.5-ORPO-SALT
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 0.000005
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_steps: 0.1
+bf16: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+evaluation_strategy: steps
+eval_steps: 500
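
This appears to be a LLaMA-Factory configuration (stage, finetuning_type, and template are its keys); such files are typically launched with llamafactory-cli train lora_orpo.yaml. A minimal Python sketch of the step arithmetic the config implies, cross-checked against trainer_log.jsonl below (single GPU assumed, since the world size is not recorded in this commit):

# Assumes one GPU; with N GPUs the effective batch is N times larger.
per_device_train_batch_size = 2
gradient_accumulation_steps = 8
num_train_epochs = 3.0
effective_batch = per_device_train_batch_size * gradient_accumulation_steps  # 16

# trainer_log.jsonl reports total_steps = 1854 over 3 epochs:
steps_per_epoch = 1854 / num_train_epochs                  # 618
approx_train_examples = steps_per_epoch * effective_batch  # ~9888, after the 0.1 val split
print(effective_batch, steps_per_epoch, approx_train_examples)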
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json
ADDED
@@ -0,0 +1,45 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "chat_template": "{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\'s questions.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'USER: ' + content + ' ASSISTANT:' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 4096,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "split_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
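
The chat_template field above encodes the Vicuna prompt format; a short sketch of rendering it through transformers (loading from the hub id this run pushes to):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("chchen/Vicuna-7B-v1.5-ORPO-SALT")
messages = [{"role": "user", "content": "What is ORPO?"}]
# Render the stored Jinja template without tokenizing, to inspect the prompt.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
# -> "A chat between a curious user ... USER: What is ORPO? ASSISTANT:"
print(prompt)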
trainer_log.jsonl
ADDED
@@ -0,0 +1,51 @@
+{"current_steps": 10, "total_steps": 1854, "loss": 1.146, "accuracy": 0.574999988079071, "learning_rate": 4.999648198770648e-06, "epoch": 0.01616488179430188, "percentage": 0.54, "elapsed_time": "0:01:30", "remaining_time": "4:38:14"}
+{"current_steps": 20, "total_steps": 1854, "loss": 1.1535, "accuracy": 0.5874999761581421, "learning_rate": 4.998578646361359e-06, "epoch": 0.03232976358860376, "percentage": 1.08, "elapsed_time": "0:03:03", "remaining_time": "4:41:03"}
+{"current_steps": 30, "total_steps": 1854, "loss": 1.1699, "accuracy": 0.42500001192092896, "learning_rate": 4.996791614004449e-06, "epoch": 0.04849464538290564, "percentage": 1.62, "elapsed_time": "0:04:37", "remaining_time": "4:40:56"}
+{"current_steps": 40, "total_steps": 1854, "loss": 1.2171, "accuracy": 0.48124998807907104, "learning_rate": 4.994287614855618e-06, "epoch": 0.06465952717720752, "percentage": 2.16, "elapsed_time": "0:06:03", "remaining_time": "4:35:03"}
+{"current_steps": 50, "total_steps": 1854, "loss": 1.1729, "accuracy": 0.5, "learning_rate": 4.991067367951343e-06, "epoch": 0.0808244089715094, "percentage": 2.7, "elapsed_time": "0:07:44", "remaining_time": "4:39:25"}
+{"current_steps": 60, "total_steps": 1854, "loss": 1.2007, "accuracy": 0.5249999761581421, "learning_rate": 4.987131798002389e-06, "epoch": 0.09698929076581128, "percentage": 3.24, "elapsed_time": "0:09:14", "remaining_time": "4:36:26"}
+{"current_steps": 70, "total_steps": 1854, "loss": 1.2342, "accuracy": 0.5, "learning_rate": 4.982482035128285e-06, "epoch": 0.11315417256011315, "percentage": 3.78, "elapsed_time": "0:10:48", "remaining_time": "4:35:15"}
+{"current_steps": 80, "total_steps": 1854, "loss": 1.0496, "accuracy": 0.5249999761581421, "learning_rate": 4.9771194145328e-06, "epoch": 0.12931905435441504, "percentage": 4.31, "elapsed_time": "0:12:22", "remaining_time": "4:34:15"}
+{"current_steps": 90, "total_steps": 1854, "loss": 1.1086, "accuracy": 0.4937500059604645, "learning_rate": 4.971045476120532e-06, "epoch": 0.1454839361487169, "percentage": 4.85, "elapsed_time": "0:13:55", "remaining_time": "4:33:04"}
+{"current_steps": 100, "total_steps": 1854, "loss": 1.1637, "accuracy": 0.5062500238418579, "learning_rate": 4.964261964054713e-06, "epoch": 0.1616488179430188, "percentage": 5.39, "elapsed_time": "0:15:28", "remaining_time": "4:31:29"}
+{"current_steps": 110, "total_steps": 1854, "loss": 1.1606, "accuracy": 0.512499988079071, "learning_rate": 4.956770826256372e-06, "epoch": 0.17781369973732067, "percentage": 5.93, "elapsed_time": "0:17:03", "remaining_time": "4:30:24"}
+{"current_steps": 120, "total_steps": 1854, "loss": 1.1411, "accuracy": 0.5, "learning_rate": 4.94857421384497e-06, "epoch": 0.19397858153162256, "percentage": 6.47, "elapsed_time": "0:18:37", "remaining_time": "4:29:00"}
+{"current_steps": 130, "total_steps": 1854, "loss": 1.0644, "accuracy": 0.518750011920929, "learning_rate": 4.939674480520701e-06, "epoch": 0.21014346332592443, "percentage": 7.01, "elapsed_time": "0:20:14", "remaining_time": "4:28:31"}
+{"current_steps": 140, "total_steps": 1854, "loss": 1.0811, "accuracy": 0.4937500059604645, "learning_rate": 4.930074181888613e-06, "epoch": 0.2263083451202263, "percentage": 7.55, "elapsed_time": "0:21:55", "remaining_time": "4:28:20"}
+{"current_steps": 150, "total_steps": 1854, "loss": 1.0929, "accuracy": 0.5, "learning_rate": 4.91977607472475e-06, "epoch": 0.2424732269145282, "percentage": 8.09, "elapsed_time": "0:23:23", "remaining_time": "4:25:42"}
+{"current_steps": 160, "total_steps": 1854, "loss": 1.02, "accuracy": 0.550000011920929, "learning_rate": 4.908783116184534e-06, "epoch": 0.2586381087088301, "percentage": 8.63, "elapsed_time": "0:24:55", "remaining_time": "4:23:58"}
+{"current_steps": 170, "total_steps": 1854, "loss": 1.0464, "accuracy": 0.5062500238418579, "learning_rate": 4.897098462953598e-06, "epoch": 0.27480299050313195, "percentage": 9.17, "elapsed_time": "0:26:28", "remaining_time": "4:22:18"}
+{"current_steps": 180, "total_steps": 1854, "loss": 0.9499, "accuracy": 0.612500011920929, "learning_rate": 4.884725470341331e-06, "epoch": 0.2909678722974338, "percentage": 9.71, "elapsed_time": "0:28:01", "remaining_time": "4:20:37"}
+{"current_steps": 190, "total_steps": 1854, "loss": 1.2649, "accuracy": 0.40625, "learning_rate": 4.871667691317377e-06, "epoch": 0.3071327540917357, "percentage": 10.25, "elapsed_time": "0:29:35", "remaining_time": "4:19:07"}
+{"current_steps": 200, "total_steps": 1854, "loss": 0.9612, "accuracy": 0.5, "learning_rate": 4.857928875491392e-06, "epoch": 0.3232976358860376, "percentage": 10.79, "elapsed_time": "0:31:04", "remaining_time": "4:16:56"}
+{"current_steps": 210, "total_steps": 1854, "loss": 1.0514, "accuracy": 0.4937500059604645, "learning_rate": 4.843512968036314e-06, "epoch": 0.33946251768033947, "percentage": 11.33, "elapsed_time": "0:32:32", "remaining_time": "4:14:45"}
+{"current_steps": 220, "total_steps": 1854, "loss": 1.2641, "accuracy": 0.543749988079071, "learning_rate": 4.828424108555486e-06, "epoch": 0.35562739947464134, "percentage": 11.87, "elapsed_time": "0:34:06", "remaining_time": "4:13:17"}
+{"current_steps": 230, "total_steps": 1854, "loss": 1.0744, "accuracy": 0.44999998807907104, "learning_rate": 4.812666629893957e-06, "epoch": 0.3717922812689432, "percentage": 12.41, "elapsed_time": "0:35:38", "remaining_time": "4:11:38"}
+{"current_steps": 240, "total_steps": 1854, "loss": 1.0315, "accuracy": 0.4749999940395355, "learning_rate": 4.796245056894273e-06, "epoch": 0.3879571630632451, "percentage": 12.94, "elapsed_time": "0:37:16", "remaining_time": "4:10:37"}
+{"current_steps": 250, "total_steps": 1854, "loss": 0.9923, "accuracy": 0.4937500059604645, "learning_rate": 4.779164105097148e-06, "epoch": 0.404122044857547, "percentage": 13.48, "elapsed_time": "0:38:52", "remaining_time": "4:09:24"}
+{"current_steps": 260, "total_steps": 1854, "loss": 0.9591, "accuracy": 0.518750011920929, "learning_rate": 4.761428679387373e-06, "epoch": 0.42028692665184886, "percentage": 14.02, "elapsed_time": "0:40:27", "remaining_time": "4:08:00"}
+{"current_steps": 270, "total_steps": 1854, "loss": 0.984, "accuracy": 0.5562499761581421, "learning_rate": 4.7430438725853515e-06, "epoch": 0.4364518084461507, "percentage": 14.56, "elapsed_time": "0:42:01", "remaining_time": "4:06:32"}
+{"current_steps": 280, "total_steps": 1854, "loss": 1.0765, "accuracy": 0.512499988079071, "learning_rate": 4.724014963984669e-06, "epoch": 0.4526166902404526, "percentage": 15.1, "elapsed_time": "0:43:38", "remaining_time": "4:05:22"}
+{"current_steps": 290, "total_steps": 1854, "loss": 1.0089, "accuracy": 0.543749988079071, "learning_rate": 4.704347417836116e-06, "epoch": 0.4687815720347545, "percentage": 15.64, "elapsed_time": "0:45:12", "remaining_time": "4:03:48"}
+{"current_steps": 300, "total_steps": 1854, "loss": 0.9833, "accuracy": 0.512499988079071, "learning_rate": 4.684046881778603e-06, "epoch": 0.4849464538290564, "percentage": 16.18, "elapsed_time": "0:46:43", "remaining_time": "4:01:59"}
+{"current_steps": 310, "total_steps": 1854, "loss": 0.954, "accuracy": 0.550000011920929, "learning_rate": 4.663119185217409e-06, "epoch": 0.5011113356233583, "percentage": 16.72, "elapsed_time": "0:48:18", "remaining_time": "4:00:34"}
+{"current_steps": 320, "total_steps": 1854, "loss": 0.9498, "accuracy": 0.53125, "learning_rate": 4.641570337650232e-06, "epoch": 0.5172762174176602, "percentage": 17.26, "elapsed_time": "0:49:51", "remaining_time": "3:59:01"}
+{"current_steps": 330, "total_steps": 1854, "loss": 1.0373, "accuracy": 0.4749999940395355, "learning_rate": 4.61940652694154e-06, "epoch": 0.533441099211962, "percentage": 17.8, "elapsed_time": "0:51:29", "remaining_time": "3:57:50"}
+{"current_steps": 340, "total_steps": 1854, "loss": 0.9917, "accuracy": 0.4749999940395355, "learning_rate": 4.596634117545689e-06, "epoch": 0.5496059810062639, "percentage": 18.34, "elapsed_time": "0:53:02", "remaining_time": "3:56:11"}
+{"current_steps": 350, "total_steps": 1854, "loss": 0.9987, "accuracy": 0.48124998807907104, "learning_rate": 4.573259648679335e-06, "epoch": 0.5657708628005658, "percentage": 18.88, "elapsed_time": "0:54:39", "remaining_time": "3:54:51"}
+{"current_steps": 360, "total_steps": 1854, "loss": 0.9737, "accuracy": 0.518750011920929, "learning_rate": 4.549289832443663e-06, "epoch": 0.5819357445948676, "percentage": 19.42, "elapsed_time": "0:56:14", "remaining_time": "3:53:23"}
+{"current_steps": 370, "total_steps": 1854, "loss": 0.8918, "accuracy": 0.4937500059604645, "learning_rate": 4.524731551896978e-06, "epoch": 0.5981006263891695, "percentage": 19.96, "elapsed_time": "0:57:48", "remaining_time": "3:51:52"}
+{"current_steps": 380, "total_steps": 1854, "loss": 1.0132, "accuracy": 0.543749988079071, "learning_rate": 4.4995918590781925e-06, "epoch": 0.6142655081834714, "percentage": 20.5, "elapsed_time": "0:59:23", "remaining_time": "3:50:21"}
+{"current_steps": 390, "total_steps": 1854, "loss": 0.9681, "accuracy": 0.581250011920929, "learning_rate": 4.473877972981797e-06, "epoch": 0.6304303899777733, "percentage": 21.04, "elapsed_time": "1:01:01", "remaining_time": "3:49:04"}
+{"current_steps": 400, "total_steps": 1854, "loss": 0.971, "accuracy": 0.5, "learning_rate": 4.447597277484894e-06, "epoch": 0.6465952717720752, "percentage": 21.57, "elapsed_time": "1:02:32", "remaining_time": "3:47:20"}
+{"current_steps": 410, "total_steps": 1854, "loss": 1.0559, "accuracy": 0.4749999940395355, "learning_rate": 4.42075731922687e-06, "epoch": 0.6627601535663771, "percentage": 22.11, "elapsed_time": "1:04:04", "remaining_time": "3:45:38"}
+{"current_steps": 420, "total_steps": 1854, "loss": 0.9348, "accuracy": 0.518750011920929, "learning_rate": 4.3933658054423465e-06, "epoch": 0.6789250353606789, "percentage": 22.65, "elapsed_time": "1:05:37", "remaining_time": "3:44:02"}
+{"current_steps": 430, "total_steps": 1854, "loss": 1.0372, "accuracy": 0.5062500238418579, "learning_rate": 4.365430601748003e-06, "epoch": 0.6950899171549808, "percentage": 23.19, "elapsed_time": "1:07:08", "remaining_time": "3:42:20"}
+{"current_steps": 440, "total_steps": 1854, "loss": 0.9849, "accuracy": 0.5062500238418579, "learning_rate": 4.336959729883925e-06, "epoch": 0.7112547989492827, "percentage": 23.73, "elapsed_time": "1:08:41", "remaining_time": "3:40:45"}
+{"current_steps": 450, "total_steps": 1854, "loss": 0.9756, "accuracy": 0.512499988079071, "learning_rate": 4.307961365410118e-06, "epoch": 0.7274196807435845, "percentage": 24.27, "elapsed_time": "1:10:18", "remaining_time": "3:39:21"}
+{"current_steps": 460, "total_steps": 1854, "loss": 0.9449, "accuracy": 0.6000000238418579, "learning_rate": 4.278443835358854e-06, "epoch": 0.7435845625378864, "percentage": 24.81, "elapsed_time": "1:11:52", "remaining_time": "3:37:50"}
+{"current_steps": 470, "total_steps": 1854, "loss": 0.9817, "accuracy": 0.5, "learning_rate": 4.248415615843523e-06, "epoch": 0.7597494443321883, "percentage": 25.35, "elapsed_time": "1:13:20", "remaining_time": "3:35:59"}
+{"current_steps": 480, "total_steps": 1854, "loss": 0.9413, "accuracy": 0.6187499761581421, "learning_rate": 4.217885329624666e-06, "epoch": 0.7759143261264903, "percentage": 25.89, "elapsed_time": "1:14:54", "remaining_time": "3:34:24"}
+{"current_steps": 490, "total_steps": 1854, "loss": 0.9699, "accuracy": 0.5062500238418579, "learning_rate": 4.186861743633911e-06, "epoch": 0.7920792079207921, "percentage": 26.43, "elapsed_time": "1:16:26", "remaining_time": "3:32:46"}
+{"current_steps": 500, "total_steps": 1854, "loss": 1.0008, "accuracy": 0.518750011920929, "learning_rate": 4.155353766456497e-06, "epoch": 0.808244089715094, "percentage": 26.97, "elapsed_time": "1:18:02", "remaining_time": "3:31:21"}
+{"current_steps": 500, "total_steps": 1854, "eval_loss": 0.9776538014411926, "epoch": 0.808244089715094, "percentage": 26.97, "elapsed_time": "1:21:15", "remaining_time": "3:40:02"}
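
Each line of the log is one JSON record; train records carry "loss" and "accuracy", while the periodic evaluation (eval_steps: 500) carries "eval_loss". A minimal sketch for reading it:

import json

with open("trainer_log.jsonl") as f:
    records = [json.loads(line) for line in f]

# Split train-step records from evaluation records by which key is present.
train = [(r["current_steps"], r["loss"]) for r in records if "loss" in r]
evals = [(r["current_steps"], r["eval_loss"]) for r in records if "eval_loss" in r]
print(train[-1])  # (500, 1.0008)
print(evals)      # [(500, 0.9776538014411926)]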
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0daaa3748d36f23ee157795a840cb87dc15f642f995592a2f234360b104f878
+size 5240
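
training_args.bin is a pickled transformers TrainingArguments object saved by the Trainer; a sketch of inspecting it (weights_only=False is needed on recent torch versions, and the file should only be unpickled if its source is trusted):

import torch

# Unpickles the TrainingArguments saved alongside the checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size)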