End of training
- README.md +12 -9
- config.json +1 -1
- model.safetensors +1 -1
- runs/Dec17_11-57-45_f20db7578e83/events.out.tfevents.1702814266.f20db7578e83.6609.3 +3 -0
- tokenizer.json +1 -6
- tokenizer_config.json +4 -0
- training_args.bin +1 -1
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 license: apache-2.0
-base_model:
+base_model: oSabre/opus_books_es_pt
 tags:
 - generated_from_trainer
 datasets:
@@ -22,7 +22,7 @@ model-index:
     metrics:
     - name: Bleu
      type: bleu
-     value: 0.
+     value: 0.5339
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,11 +30,11 @@ should probably proofread and complete it, then remove this comment. -->

 # opus_books_es_pt

-This model is a fine-tuned version of [
+This model is a fine-tuned version of [oSabre/opus_books_es_pt](https://huggingface.co/oSabre/opus_books_es_pt) on the opus_books dataset.
 It achieves the following results on the evaluation set:
-- Loss: 3.
+- Loss: 3.3469
-- Bleu: 0.
+- Bleu: 0.5339
-- Gen Len: 18.
+- Gen Len: 18.3615

 ## Model description

@@ -59,15 +59,18 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs:
+- num_epochs: 5
 - mixed_precision_training: Native AMP

 ### Training results

 | Training Loss | Epoch | Step | Validation Loss | Bleu   | Gen Len |
 |:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
-| No log        | 1.0   | 53   | 3.
-| No log        | 2.0   | 106  | 3.
+| No log        | 1.0   | 53   | 3.5692          | 0.1211 | 18.1174 |
+| No log        | 2.0   | 106  | 3.4566          | 0.4345 | 18.2207 |
+| No log        | 3.0   | 159  | 3.3922          | 0.4381 | 18.3756 |
+| No log        | 4.0   | 212  | 3.3586          | 0.5165 | 18.3615 |
+| No log        | 5.0   | 265  | 3.3469          | 0.5339 | 18.3615 |


 ### Framework versions
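The card, config, and tokenizer files in this commit describe a T5 checkpoint published as oSabre/opus_books_es_pt. A minimal inference sketch follows; the Spanish-to-Portuguese direction is inferred from the repo name and the opus_books dataset, and the `translate Spanish to Portuguese:` task prefix is an assumption, since the fine-tuning script is not part of this commit.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

repo_id = "oSabre/opus_books_es_pt"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)

# Assumed T5-style task prefix; not confirmed by the model card.
text = "translate Spanish to Portuguese: La vida es un viaje, no un destino."
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```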
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "oSabre/opus_books_es_pt",
   "architectures": [
     "T5ForConditionalGeneration"
   ],
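The only change to config.json is `_name_or_path`, which records where the configuration was loaded from. A quick sketch for confirming what was saved, with field names taken directly from the diff:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("oSabre/opus_books_es_pt")
print(config.architectures)   # ["T5ForConditionalGeneration"], as in the diff
print(config._name_or_path)   # the checkpoint path recorded at load time
```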
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1d3f2a9310a83889c53273045357eb92b1fb1288d913789c8575b3bec01a1fa8
 size 242041896
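model.safetensors is stored via Git LFS, so the diff only updates the pointer (oid and size). A small sketch, assuming the standard LFS pointer format shown above, for verifying that a downloaded copy matches the new oid:

```python
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download("oSabre/opus_books_es_pt", "model.safetensors")

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# Should print the oid recorded in the pointer above.
print(sha256.hexdigest())
```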
runs/Dec17_11-57-45_f20db7578e83/events.out.tfevents.1702814266.f20db7578e83.6609.3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bb801def61d4fa3f63c9bf360239ee268e18892759d0812cf3bc4a5d0969775
+size 7566
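The new file under runs/ is a TensorBoard event log for this training run, again stored as an LFS pointer. Assuming the repository has been cloned locally so the runs/ directory exists, the logged scalars can be read back as sketched below; the `eval/...` tag names follow the Trainer's usual convention and are not visible in this diff:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Directory name as listed in this commit.
log = EventAccumulator("runs/Dec17_11-57-45_f20db7578e83")
log.Reload()

print(log.Tags()["scalars"])            # available scalar tags, e.g. "eval/bleu" (assumed)
for event in log.Scalars("eval/bleu"):  # assumed tag name
    print(event.step, event.value)
```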
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 400,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
tokenizer_config.json CHANGED
@@ -930,8 +930,12 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "</s>",
   "extra_ids": 100,
+  "max_length": 400,
   "model_max_length": 512,
   "pad_token": "<pad>",
+  "stride": 0,
   "tokenizer_class": "T5Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "<unk>"
 }
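Taken together, the two tokenizer diffs remove the truncation block that was hard-wired into tokenizer.json and record max_length, stride, and the truncation side/strategy in tokenizer_config.json instead, so truncation is applied only when requested at call time. A minimal sketch using the values from the diff:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("oSabre/opus_books_es_pt")

# Truncation is no longer baked into tokenizer.json; request it per call.
batch = tokenizer(
    "Una frase de ejemplo lo bastante larga como para necesitar truncamiento.",
    truncation="longest_first",  # "truncation_strategy" in tokenizer_config.json
    max_length=400,              # "max_length" in tokenizer_config.json
    stride=0,
)
print(len(batch["input_ids"]))
```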
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:186a44097e196c79414bf8883ed56c83c6e3c107f7db5663560f70eefb70910d
 size 4856
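training_args.bin is the pickled TrainingArguments object the Trainer saves next to the model, stored here as an LFS pointer. A sketch for inspecting it locally; `weights_only=False` is needed on recent PyTorch because this file is a full pickle rather than a tensor checkpoint, so only load it from repositories you trust:

```python
import torch
from huggingface_hub import hf_hub_download

path = hf_hub_download("oSabre/opus_books_es_pt", "training_args.bin")
args = torch.load(path, weights_only=False)  # full unpickle of TrainingArguments

print(args.num_train_epochs)          # 5, matching num_epochs in the updated README
print(args.seed, args.lr_scheduler_type)
```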