guymorlan committed
Commit f7bb7af
1 parent: 14577f3
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "/home/etherx/translation/results_en2ar_dialect/Helsinki-NLP/opus-mt-en-ar__12epochs/checkpoint-2304/",
+ "_name_or_path": "Helsinki-NLP/opus-mt-en-ar",
  "activation_dropout": 0.0,
  "activation_function": "swish",
  "add_bias_logits": false,
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0e45ac515dba05240c58d635cbddc8643c7e9fe41092bb1ee4f7424a1ccecf0
+ size 610493445
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7c474d828b8c29372be59e235f08360a0f398d4f321ac08ef77152a065e90d37
3
- size 305506629
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9710e00a204f5ebd367111db83464c2b2e3f68d65d1f7bc6f1f446275ea9025f
3
+ size 305510213
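
pytorch_model.bin and the other binary files in this commit are stored as Git LFS pointers: three text lines giving the spec version, the sha256 oid of the real blob, and its byte size. A small sketch, with placeholder file names, of how such a pointer can be checked against a downloaded blob:

# Sketch: verify a downloaded blob against its Git LFS pointer.
# Both paths are placeholders for illustration only.
import hashlib

def parse_lfs_pointer(pointer_path):
    fields = dict(line.split(" ", 1) for line in open(pointer_path).read().splitlines() if line)
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def verify(blob_path, pointer_path):
    expected_oid, expected_size = parse_lfs_pointer(pointer_path)
    data = open(blob_path, "rb").read()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

print(verify("pytorch_model.bin", "pytorch_model.bin.pointer"))  # True if blob matches the pointer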
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5710e21522536bddb2a303b2d2f26c04cfe62960b65ffd1052b6d0b97e43649a
+ size 14639
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7102c557d3e1820272886f4582d50e2eee3a280f99efca8a3a99e52e20a2107f
+ size 627
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
  {
  "eos_token": "</s>",
  "model_max_length": 512,
- "name_or_path": "/home/etherx/translation/results_en2ar_dialect/Helsinki-NLP/opus-mt-en-ar__12epochs/checkpoint-2304/",
+ "name_or_path": "Helsinki-NLP/opus-mt-en-ar",
  "pad_token": "<pad>",
  "return_tensors": "pt",
  "separate_vocabs": false,
trainer_state.json ADDED
@@ -0,0 +1,175 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 8.999349381912817,
+ "global_step": 6912,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.7287326388888894e-05,
+ "loss": 2.9288,
+ "step": 500
+ },
+ {
+ "epoch": 1.0,
+ "eval_bleu": 12.897750004409371,
+ "eval_loss": 2.277336835861206,
+ "eval_runtime": 3373.4947,
+ "eval_samples_per_second": 2.43,
+ "eval_steps_per_second": 0.304,
+ "step": 768
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 4.457465277777778e-05,
+ "loss": 2.3415,
+ "step": 1000
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 4.186197916666667e-05,
+ "loss": 2.1063,
+ "step": 1500
+ },
+ {
+ "epoch": 2.0,
+ "eval_bleu": 15.060690733469018,
+ "eval_loss": 2.0984933376312256,
+ "eval_runtime": 3630.6371,
+ "eval_samples_per_second": 2.258,
+ "eval_steps_per_second": 0.282,
+ "step": 1536
+ },
+ {
+ "epoch": 2.6,
+ "learning_rate": 3.914930555555556e-05,
+ "loss": 1.884,
+ "step": 2000
+ },
+ {
+ "epoch": 3.0,
+ "eval_bleu": 16.01923837036625,
+ "eval_loss": 2.0154550075531006,
+ "eval_runtime": 3598.1389,
+ "eval_samples_per_second": 2.278,
+ "eval_steps_per_second": 0.285,
+ "step": 2304
+ },
+ {
+ "epoch": 3.26,
+ "learning_rate": 3.643663194444444e-05,
+ "loss": 1.7932,
+ "step": 2500
+ },
+ {
+ "epoch": 3.91,
+ "learning_rate": 3.372395833333333e-05,
+ "loss": 1.6813,
+ "step": 3000
+ },
+ {
+ "epoch": 4.0,
+ "eval_bleu": 16.69411603598328,
+ "eval_loss": 1.9759808778762817,
+ "eval_runtime": 3156.7439,
+ "eval_samples_per_second": 2.597,
+ "eval_steps_per_second": 0.325,
+ "step": 3072
+ },
+ {
+ "epoch": 4.56,
+ "learning_rate": 3.1011284722222224e-05,
+ "loss": 1.5634,
+ "step": 3500
+ },
+ {
+ "epoch": 5.0,
+ "eval_bleu": 17.655854646910004,
+ "eval_loss": 1.9511423110961914,
+ "eval_runtime": 3562.0988,
+ "eval_samples_per_second": 2.301,
+ "eval_steps_per_second": 0.288,
+ "step": 3840
+ },
+ {
+ "epoch": 5.21,
+ "learning_rate": 2.8298611111111113e-05,
+ "loss": 1.5116,
+ "step": 4000
+ },
+ {
+ "epoch": 5.86,
+ "learning_rate": 2.55859375e-05,
+ "loss": 1.4307,
+ "step": 4500
+ },
+ {
+ "epoch": 6.0,
+ "eval_bleu": 17.453809817741682,
+ "eval_loss": 1.9419958591461182,
+ "eval_runtime": 3674.8309,
+ "eval_samples_per_second": 2.231,
+ "eval_steps_per_second": 0.279,
+ "step": 4608
+ },
+ {
+ "epoch": 6.51,
+ "learning_rate": 2.287326388888889e-05,
+ "loss": 1.3663,
+ "step": 5000
+ },
+ {
+ "epoch": 7.0,
+ "eval_bleu": 17.864149441439213,
+ "eval_loss": 1.9385316371917725,
+ "eval_runtime": 3083.4711,
+ "eval_samples_per_second": 2.658,
+ "eval_steps_per_second": 0.332,
+ "step": 5376
+ },
+ {
+ "epoch": 7.16,
+ "learning_rate": 2.016059027777778e-05,
+ "loss": 1.3295,
+ "step": 5500
+ },
+ {
+ "epoch": 7.81,
+ "learning_rate": 1.7447916666666666e-05,
+ "loss": 1.2783,
+ "step": 6000
+ },
+ {
+ "epoch": 8.0,
+ "eval_bleu": 17.90688441590411,
+ "eval_loss": 1.9373716115951538,
+ "eval_runtime": 3414.7309,
+ "eval_samples_per_second": 2.4,
+ "eval_steps_per_second": 0.3,
+ "step": 6144
+ },
+ {
+ "epoch": 8.46,
+ "learning_rate": 1.4735243055555556e-05,
+ "loss": 1.2304,
+ "step": 6500
+ },
+ {
+ "epoch": 9.0,
+ "eval_bleu": 18.254785590207636,
+ "eval_loss": 1.9363410472869873,
+ "eval_runtime": 3545.3415,
+ "eval_samples_per_second": 2.312,
+ "eval_steps_per_second": 0.289,
+ "step": 6912
+ }
+ ],
+ "max_steps": 9216,
+ "num_train_epochs": 12,
+ "total_flos": 5739521784938496.0,
+ "trial_name": null,
+ "trial_params": null
+ }
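
trainer_state.json adds the training log: loss and learning rate every 500 steps plus a per-epoch evaluation, with eval_bleu rising from about 12.9 at epoch 1 to about 18.3 at epoch 9 out of 12 planned epochs (9216 max steps). The logged learning rates are consistent with a linear decay from 5e-05 over max_steps; the sketch below (the 5e-05 starting rate is inferred from the logged values, not stored in the file) extracts the evaluation curve and checks that schedule.

# Sketch: read trainer_state.json, print the per-epoch eval BLEU, and check that the
# logged learning rates follow a linear decay from an assumed initial rate of 5e-05
# (the initial rate is inferred from the logged values, not stored in the file).
import json

state = json.load(open("trainer_state.json"))
max_steps = state["max_steps"]  # 9216

for entry in state["log_history"]:
    if "eval_bleu" in entry:
        print(f'epoch {entry["epoch"]:>4}: BLEU {entry["eval_bleu"]:.2f}, eval_loss {entry["eval_loss"]:.3f}')
    elif "learning_rate" in entry:
        expected = 5e-05 * (max_steps - entry["step"]) / max_steps
        assert abs(entry["learning_rate"] - expected) < 1e-10, entry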
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc1ecc373bc106cc887c3257716a3524fcf9836bb90e6914188d48139988306d
+ size 3643