karunac committed (verified)
Commit 827f8ac · 1 Parent(s): 76c7add

Training in progress, epoch 1

Files changed (37)
  1. model.safetensors +1 -1
  2. run-4/checkpoint-12/config.json +25 -0
  3. run-4/checkpoint-12/model.safetensors +3 -0
  4. run-4/checkpoint-12/optimizer.pt +3 -0
  5. run-4/checkpoint-12/rng_state.pth +3 -0
  6. run-4/checkpoint-12/scheduler.pt +3 -0
  7. run-4/checkpoint-12/special_tokens_map.json +7 -0
  8. run-4/checkpoint-12/tokenizer.json +0 -0
  9. run-4/checkpoint-12/tokenizer_config.json +55 -0
  10. run-4/checkpoint-12/trainer_state.json +53 -0
  11. run-4/checkpoint-12/training_args.bin +3 -0
  12. run-4/checkpoint-12/vocab.txt +0 -0
  13. run-4/checkpoint-8/config.json +25 -0
  14. run-4/checkpoint-8/model.safetensors +3 -0
  15. run-4/checkpoint-8/optimizer.pt +3 -0
  16. run-4/checkpoint-8/rng_state.pth +3 -0
  17. run-4/checkpoint-8/scheduler.pt +3 -0
  18. run-4/checkpoint-8/special_tokens_map.json +7 -0
  19. run-4/checkpoint-8/tokenizer.json +0 -0
  20. run-4/checkpoint-8/tokenizer_config.json +55 -0
  21. run-4/checkpoint-8/trainer_state.json +44 -0
  22. run-4/checkpoint-8/training_args.bin +3 -0
  23. run-4/checkpoint-8/vocab.txt +0 -0
  24. run-5/checkpoint-25/config.json +25 -0
  25. run-5/checkpoint-25/model.safetensors +3 -0
  26. run-5/checkpoint-25/optimizer.pt +3 -0
  27. run-5/checkpoint-25/rng_state.pth +3 -0
  28. run-5/checkpoint-25/scheduler.pt +3 -0
  29. run-5/checkpoint-25/special_tokens_map.json +7 -0
  30. run-5/checkpoint-25/tokenizer.json +0 -0
  31. run-5/checkpoint-25/tokenizer_config.json +55 -0
  32. run-5/checkpoint-25/trainer_state.json +35 -0
  33. run-5/checkpoint-25/training_args.bin +3 -0
  34. run-5/checkpoint-25/vocab.txt +0 -0
  35. runs/Mar05_15-34-07_d253bcf50e04/events.out.tfevents.1709653651.d253bcf50e04.1525.7 +2 -2
  36. runs/Mar05_15-34-07_d253bcf50e04/events.out.tfevents.1709653778.d253bcf50e04.1525.8 +3 -0
  37. training_args.bin +1 -1
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e32952ce2f35921c5eebb05af7a70dd3f137622033ae91b3876188a18cb966ff
+ oid sha256:256c3ea36cc9d63a74d5579a08ff66e75bacb00961475c74463a964ff95cca0d
  size 267832560
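
Note: the diff above only swaps the Git LFS pointer for model.safetensors (a pointer file is just the three lines shown: spec version, content oid, and byte size); the 267 MB weights themselves live in LFS storage. As a minimal sketch of consuming the updated weights, assuming the repo id `karunac/distilbert-base-uncased-finetuned-cola` (inferred from the committer name and the output directory seen later in this commit, not stated in the diff):

```python
# Hedged sketch: load the classifier whose LFS-tracked weights this commit updates.
# The repo id is an assumption; transformers/huggingface_hub resolve the LFS pointer
# to the actual safetensors file at download time.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "karunac/distilbert-base-uncased-finetuned-cola"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
```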
run-4/checkpoint-12/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "vocab_size": 30522
+ }
run-4/checkpoint-12/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d6488003a4791021df9074cc760a7a58012c127220c40d8377d0eb2fb4c572b
+ size 267832560
run-4/checkpoint-12/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30e975b8997a4ec0670b7a13a7ec8a1d758c73ed6e7c46727de79872ce38decb
+ size 535724410
run-4/checkpoint-12/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1620075dd1d654fcc4a7cb9da54981b33366e0245686bfef4a2eddb21f79859a
+ size 14054
run-4/checkpoint-12/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88d50f68c8fe1369786814143917596a14347eaae0da67aa148b5f042fc2953e
+ size 1064
run-4/checkpoint-12/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
run-4/checkpoint-12/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-4/checkpoint-12/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
run-4/checkpoint-12/trainer_state.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "best_metric": 0.0,
+   "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-4/checkpoint-4",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 12,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.6419929265975952,
+       "eval_matthews_correlation": 0.0,
+       "eval_runtime": 3.2838,
+       "eval_samples_per_second": 30.452,
+       "eval_steps_per_second": 2.132,
+       "step": 4
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.6443811058998108,
+       "eval_matthews_correlation": 0.0,
+       "eval_runtime": 4.8961,
+       "eval_samples_per_second": 20.424,
+       "eval_steps_per_second": 1.43,
+       "step": 8
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 0.6548563241958618,
+       "eval_matthews_correlation": 0.0,
+       "eval_runtime": 6.2359,
+       "eval_samples_per_second": 16.036,
+       "eval_steps_per_second": 1.123,
+       "step": 12
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 12,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 9.958711989637105e-05,
+     "num_train_epochs": 3,
+     "per_device_train_batch_size": 64,
+     "seed": 29
+   }
+ }
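
Note: the trial_params block above (learning_rate, num_train_epochs, per_device_train_batch_size, seed) together with "is_hyper_param_search": true matches what transformers' Trainer.hyperparameter_search records for each Optuna trial while it writes run-*/checkpoint-* directories like those in this commit. Below is a minimal sketch of such a search; the dataset (GLUE CoLA), metric wiring, and search ranges are assumptions suggested by the repo name and the eval_matthews_correlation entries, not taken from this diff.

```python
# Hedged sketch of an Optuna-backed hyperparameter search with transformers' Trainer.
# Dataset/metric choices and the hp_space ranges are illustrative assumptions.
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Assumed task: GLUE CoLA (Matthews correlation is its standard metric).
raw = load_dataset("glue", "cola")
encoded = raw.map(lambda ex: tokenizer(ex["sentence"], truncation=True), batched=True)
metric = evaluate.load("glue", "cola")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return metric.compute(predictions=preds, references=labels)

def model_init():
    # Fresh pretrained weights for every trial so runs are comparable.
    return AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

def hp_space(trial):
    # Same keys as trial_params above; the ranges are illustrative only.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [8, 16, 32, 64]),
        "seed": trial.suggest_int("seed", 1, 40),
    }

args = TrainingArguments(
    "distilbert-base-uncased-finetuned-cola",  # matches best_model_checkpoint above
    evaluation_strategy="epoch",
    save_strategy="epoch",
)

trainer = Trainer(
    model_init=model_init,
    args=args,
    train_dataset=encoded["train"],
    eval_dataset=encoded["validation"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)

best_run = trainer.hyperparameter_search(
    direction="maximize", backend="optuna", hp_space=hp_space, n_trials=10,
)
print(best_run.hyperparameters)
```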
run-4/checkpoint-12/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77373c5a9c7c5d17be09f65236ef7854157a64134869774017cfb9730dc4ef96
+ size 4984
run-4/checkpoint-12/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-4/checkpoint-8/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "vocab_size": 30522
+ }
run-4/checkpoint-8/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:905e03a74cd21fe2059d03b397200a10207defa48ff4bff8f741d142925e5eec
+ size 267832560
run-4/checkpoint-8/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a979e10c4e5ce05277f401634c871a53299f5226d85a6f611f5f0b8318dfdef
+ size 535724410
run-4/checkpoint-8/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46d6dd331a23679c3fe454bd0fa0a6daca79317a9309f325b864b371c9b512e9
+ size 14054
run-4/checkpoint-8/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e992334c9bfb01051b53070898146c8c6195550372812ded926123f57983f73
+ size 1064
run-4/checkpoint-8/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
run-4/checkpoint-8/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-4/checkpoint-8/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
run-4/checkpoint-8/trainer_state.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "best_metric": 0.0,
+   "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-4/checkpoint-4",
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 8,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.6419929265975952,
+       "eval_matthews_correlation": 0.0,
+       "eval_runtime": 3.2838,
+       "eval_samples_per_second": 30.452,
+       "eval_steps_per_second": 2.132,
+       "step": 4
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.6443811058998108,
+       "eval_matthews_correlation": 0.0,
+       "eval_runtime": 4.8961,
+       "eval_samples_per_second": 20.424,
+       "eval_steps_per_second": 1.43,
+       "step": 8
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 12,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 9.958711989637105e-05,
+     "num_train_epochs": 3,
+     "per_device_train_batch_size": 64,
+     "seed": 29
+   }
+ }
run-4/checkpoint-8/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77373c5a9c7c5d17be09f65236ef7854157a64134869774017cfb9730dc4ef96
+ size 4984
run-4/checkpoint-8/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-5/checkpoint-25/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "vocab_size": 30522
+ }
run-5/checkpoint-25/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:256c3ea36cc9d63a74d5579a08ff66e75bacb00961475c74463a964ff95cca0d
+ size 267832560
run-5/checkpoint-25/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d5deeae337c9eec2f09386711fe7fb3fba412d04b3f6f5eda2f735928ee563b
+ size 535724410
run-5/checkpoint-25/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a9b8cca76a792dad8fa518ad33b3a56690bffe18bf609f099a55947e32899b0
+ size 13990
run-5/checkpoint-25/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4971b67ffda84df6065cba51ff17ad37f81942b950e906f686ee95f0fa9c55cd
+ size 1064
run-5/checkpoint-25/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
run-5/checkpoint-25/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-5/checkpoint-25/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
run-5/checkpoint-25/trainer_state.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "best_metric": 0.0,
+   "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-5/checkpoint-25",
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 25,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.6417602300643921,
+       "eval_matthews_correlation": 0.0,
+       "eval_runtime": 3.8641,
+       "eval_samples_per_second": 25.879,
+       "eval_steps_per_second": 1.812,
+       "step": 25
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 100,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 500,
+   "total_flos": 0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 1.8703132341483286e-05,
+     "num_train_epochs": 4,
+     "per_device_train_batch_size": 8,
+     "seed": 1
+   }
+ }
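
Note: each checkpoint's trainer_state.json added in this commit is plain JSON, so the logged evaluation history and trial parameters can be inspected directly once the repo is checked out; a small sketch using the path from this commit:

```python
import json

# Read one of the checkpoint states added by this commit and print the
# per-epoch eval metrics recorded in its log_history.
with open("run-5/checkpoint-25/trainer_state.json") as f:
    state = json.load(f)

print("trial params:", state["trial_params"])
for record in state["log_history"]:
    print(record["epoch"], record["eval_loss"], record["eval_matthews_correlation"])
```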
run-5/checkpoint-25/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:409f465cf2cf58986ab248a27effbe2799c767cce4aaa079fe4d8dff8c3e1ed5
+ size 4984
run-5/checkpoint-25/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
runs/Mar05_15-34-07_d253bcf50e04/events.out.tfevents.1709653651.d253bcf50e04.1525.7 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dae4cfc390e56c11fa2ccf4a3a1af6cc4d790eecc5a4853fce3ec5300b000f17
- size 5200
+ oid sha256:2c8c2f168aac149381e6b692bf42de4f787b09bb45c9c075634eee7550a74291
+ size 5877
runs/Mar05_15-34-07_d253bcf50e04/events.out.tfevents.1709653778.d253bcf50e04.1525.8 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9de76d1ce6a8b17ce9c222499f7590dfa677fb461f15974630e9dbe636680339
+ size 5199
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:77373c5a9c7c5d17be09f65236ef7854157a64134869774017cfb9730dc4ef96
+ oid sha256:409f465cf2cf58986ab248a27effbe2799c767cce4aaa079fe4d8dff8c3e1ed5
  size 4984