srsawant34 committed
Commit 2a95641 · 1 Parent(s): d4e7c7a

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. 1_Pooling/config.json +7 -0
  2. README.md +107 -0
  3. checkpoint-1148/model.safetensors +3 -0
  4. checkpoint-1148/optimizer.pt +3 -0
  5. checkpoint-1148/rng_state.pth +3 -0
  6. checkpoint-1148/scheduler.pt +3 -0
  7. checkpoint-1148/trainer_state.json +45 -0
  8. checkpoint-1148/training_args.bin +3 -0
  9. checkpoint-1435/model.safetensors +3 -0
  10. checkpoint-1435/optimizer.pt +3 -0
  11. checkpoint-1435/rng_state.pth +3 -0
  12. checkpoint-1435/scheduler.pt +3 -0
  13. checkpoint-1435/trainer_state.json +51 -0
  14. checkpoint-1435/training_args.bin +3 -0
  15. checkpoint-1722/model.safetensors +3 -0
  16. checkpoint-1722/optimizer.pt +3 -0
  17. checkpoint-1722/rng_state.pth +3 -0
  18. checkpoint-1722/scheduler.pt +3 -0
  19. checkpoint-1722/trainer_state.json +57 -0
  20. checkpoint-1722/training_args.bin +3 -0
  21. checkpoint-2009/model.safetensors +3 -0
  22. checkpoint-2009/optimizer.pt +3 -0
  23. checkpoint-2009/rng_state.pth +3 -0
  24. checkpoint-2009/scheduler.pt +3 -0
  25. checkpoint-2009/trainer_state.json +63 -0
  26. checkpoint-2009/training_args.bin +3 -0
  27. checkpoint-2296/model.safetensors +3 -0
  28. checkpoint-2296/optimizer.pt +3 -0
  29. checkpoint-2296/rng_state.pth +3 -0
  30. checkpoint-2296/scheduler.pt +3 -0
  31. checkpoint-2296/trainer_state.json +69 -0
  32. checkpoint-2296/training_args.bin +3 -0
  33. checkpoint-287/model.safetensors +3 -0
  34. checkpoint-287/optimizer.pt +3 -0
  35. checkpoint-287/rng_state.pth +3 -0
  36. checkpoint-287/scheduler.pt +3 -0
  37. checkpoint-287/trainer_state.json +27 -0
  38. checkpoint-287/training_args.bin +3 -0
  39. checkpoint-574/model.safetensors +3 -0
  40. checkpoint-574/optimizer.pt +3 -0
  41. checkpoint-574/rng_state.pth +3 -0
  42. checkpoint-574/scheduler.pt +3 -0
  43. checkpoint-574/trainer_state.json +33 -0
  44. checkpoint-574/training_args.bin +3 -0
  45. checkpoint-8323/model.safetensors +3 -0
  46. checkpoint-8323/optimizer.pt +3 -0
  47. checkpoint-8323/rng_state.pth +3 -0
  48. checkpoint-8323/scheduler.pt +3 -0
  49. checkpoint-8323/trainer_state.json +195 -0
  50. checkpoint-8323/training_args.bin +3 -0
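
The commit message above says the folder was uploaded with `huggingface_hub`; the same library can fetch this exact revision. A minimal sketch, assuming you substitute the real repo id (it is not shown in this view, so the one below is a placeholder):

```python
from huggingface_hub import snapshot_download

# Fetch the repository snapshot at the commit shown above (2a95641).
# NOTE: "srsawant34/<repo-name>" is a hypothetical placeholder, not the real repo id.
local_dir = snapshot_download(
    repo_id="srsawant34/<repo-name>",
    revision="2a95641",
)
print(local_dir)  # contains README.md, 1_Pooling/ and the checkpoint-*/ folders
```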
1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 384,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
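
This config enables mean pooling only. For reference, a minimal sketch of the module this file configures when the model is loaded, using the standard `sentence_transformers.models.Pooling` API:

```python
from sentence_transformers.models import Pooling

# Mirrors 1_Pooling/config.json: average the 384-dimensional token embeddings,
# with CLS, max and mean-sqrt-len pooling all disabled.
pooling = Pooling(
    word_embedding_dimension=384,
    pooling_mode_cls_token=False,
    pooling_mode_mean_tokens=True,
    pooling_mode_max_tokens=False,
    pooling_mode_mean_sqrt_len_tokens=False,
)
```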
README.md ADDED
@@ -0,0 +1,107 @@
+ ---
+ pipeline_tag: sentence-similarity
+ license: apache-2.0
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+ - transformers
+ ---
+
+ # sentence-transformers/paraphrase-MiniLM-L6-v2
+
+ This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search.
+
+
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence", "Each sentence is converted"]
+
+ model = SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L6-v2')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+
+
+ ## Usage (HuggingFace Transformers)
+ Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.
+
+ ```python
+ from transformers import AutoTokenizer, AutoModel
+ import torch
+
+
+ # Mean pooling - take the attention mask into account for correct averaging
+ def mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+
+
+ # Sentences we want sentence embeddings for
+ sentences = ['This is an example sentence', 'Each sentence is converted']
+
+ # Load model from HuggingFace Hub
+ tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/paraphrase-MiniLM-L6-v2')
+ model = AutoModel.from_pretrained('sentence-transformers/paraphrase-MiniLM-L6-v2')
+
+ # Tokenize sentences
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
+
+ # Compute token embeddings
+ with torch.no_grad():
+     model_output = model(**encoded_input)
+
+ # Perform pooling. In this case, mean pooling.
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+
+ print("Sentence embeddings:")
+ print(sentence_embeddings)
+ ```
+
+
+
+ ## Evaluation Results
+
+
+
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/paraphrase-MiniLM-L6-v2)
+
+
+
+ ## Full Model Architecture
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+   (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
+ )
+ ```
+
+ ## Citing & Authors
+
+ This model was trained by [sentence-transformers](https://www.sbert.net/).
+
+ If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084):
+ ```bibtex
+ @inproceedings{reimers-2019-sentence-bert,
+     title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+     author = "Reimers, Nils and Gurevych, Iryna",
+     booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+     month = "11",
+     year = "2019",
+     publisher = "Association for Computational Linguistics",
+     url = "http://arxiv.org/abs/1908.10084",
+ }
+ ```
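
As a usage note on the README's Transformers example: the mean-pooled vectors are typically compared with cosine similarity. A minimal sketch, reusing `sentence_embeddings` from the snippet above:

```python
import torch.nn.functional as F

# L2-normalize the embeddings; the dot product of two unit vectors is their cosine similarity.
normalized = F.normalize(sentence_embeddings, p=2, dim=1)
similarity = normalized[0] @ normalized[1]
print(f"cosine similarity: {similarity.item():.4f}")
```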
checkpoint-1148/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b2098e96fe45a5f982c95e9326eeb8656a11a99113e82ae00dda07104972886
+ size 90866120
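
The three-line files in this diff are Git LFS pointers: the weights themselves live in LFS storage, and `oid` is the SHA-256 digest of the real file. A minimal integrity check after downloading, assuming the path is relative to the snapshot directory:

```python
import hashlib

def sha256_of(path: str) -> str:
    # Stream in 1 MiB chunks so large checkpoints need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "0b2098e96fe45a5f982c95e9326eeb8656a11a99113e82ae00dda07104972886"
assert sha256_of("checkpoint-1148/model.safetensors") == expected
```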
checkpoint-1148/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3179416f106ae2f73588f990002ac14e2c2d259eaba15ca06fb8f9e8e63d390e
+ size 180607738
checkpoint-1148/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87082ae80ddb9d34dbe1ab1348f8126541868839519795215c9dcc059ab63fc6
+ size 14244
checkpoint-1148/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c37d10e949e0aa0b73992efa2d5c4e60e9e1eb519cda5feb64f0ab41724e9f3
+ size 1064
checkpoint-1148/trainer_state.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 4.0,
+   "eval_steps": 500,
+   "global_step": 1148,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0004375,
+       "loss": 2.2956,
+       "step": 287
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.000375,
+       "loss": 2.2508,
+       "step": 574
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 0.0003125,
+       "loss": 2.223,
+       "step": 861
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 0.00025,
+       "loss": 2.1849,
+       "step": 1148
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 2296,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 8,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
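
The logged learning rates are consistent with a linear decay to zero from a base rate of 5e-4 over `max_steps` = 2296 (e.g. at step 1148, halfway through, 5e-4 × 0.5 = 2.5e-4); the longer checkpoint-8323 run further below matches the same formula with `max_steps` = 9184. A small check of that inference:

```python
# Assumption: linear decay from base_lr to 0 over max_steps (inferred from the logs above).
base_lr, max_steps = 5e-4, 2296

for step, logged in [(287, 0.0004375), (574, 0.000375), (861, 0.0003125), (1148, 0.00025)]:
    predicted = base_lr * (1 - step / max_steps)
    assert abs(predicted - logged) < 1e-12, (step, predicted, logged)
print("all logged learning rates match linear decay from 5e-4")
```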
checkpoint-1148/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59691d62067580cb96432d7835b9a22fa0cdbf9d683d1ce4d96a99344613e85b
+ size 4792
checkpoint-1435/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc8c4d32a407aaa87a88f0e6bbd1ac189f2a27e026dd588466a5f8b37576ff9b
+ size 90866120
checkpoint-1435/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d10bd4efa881893777d05d306a2a6cabf7b54b6127e45e93eb4bb7d740c6cdb3
+ size 180607738
checkpoint-1435/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50e824cfb48c98373f53fa9ca5548f2cb8c492dbb792ffd913076a907a3be071
+ size 14244
checkpoint-1435/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3b5c008aeab85448dc93b26ef48ef1b1487424fcaa42a2b4edbcc01433b9da0
+ size 1064
checkpoint-1435/trainer_state.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 1435,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0004375,
+       "loss": 2.2956,
+       "step": 287
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.000375,
+       "loss": 2.2508,
+       "step": 574
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 0.0003125,
+       "loss": 2.223,
+       "step": 861
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 0.00025,
+       "loss": 2.1849,
+       "step": 1148
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 0.0001875,
+       "loss": 2.129,
+       "step": 1435
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 2296,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 8,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1435/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59691d62067580cb96432d7835b9a22fa0cdbf9d683d1ce4d96a99344613e85b
+ size 4792
checkpoint-1722/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c8f67022add8bdf2b50f0f9728abf4878d9769cf37b6c527772b38f276f87ba
+ size 90866120
checkpoint-1722/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab2d2ef97ecad6c8c0dbd0832adfc3310b8ac297d3d71519d4812ce989d7d08a
+ size 180607738
checkpoint-1722/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6410d8133abaf4a88621d2bcb3b13abb0ad5ba2fb8420e82b82efbc22b99f0a5
+ size 14244
checkpoint-1722/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ade3fcc8dc579752118cf26ed7f71bb709b1db10d0dc00e45e62e34e19de1862
+ size 1064
checkpoint-1722/trainer_state.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 6.0,
+   "eval_steps": 500,
+   "global_step": 1722,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0004375,
+       "loss": 2.2956,
+       "step": 287
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.000375,
+       "loss": 2.2508,
+       "step": 574
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 0.0003125,
+       "loss": 2.223,
+       "step": 861
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 0.00025,
+       "loss": 2.1849,
+       "step": 1148
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 0.0001875,
+       "loss": 2.129,
+       "step": 1435
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 0.000125,
+       "loss": 2.0923,
+       "step": 1722
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 2296,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 8,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1722/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59691d62067580cb96432d7835b9a22fa0cdbf9d683d1ce4d96a99344613e85b
+ size 4792
checkpoint-2009/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:404ec8daad9aaa3bbf36ac0b5ae66ebc740e09b008b59e7b546f4d65846cf0c8
+ size 90866120
checkpoint-2009/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7155b933ef2854d6e51dc2007a87d953256098b5a55202a2f7b38594c7c1489c
+ size 180607738
checkpoint-2009/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e5a38071360faecf02821c8f4fbb614f6435fe31e87d746da0bc03e552b342c
+ size 14244
checkpoint-2009/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cee79f61c3bb2fb8a9032e1977574f64f859c6dd73869c3b327fbffa6589172
+ size 1064
checkpoint-2009/trainer_state.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 7.0,
+   "eval_steps": 500,
+   "global_step": 2009,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0004375,
+       "loss": 2.2956,
+       "step": 287
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.000375,
+       "loss": 2.2508,
+       "step": 574
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 0.0003125,
+       "loss": 2.223,
+       "step": 861
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 0.00025,
+       "loss": 2.1849,
+       "step": 1148
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 0.0001875,
+       "loss": 2.129,
+       "step": 1435
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 0.000125,
+       "loss": 2.0923,
+       "step": 1722
+     },
+     {
+       "epoch": 7.0,
+       "learning_rate": 6.25e-05,
+       "loss": 2.0515,
+       "step": 2009
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 2296,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 8,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2009/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59691d62067580cb96432d7835b9a22fa0cdbf9d683d1ce4d96a99344613e85b
+ size 4792
checkpoint-2296/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e26c278c82dfef249a456ec36e221a988b6d68d362fee2cb8d267663be9a8839
+ size 90866120
checkpoint-2296/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:321acd24548f9e1e302833e2279fa7442bf21c987d8e771bd5c094247b334456
+ size 180607738
checkpoint-2296/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6089dac575b53bd88bb39a67970f95bf63a880871d9872b7a41372a647ff1653
+ size 14244
checkpoint-2296/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1d06d8c680538100d481f36453ab9a97fc9432e250541c38cdeebf478aa981f
+ size 1064
checkpoint-2296/trainer_state.json ADDED
@@ -0,0 +1,69 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 8.0,
+   "eval_steps": 500,
+   "global_step": 2296,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0004375,
+       "loss": 2.2956,
+       "step": 287
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.000375,
+       "loss": 2.2508,
+       "step": 574
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 0.0003125,
+       "loss": 2.223,
+       "step": 861
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 0.00025,
+       "loss": 2.1849,
+       "step": 1148
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 0.0001875,
+       "loss": 2.129,
+       "step": 1435
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 0.000125,
+       "loss": 2.0923,
+       "step": 1722
+     },
+     {
+       "epoch": 7.0,
+       "learning_rate": 6.25e-05,
+       "loss": 2.0515,
+       "step": 2009
+     },
+     {
+       "epoch": 8.0,
+       "learning_rate": 0.0,
+       "loss": 2.027,
+       "step": 2296
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 2296,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 8,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2296/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59691d62067580cb96432d7835b9a22fa0cdbf9d683d1ce4d96a99344613e85b
+ size 4792
checkpoint-287/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39da78be7e3a484ab8795fb515ec18f0be4754c5cab212996559e1a53ee32de6
+ size 90866120
checkpoint-287/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c0a5b06cb0024bd0156188d29444929a1dc946adcc7c294a6fd797e1254b8e5
+ size 180607738
checkpoint-287/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d95ef98f5cd4b6c4540739b122435f8121ce1d964466889af8f8484ca6504e2c
+ size 14244
checkpoint-287/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ddfeeb2c40d442d9cf6870b8d5c753e579d58c73dd822f86ccebebc7298add3a
+ size 1064
checkpoint-287/trainer_state.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 287,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0004375,
+       "loss": 2.2956,
+       "step": 287
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 2296,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 8,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-287/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59691d62067580cb96432d7835b9a22fa0cdbf9d683d1ce4d96a99344613e85b
+ size 4792
checkpoint-574/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:349a2c2e0c215111818507eb78aa0b183262faa7649367608bc67e77ea5d2218
+ size 90866120
checkpoint-574/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5c92b4b4b84841ca6f94381ed19e37081521ad086f7346a6344212e73439111
+ size 180607738
checkpoint-574/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8a4c4c8306949aa90fda7de408dd3be7bad8cfdb6a96376be8e42b698c0dcec
+ size 14244
checkpoint-574/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2053df1c10f5c000fa932bfa8f8a1e64f92dd69d5c946f45ddb7e721b17ac683
+ size 1064
checkpoint-574/trainer_state.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 574,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0004375,
+       "loss": 2.2956,
+       "step": 287
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.000375,
+       "loss": 2.2508,
+       "step": 574
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 2296,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 8,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-574/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59691d62067580cb96432d7835b9a22fa0cdbf9d683d1ce4d96a99344613e85b
+ size 4792
checkpoint-8323/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8eb2a5db42fe1825e43b69b178c42992e7d87576776e33758f75592acf8c1f89
+ size 90866120
checkpoint-8323/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b94a49c3e5c9b3b80c1f296a41d62252434fdc86628b024267ec35270194497
+ size 180607738
checkpoint-8323/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:113c7031f546d1e57f4645de606e8624d51751acbde70de8fdcf580b016726fa
+ size 14244
checkpoint-8323/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:003133324f971f67e818819da368b5822c56b1e11457a78920cf74596a8a62a2
+ size 1064
checkpoint-8323/trainer_state.json ADDED
@@ -0,0 +1,195 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 29.0,
+   "eval_steps": 500,
+   "global_step": 8323,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.000484375,
+       "loss": 3.0823,
+       "step": 287
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.00046875,
+       "loss": 2.7242,
+       "step": 574
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 0.000453125,
+       "loss": 2.5348,
+       "step": 861
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 0.0004375,
+       "loss": 2.4455,
+       "step": 1148
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 0.000421875,
+       "loss": 2.3794,
+       "step": 1435
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 0.00040625000000000004,
+       "loss": 2.3375,
+       "step": 1722
+     },
+     {
+       "epoch": 7.0,
+       "learning_rate": 0.000390625,
+       "loss": 2.3262,
+       "step": 2009
+     },
+     {
+       "epoch": 8.0,
+       "learning_rate": 0.000375,
+       "loss": 2.3114,
+       "step": 2296
+     },
+     {
+       "epoch": 9.0,
+       "learning_rate": 0.000359375,
+       "loss": 2.2921,
+       "step": 2583
+     },
+     {
+       "epoch": 10.0,
+       "learning_rate": 0.00034375,
+       "loss": 2.2918,
+       "step": 2870
+     },
+     {
+       "epoch": 11.0,
+       "learning_rate": 0.000328125,
+       "loss": 2.2578,
+       "step": 3157
+     },
+     {
+       "epoch": 12.0,
+       "learning_rate": 0.0003125,
+       "loss": 2.2693,
+       "step": 3444
+     },
+     {
+       "epoch": 13.0,
+       "learning_rate": 0.000296875,
+       "loss": 2.2594,
+       "step": 3731
+     },
+     {
+       "epoch": 14.0,
+       "learning_rate": 0.00028125000000000003,
+       "loss": 2.2555,
+       "step": 4018
+     },
+     {
+       "epoch": 15.0,
+       "learning_rate": 0.000265625,
+       "loss": 2.2481,
+       "step": 4305
+     },
+     {
+       "epoch": 16.0,
+       "learning_rate": 0.00025,
+       "loss": 2.2468,
+       "step": 4592
+     },
+     {
+       "epoch": 17.0,
+       "learning_rate": 0.000234375,
+       "loss": 2.248,
+       "step": 4879
+     },
+     {
+       "epoch": 18.0,
+       "learning_rate": 0.00021875,
+       "loss": 2.2435,
+       "step": 5166
+     },
+     {
+       "epoch": 19.0,
+       "learning_rate": 0.00020312500000000002,
+       "loss": 2.2319,
+       "step": 5453
+     },
+     {
+       "epoch": 20.0,
+       "learning_rate": 0.0001875,
+       "loss": 2.2303,
+       "step": 5740
+     },
+     {
+       "epoch": 21.0,
+       "learning_rate": 0.000171875,
+       "loss": 2.2215,
+       "step": 6027
+     },
+     {
+       "epoch": 22.0,
+       "learning_rate": 0.00015625,
+       "loss": 2.2256,
+       "step": 6314
+     },
+     {
+       "epoch": 23.0,
+       "learning_rate": 0.00014062500000000002,
+       "loss": 2.2257,
+       "step": 6601
+     },
+     {
+       "epoch": 24.0,
+       "learning_rate": 0.000125,
+       "loss": 2.2275,
+       "step": 6888
+     },
+     {
+       "epoch": 25.0,
+       "learning_rate": 0.000109375,
+       "loss": 2.2225,
+       "step": 7175
+     },
+     {
+       "epoch": 26.0,
+       "learning_rate": 9.375e-05,
+       "loss": 2.2166,
+       "step": 7462
+     },
+     {
+       "epoch": 27.0,
+       "learning_rate": 7.8125e-05,
+       "loss": 2.2174,
+       "step": 7749
+     },
+     {
+       "epoch": 28.0,
+       "learning_rate": 6.25e-05,
+       "loss": 2.2188,
+       "step": 8036
+     },
+     {
+       "epoch": 29.0,
+       "learning_rate": 4.6875e-05,
+       "loss": 2.2143,
+       "step": 8323
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 9184,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 32,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-8323/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e68121a9357c4f016eb6bc0f031c8d8d3f664e26a8b5ed965be82c62d99c0bf
+ size 4792