calum committed
Commit 142fb93
Parent: 4720036

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .ipynb_checkpoints/README-checkpoint.md +50 -0
  2. README.md +50 -0
  3. added_tokens.json +3 -0
  4. all_results.json +8 -0
  5. checkpoint-395000/added_tokens.json +3 -0
  6. checkpoint-395000/config.json +31 -0
  7. checkpoint-395000/generation_config.json +6 -0
  8. checkpoint-395000/merges.txt +0 -0
  9. checkpoint-395000/optimizer.pt +3 -0
  10. checkpoint-395000/pytorch_model.bin +3 -0
  11. checkpoint-395000/rng_state.pth +3 -0
  12. checkpoint-395000/scheduler.pt +3 -0
  13. checkpoint-395000/special_tokens_map.json +5 -0
  14. checkpoint-395000/tokenizer.json +0 -0
  15. checkpoint-395000/tokenizer_config.json +20 -0
  16. checkpoint-395000/trainer_state.json +0 -0
  17. checkpoint-395000/training_args.bin +3 -0
  18. checkpoint-395000/vocab.json +0 -0
  19. checkpoint-395500/added_tokens.json +3 -0
  20. checkpoint-395500/config.json +31 -0
  21. checkpoint-395500/generation_config.json +6 -0
  22. checkpoint-395500/merges.txt +0 -0
  23. checkpoint-395500/optimizer.pt +3 -0
  24. checkpoint-395500/pytorch_model.bin +3 -0
  25. checkpoint-395500/rng_state.pth +3 -0
  26. checkpoint-395500/scheduler.pt +3 -0
  27. checkpoint-395500/special_tokens_map.json +5 -0
  28. checkpoint-395500/tokenizer.json +0 -0
  29. checkpoint-395500/tokenizer_config.json +20 -0
  30. checkpoint-395500/trainer_state.json +0 -0
  31. checkpoint-395500/training_args.bin +3 -0
  32. checkpoint-395500/vocab.json +0 -0
  33. checkpoint-396000/added_tokens.json +3 -0
  34. checkpoint-396000/config.json +31 -0
  35. checkpoint-396000/generation_config.json +6 -0
  36. checkpoint-396000/merges.txt +0 -0
  37. checkpoint-396000/optimizer.pt +3 -0
  38. checkpoint-396000/pytorch_model.bin +3 -0
  39. checkpoint-396000/rng_state.pth +3 -0
  40. checkpoint-396000/scheduler.pt +3 -0
  41. checkpoint-396000/special_tokens_map.json +5 -0
  42. checkpoint-396000/tokenizer.json +0 -0
  43. checkpoint-396000/tokenizer_config.json +20 -0
  44. checkpoint-396000/trainer_state.json +0 -0
  45. checkpoint-396000/training_args.bin +3 -0
  46. checkpoint-396000/vocab.json +0 -0
  47. checkpoint-396500/added_tokens.json +3 -0
  48. checkpoint-396500/config.json +31 -0
  49. checkpoint-396500/generation_config.json +6 -0
  50. checkpoint-396500/merges.txt +0 -0
.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: out
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # out
+
+ This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.35.0.dev0
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.14.5
+ - Tokenizers 0.14.1
README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: out
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # out
+
+ This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.35.0.dev0
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.14.5
+ - Tokenizers 0.14.1
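
The hyperparameters in this card map one-to-one onto `transformers.TrainingArguments`. A minimal sketch of the equivalent configuration, assuming a standard Trainer run (the actual training script and dataset are not recorded in the card):

```python
from transformers import TrainingArguments

# Mirrors the "Training hyperparameters" section of the card; the Adam betas,
# epsilon, and linear scheduler are Trainer defaults spelled out explicitly.
# "out" matches the model-index name above.
args = TrainingArguments(
    output_dir="out",
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=3.0,
)
```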
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<|endoftext|>": 50256
+ }
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.0,
+   "train_loss": 0.30394269704031157,
+   "train_runtime": 3702.8677,
+   "train_samples": 2132808,
+   "train_samples_per_second": 1727.965,
+   "train_steps_per_second": 107.998
+ }
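
These throughput figures are internally consistent, and they also explain the checkpoint numbering in this commit: 2,132,808 samples at batch size 16 is ~133,301 steps per epoch, so three epochs end near step 399,900, just past the checkpoints listed here. A quick check:

```python
# Sanity-check all_results.json: samples/sec and steps/sec agree
# with the reported runtime and the batch size of 16 from the card.
train_samples = 2_132_808
epochs = 3.0
runtime_s = 3702.8677

samples_per_s = train_samples * epochs / runtime_s
print(round(samples_per_s, 3))       # ~1727.97  (reported: 1727.965)
print(round(samples_per_s / 16, 3))  # ~108.0    (reported: 107.998)
```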
checkpoint-395000/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<|endoftext|>": 50256
+ }
checkpoint-395000/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 64,
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 8,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.35.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
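
This is an unusually small GPT-2 variant: with n_embd=64 split across 16 heads, each attention head is only 4-dimensional, and most of the parameters sit in the 50,257-token embedding matrix. A sketch that rebuilds the architecture from this config and confirms it matches the ~14.8 MB float32 pytorch_model.bin below:

```python
from transformers import GPT2Config, GPT2LMHeadModel

# Non-default fields from config.json; everything else is the GPT-2 default.
config = GPT2Config(n_embd=64, n_head=16, n_layer=8,
                    n_positions=1024, vocab_size=50257)
model = GPT2LMHeadModel(config)  # randomly initialized; no weights loaded

n_params = sum(p.numel() for p in model.parameters())
print(n_params)      # ~3.68M parameters (embeddings dominate: 50257 * 64 ≈ 3.2M)
print(n_params * 4)  # ~14.7 MB of raw float32, consistent with the
                     # 14,759,965-byte pytorch_model.bin (plus pickle overhead)
```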
checkpoint-395000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.35.0.dev0"
+ }
checkpoint-395000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-395000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9030cc2c2d6c7de5f6344397bf968ce3ed5326111cf72814c66eee6542cc025
+ size 29538757
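
The three-line entries here and below are Git LFS pointer files, not the binaries themselves: each records a spec version, the sha256 oid of the real content, and its size in bytes. The sizes are informative on their own; optimizer.pt at ~29.5 MB is almost exactly twice the ~14.8 MB model, as expected for Adam's two float32 moment buffers per parameter. A sketch of fetching one of these files with huggingface_hub (the repo id below is hypothetical, since the commit view does not name the repository):

```python
from huggingface_hub import hf_hub_download

# repo_id is a placeholder; substitute the repository this commit belongs to.
path = hf_hub_download(
    repo_id="calum/out",
    filename="checkpoint-395000/optimizer.pt",
    revision="142fb93",  # the commit shown above; a branch name or full SHA also works
)
print(path)  # local cache path of the resolved LFS object
```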
checkpoint-395000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:274c4b2987f61171a94338ca3ffdae4de92f03b482eb95b3af02752a04a37cf2
+ size 14759965
checkpoint-395000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ef384701047e2979bb422ad6f5253e070626bab23590e4dddce4e05f0217c3c
+ size 14575
checkpoint-395000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7358d1b6b22893af2bb8608fbd26ee7737cfeb03f3bb6d95de545ccb431e1d2b
+ size 627
checkpoint-395000/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-395000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-395000/tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
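
The tokenizer files in each checkpoint (vocab.json, merges.txt, tokenizer.json, and the configs above) are the stock GPT-2 byte-level BPE setup, with <|endoftext|> (id 50256) doing triple duty as BOS, EOS, and UNK and no pad token defined. A minimal sketch of loading it, assuming the checkpoint folder has been downloaded locally:

```python
from transformers import AutoTokenizer

# "checkpoint-395000" is assumed to be a local copy of that folder.
tok = AutoTokenizer.from_pretrained("checkpoint-395000")
print(tok.eos_token, tok.eos_token_id)   # <|endoftext|> 50256

ids = tok("hello world").input_ids
print(ids, tok.decode(ids))              # round-trips through the BPE vocab
```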
checkpoint-395000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-395000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fa402d2b13051fd62da05e132439b83f33b16c091bd2fa2aafd024899609026
+ size 3963
checkpoint-395000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-395500/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<|endoftext|>": 50256
+ }
checkpoint-395500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 64,
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 8,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.35.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-395500/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.35.0.dev0"
+ }
checkpoint-395500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-395500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff5d39f9c94f3cdd77c2bee4b8b8c98d7729d8f81c06c3674f701d330c2037ea
+ size 29538757
checkpoint-395500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:460623a7ee3fded80ee87dab971cefcd4db6983451c8b39526e45ccc1ceb76f2
+ size 14759965
checkpoint-395500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2911799e686e45756f30a39b88c665086ba5f9c0cad0f45744445279767515b
+ size 14575
checkpoint-395500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4ccd475028e00dce935b535603c6eea7231afac19f7c4365f0f6d1e0c9984cf
+ size 627
checkpoint-395500/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-395500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-395500/tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-395500/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-395500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fa402d2b13051fd62da05e132439b83f33b16c091bd2fa2aafd024899609026
+ size 3963
checkpoint-395500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-396000/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<|endoftext|>": 50256
+ }
checkpoint-396000/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 64,
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 8,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.35.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-396000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.35.0.dev0"
+ }
checkpoint-396000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-396000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a34b628d1924a392d1c1394159efef193ff1aec9e44b36a8df287a9bb686cb45
+ size 29538757
checkpoint-396000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b9f5496fc14b98a4a53cd46564aa72dc995cd98dcfffc5330b8014c6fe0a54c
+ size 14759965
checkpoint-396000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e4ba75fef28319dff2fa74e0f2a555f8c74fe7e67056a7badf3efe90f1034f8
+ size 14575
checkpoint-396000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e1b98e2c3ed130c858d338ccb8292ac76b607f9fbe9a8a66072a803e295cabd
+ size 627
checkpoint-396000/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-396000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-396000/tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-396000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-396000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fa402d2b13051fd62da05e132439b83f33b16c091bd2fa2aafd024899609026
+ size 3963
checkpoint-396000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-396500/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<|endoftext|>": 50256
+ }
checkpoint-396500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_embd": 64,
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 8,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.35.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-396500/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.35.0.dev0"
+ }
checkpoint-396500/merges.txt ADDED
The diff for this file is too large to render. See raw diff