defex committed on
Commit
2cbe859
1 Parent(s): b87aee5

my-awesome-model

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - generated_from_trainer
4
+ datasets:
5
+ - null
6
+ model_index:
7
+ - name: distilgpt2-finetuned-amazon-reviews
8
+ results:
9
+ - task:
10
+ name: Causal Language Modeling
11
+ type: text-generation
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # distilgpt2-finetuned-amazon-reviews
18
+
19
+ This model was trained from scratch on the None dataset.
20
+
21
+ ## Model description
22
+
23
+ More information needed
24
+
25
+ ## Intended uses & limitations
26
+
27
+ More information needed
28
+
29
+ ## Training and evaluation data
30
+
31
+ More information needed
32
+
33
+ ## Training procedure
34
+
35
+ ### Training hyperparameters
36
+
37
+ The following hyperparameters were used during training:
38
+ - learning_rate: 2e-05
39
+ - train_batch_size: 8
40
+ - eval_batch_size: 8
41
+ - seed: 42
42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
43
+ - lr_scheduler_type: linear
44
+ - num_epochs: 3.0
45
+
46
+ ### Framework versions
47
+
48
+ - Transformers 4.8.2
49
+ - Pytorch 1.9.0+cu102
50
+ - Datasets 1.9.0
51
+ - Tokenizers 0.10.3
config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/content/test-clm/checkpoint-23500",
3
+ "_num_labels": 1,
4
+ "activation_function": "gelu_new",
5
+ "architectures": [
6
+ "GPT2LMHeadModel"
7
+ ],
8
+ "attn_pdrop": 0.1,
9
+ "bos_token_id": 50256,
10
+ "embd_pdrop": 0.1,
11
+ "eos_token_id": 50256,
12
+ "gradient_checkpointing": false,
13
+ "id2label": {
14
+ "0": "LABEL_0"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "label2id": {
18
+ "LABEL_0": 0
19
+ },
20
+ "layer_norm_epsilon": 1e-05,
21
+ "model_type": "gpt2",
22
+ "n_ctx": 1024,
23
+ "n_embd": 768,
24
+ "n_head": 12,
25
+ "n_inner": null,
26
+ "n_layer": 6,
27
+ "n_positions": 1024,
28
+ "resid_pdrop": 0.1,
29
+ "scale_attn_weights": true,
30
+ "summary_activation": null,
31
+ "summary_first_dropout": 0.1,
32
+ "summary_proj_to_labels": true,
33
+ "summary_type": "cls_index",
34
+ "summary_use_proj": true,
35
+ "task_specific_params": {
36
+ "text-generation": {
37
+ "do_sample": true,
38
+ "max_length": 50
39
+ }
40
+ },
41
+ "transformers_version": "4.8.2",
42
+ "use_cache": true,
43
+ "vocab_size": 50257
44
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dcc9c3d19788cc9b4c3929cc4b386999a55f01f13cd86d2de8536856b6cab5af
3
+ size 333972957
runs/Jul21_08-20-09_fd3fa8d86bcc/1626855630.467104/events.out.tfevents.1626855630.fd3fa8d86bcc.63.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cbeec289bba025c0e6a8ae95e34e3d2ee2805476d9654ccc37cc132a23c047c
3
+ size 4199
runs/Jul21_08-20-09_fd3fa8d86bcc/events.out.tfevents.1626855630.fd3fa8d86bcc.63.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d33026364b4a49457037539be0f9af78687e99a2e0d683577497058a2c2e571
3
+ size 11858
runs/Jul21_08-20-09_fd3fa8d86bcc/events.out.tfevents.1626860941.fd3fa8d86bcc.63.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf11cc98d0c3554047350551a0a9a57f12d064350df664376d3f301f941a774c
3
+ size 316
runs/Jul21_10-14-06_fd3fa8d86bcc/events.out.tfevents.1626862472.fd3fa8d86bcc.4845.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:930958136111653b98b971bb160e9333ff53a3d8c8cb4b5a907de15ac4de372f
3
+ size 249
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72f613d3d7b56def3683c4b0fabf6f97a496b1ad34bc98ac6ec295790e60d8a9
3
+ size 2671