vishnun25 committed on
Commit
d2346ef
1 Parent(s): 4770722
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,65 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - null
+ model_index:
+ - name: distilgpt2-finetuned-distilgpt2-med_articles
+   results:
+   - task:
+       name: Causal Language Modeling
+       type: text-generation
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # distilgpt2-finetuned-distilgpt2-med_articles
+
+ This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 3.5189
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | No log        | 1.0   | 260  | 3.6093          |
+ | 3.7297        | 2.0   | 520  | 3.5603          |
+ | 3.7297        | 3.0   | 780  | 3.5338          |
+ | 3.5313        | 4.0   | 1040 | 3.5232          |
+ | 3.5313        | 5.0   | 1300 | 3.5189          |
+
+
+ ### Framework versions
+
+ - Transformers 4.9.2
+ - Pytorch 1.9.0+cu102
+ - Datasets 1.11.0
+ - Tokenizers 0.10.3
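The hyperparameters in the card map directly onto `TrainingArguments` in the Trainer API of Transformers 4.9. A minimal sketch of the setup they describe follows; the dataset wiring is omitted because the card does not name the data, and the `output_dir` is an assumption taken from the model name.

```python
# Sketch of a Trainer setup matching the hyperparameters listed in the card.
# The training data is unknown ("None" in the card), so only the arguments
# themselves are reproduced here.
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

model = AutoModelForCausalLM.from_pretrained("distilgpt2")
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")

args = TrainingArguments(
    output_dir="distilgpt2-finetuned-distilgpt2-med_articles",  # assumed
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    evaluation_strategy="epoch",  # matches the per-epoch eval losses above
)

# trainer = Trainer(model=model, args=args,
#                   train_dataset=..., eval_dataset=...)  # data not in this repo
# trainer.train()
```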
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "_name_or_path": "distilgpt2",
+   "_num_labels": 1,
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 6,
+   "n_positions": 1024,
+   "resid_pdrop": 0.1,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.9.2",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
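The `task_specific_params` block above carries the generation defaults (`do_sample: true`, `max_length: 50`) that the text-generation pipeline applies. A minimal inference sketch for this checkpoint; note the hub repo ID is an assumption inferred from the commit author and model name.

```python
# Minimal inference sketch. NOTE: the hub ID below is inferred from the
# commit author and the model name in the card; adjust if it differs.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="vishnun25/distilgpt2-finetuned-distilgpt2-med_articles",
)

# do_sample=True and max_length=50 mirror task_specific_params in
# config.json; passing them explicitly makes the defaults visible.
out = generator("Recent studies on hypertension show",
                do_sample=True, max_length=50)
print(out[0]["generated_text"])
```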
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f47bdd9cc0f58241e8c3723535e1d140733ebdbdacb62fa8bb46ce5635eb176e
+ size 333972957
runs/Aug19_09-07-02_a08bd7a25748/1629364060.2303417/events.out.tfevents.1629364060.a08bd7a25748.75.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5976c3463eb4afef088c8ca08e92423eb3b9b1546c3b58194db40f7d0508124
+ size 4205
runs/Aug19_09-07-02_a08bd7a25748/events.out.tfevents.1629364060.a08bd7a25748.75.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d26000a6883f76be30583af524de581731a2fd8f198be16e33cec31d48fa09ce
+ size 5287
runs/Aug19_09-07-02_a08bd7a25748/events.out.tfevents.1629365091.a08bd7a25748.75.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e931fff94155c9cb07eb3b527c81cbf48f434985ee1f0133323d17e2963712e
+ size 311
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "distilgpt2", "tokenizer_class": "GPT2Tokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0405174ec65f57997442cf28a7725916e28a12529b30095d35752bd25f6282c5
+ size 2671
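`training_args.bin` is the serialized `TrainingArguments` object that `Trainer` saves alongside a run; loading it recovers the exact configuration. A sketch, assuming a transformers version compatible with the one that wrote the file (4.9.2):

```python
# Sketch: inspect the serialized TrainingArguments. Trainer writes this
# file with torch.save, so torch.load reads it back.
import torch

args = torch.load("training_args.bin")
print(args.learning_rate)     # expected: 2e-05, as listed in the card
print(args.num_train_epochs)  # expected: 5
```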
vocab.json ADDED
The diff for this file is too large to render. See raw diff