adamjweintraut committed
Commit 2618291
1 Parent(s): 76faca3

bart-finetuned-kwsylgen-64-Rerun

README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ license: apache-2.0
+ base_model: adamjweintraut/bart-finetuned-lyrlen-64-lines
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: bart-finetuned-kwsylgen-64-Rerun
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # bart-finetuned-kwsylgen-64-Rerun
+
+ This model is a fine-tuned version of [adamjweintraut/bart-finetuned-lyrlen-64-lines](https://huggingface.co/adamjweintraut/bart-finetuned-lyrlen-64-lines) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4231
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1
+ - mixed_precision_training: Native AMP
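As an editorial aside (not part of the committed card): a minimal sketch of how the hyperparameters above could be expressed with `Seq2SeqTrainingArguments`. The `output_dir` is hypothetical, and the Adam betas/epsilon listed above are the transformers defaults, so they are not set explicitly.

```python
from transformers import Seq2SeqTrainingArguments

# Values copied from the hyperparameter list above; everything else is left at defaults.
training_args = Seq2SeqTrainingArguments(
    output_dir="bart-finetuned-kwsylgen-64-Rerun",  # hypothetical path
    learning_rate=5e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=1,
    fp16=True,  # "Native AMP" mixed precision
)
```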
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.4862        | 0.18  | 500  | 0.4553          |
+ | 0.4128        | 0.36  | 1000 | 0.4430          |
+ | 0.3923        | 0.54  | 1500 | 0.4311          |
+ | 0.3789        | 0.72  | 2000 | 0.4255          |
+ | 0.3722        | 0.9   | 2500 | 0.4231          |
+
+
+ ### Framework versions
+
+ - Transformers 4.38.2
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
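As an editorial aside, a minimal inference sketch for the checkpoint this commit adds. The card does not document the task or input format; the keyword-style prompt below is purely illustrative, loosely inferred from the "kwsylgen" (keyword/syllable generation) naming.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "adamjweintraut/bart-finetuned-kwsylgen-64-Rerun"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Hypothetical prompt: the expected input format is not documented in the card.
inputs = tokenizer("love | night | 8", return_tensors="pt", truncation=True, max_length=64)
outputs = model.generate(**inputs)  # decoding defaults come from generation_config.json
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```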
generation_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token_id": 0,
+   "clean_up_tokenization_spaces": true,
+   "decoder_start_token_id": 2,
+   "do_sample": true,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "forced_bos_token_id": 0,
+   "forced_eos_token_id": 2,
+   "max_new_tokens": 64,
+   "n_examples": null,
+   "no_repeat_ngram_size": 2,
+   "num_beams": 4,
+   "pad_to_max_length": true,
+   "pad_token_id": 2,
+   "padding": "max_length",
+   "renormalize_logits": true,
+   "skip_special_tokens": true,
+   "temperature": 0.85,
+   "top_k": 0,
+   "top_p": 0.9,
+   "transformers_version": "4.38.2",
+   "truncation": true
+ }
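For reference (editorial, not part of the commit): roughly how the decoding settings above map onto an explicit `generate()` call. `generate()` already picks them up from `generation_config.json`, so spelling them out is optional; note that keys such as `padding`, `truncation`, `skip_special_tokens`, `pad_to_max_length`, and `n_examples` look like tokenizer/pipeline options rather than generation arguments, so they are omitted here.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "adamjweintraut/bart-finetuned-kwsylgen-64-Rerun"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("example input", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,          # with num_beams > 1 this is beam-search multinomial sampling
    num_beams=4,
    early_stopping=True,
    temperature=0.85,
    top_p=0.9,
    top_k=0,                 # 0 disables top-k filtering
    no_repeat_ngram_size=2,
    renormalize_logits=True,
    max_new_tokens=64,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```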
runs/Apr14_02-39-01_d3ce5ccb487f/events.out.tfevents.1713062342.d3ce5ccb487f.1580.6 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:89b80d7087cd5d930bda58be7ce01133b79c7892d9a68f0cd78d60cc199f4446
- size 8623
+ oid sha256:a4373d42ec88517caa9d4cd9179a456d314c132da582b199e88f3b7016cda84f
+ size 8977