ubaada committed
Commit ae3858b
1 Parent(s): 5c5032c

ubaada/pegasus-x-large-booksum-16k

Files changed (4)
  1. README.md +6 -5
  2. config.json +1 -1
  3. generation_config.json +2 -1
  4. training_args.bin +2 -2
README.md CHANGED
@@ -13,6 +13,7 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/theubaada/huggingface/runs/iqgj3vcb)
 # long-t5-tglobal-base
 
 This model is a fine-tuned version of [google/long-t5-tglobal-base](https://huggingface.co/google/long-t5-tglobal-base) on an unknown dataset.
@@ -44,13 +45,13 @@ The following hyperparameters were used during training:
 - eval_batch_size: 1
 - seed: 42
 - distributed_type: multi-GPU
-- num_devices: 2
+- num_devices: 4
 - gradient_accumulation_steps: 4
-- total_train_batch_size: 64
-- total_eval_batch_size: 2
+- total_train_batch_size: 128
+- total_eval_batch_size: 4
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 9
+- num_epochs: 13
 
 ### Training results
 
@@ -69,7 +70,7 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- Transformers 4.40.2
+- Transformers 4.41.0
 - Pytorch 2.2.0
 - Datasets 2.19.1
 - Tokenizers 0.19.1
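In the hunk above, the jump from 2 to 4 devices is exactly what doubles total_train_batch_size (64 → 128) and total_eval_batch_size (2 → 4): the HF Trainer reports total_train_batch_size as per_device_train_batch_size × num_devices × gradient_accumulation_steps. The per-device train batch size itself is not visible in this hunk; 128 / (4 × 4) implies it is 8, but that is an inference, not a value from the diff. A minimal sketch of the arithmetic:

```python
# Batch-size arithmetic as the HF Trainer reports it in the model card.
per_device_train_batch_size = 8   # assumed: inferred from 128 / (4 * 4), not shown in the diff
per_device_eval_batch_size = 1    # "eval_batch_size: 1" in the card
num_devices = 4                   # after this commit (previously 2)
gradient_accumulation_steps = 4

total_train_batch_size = (
    per_device_train_batch_size * num_devices * gradient_accumulation_steps
)
total_eval_batch_size = per_device_eval_batch_size * num_devices

print(total_train_batch_size)  # 128, matching the updated card
print(total_eval_batch_size)   # 4, matching the updated card
```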
config.json CHANGED
@@ -29,7 +29,7 @@
 "relative_attention_num_buckets": 32,
 "tie_word_embeddings": false,
 "torch_dtype": "float32",
-"transformers_version": "4.40.2",
+"transformers_version": "4.41.0",
 "use_cache": true,
 "vocab_size": 32128
 }
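The config.json hunk is only the transformers_version bump; the surrounding fields (vocab_size 32128, float32 weights, use_cache) are untouched context from the base model. A minimal sketch of reading those fields back with AutoConfig, assuming the repo id shown in the page header matches what is on the Hub:

```python
from transformers import AutoConfig

# Repo id taken from the page header; that it matches the config shown
# in this diff is an assumption.
config = AutoConfig.from_pretrained("ubaada/pegasus-x-large-booksum-16k")

print(config.vocab_size)   # 32128
print(config.torch_dtype)  # torch.float32
print(config.use_cache)    # True
```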
generation_config.json CHANGED
@@ -1,9 +1,10 @@
 {
+  "_from_model_config": true,
   "decoder_start_token_id": 0,
   "eos_token_id": 1,
   "max_length": 2048,
   "num_beams": 5,
   "pad_token_id": 0,
   "repetition_penalty": 2.0,
-  "transformers_version": "4.40.2"
+  "transformers_version": "4.41.0"
 }
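The added `_from_model_config: true` flag records that this generation config was originally derived from the model config; the decoding defaults themselves (5-beam search, repetition_penalty 2.0, max_length 2048) are unchanged. These defaults apply automatically whenever generate() is called without overriding them. A minimal usage sketch, assuming the repo id from the page header:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

repo = "ubaada/pegasus-x-large-booksum-16k"  # assumed from the page header
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

# generate() reads unset arguments from generation_config.json, so this
# call uses num_beams=5, repetition_penalty=2.0, max_length=2048.
inputs = tokenizer("Summarize: <long document text>", return_tensors="pt")
summary_ids = model.generate(**inputs)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```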
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c611d9f9ff8ec098da6eb03453782a6458e06a90779c1927d1d621248f690dfb
-size 6776
+oid sha256:74aca712e060cee865ad4838d5b8a095000e733bff434150a5184cdcfed49e2a
+size 6968
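training_args.bin is tracked by Git LFS, so the diff shows only the pointer file: re-serializing the binary changes its sha256 oid and byte size (6776 → 6968). The file itself is a pickled TrainingArguments object saved by the Trainer. A minimal sketch of inspecting it, assuming huggingface_hub is available and noting that unpickling requires transformers to be importable:

```python
import torch
from huggingface_hub import hf_hub_download

# Repo id assumed from the page header.
path = hf_hub_download("ubaada/pegasus-x-large-booksum-16k", "training_args.bin")

# TrainingArguments is pickled, so weights_only=False is required and
# transformers must be installed for unpickling to resolve its classes.
args = torch.load(path, weights_only=False)
print(args.num_train_epochs)             # 13 per the updated card
print(args.gradient_accumulation_steps)  # 4
```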