AlekseyKorshuk committed
Commit 97e4998
1 Parent(s): 3957926

huggingartists

README.md CHANGED
@@ -45,15 +45,15 @@ from datasets import load_dataset
 dataset = load_dataset("huggingartists/the-the-pigs")
 ```
 
-[Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/s5khturf/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
+[Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/7yh65db9/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on The ‘’Вепри’’ (The Pigs)'s lyrics.
 
-Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/2wm2oonn) for full transparency and reproducibility.
+Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/65gj1lk1) for full transparency and reproducibility.
 
-At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/2wm2oonn/artifacts) is logged and versioned.
+At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/65gj1lk1/artifacts) is logged and versioned.
 
 ## How to use
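The "How to use" section touched by this hunk is not shown in the diff. For orientation, here is a minimal sketch (not part of the commit) of loading the fine-tuned checkpoint and generating text, assuming the standard `transformers` pipeline API:

```python
# Sketch only: load the fine-tuned model from the Hub and sample a few
# lyric continuations. The model id is taken from the README above.
from transformers import pipeline

generator = pipeline("text-generation", model="huggingartists/the-the-pigs")
outputs = generator("I am", num_return_sequences=3, max_length=50)
for out in outputs:
    print(out["generated_text"])
```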
 
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "gpt2",
+  "_name_or_path": "huggingartists/the-the-pigs",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
evaluation.txt CHANGED
@@ -1 +1 @@
-{"eval_loss": 1.7347867488861084, "eval_runtime": 0.9361, "eval_samples_per_second": 20.297, "eval_steps_per_second": 3.205, "epoch": 20.0}
+{"eval_loss": 1.621703028678894, "eval_runtime": 1.0514, "eval_samples_per_second": 20.924, "eval_steps_per_second": 2.853, "epoch": 109.0}
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c02c32ea8cbf7c2e61290b1d72ca0369e968b66bb1edd7b4464b23c5092af01b
+oid sha256:8cac01e3e8e7334767c029651df67fe66fe2897cde82b9c9c859c035aa779d50
 size 497764120
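This file and the other binaries below (optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt, training_args.bin) are stored with Git LFS, so the diff only shows pointer files: the `oid sha256:...` line is the SHA-256 of the real file's contents and `size` is its byte length. A small sketch for verifying a locally downloaded file against its pointer:

```python
# Sketch: compute the SHA-256 of a downloaded LFS file and compare it with
# the "oid sha256:..." value in the pointer shown in the diff.
import hashlib

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local path; the result should match the new pointer's oid.
print(sha256_of("flax_model.msgpack"))
```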
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d794e2cf1e854660cf8c67241054d968e9933fc96e0ac960005808d8e19da9a
+oid sha256:efb6298c1ee9d27085b6b26eeee4bb5bd451b57bf370bbf0250eacb1122a8406
 size 995603825
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f13a43a42b8448df12dbb6b75830224c036b3bcb9c128c9833c197779081b8a6
+oid sha256:df20a48917965ea1b00db1df1514df26f7f6ba42fdaa12aa96e21f8d425f972a
 size 510403817
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70c1257d23e86fa50a3c449eb97c44e917eeff6eada6271ca3edb44fa461f708
-size 14567
+oid sha256:b7128092bbf4a37694370d36c94bbee45377288c60216893350826a65beba17c
+size 14503
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f8eb138d3e74e4ee9a8cfdbf6948246533add92d0858995ee833af706c95c53c
+oid sha256:8f39323abdebb283bc5c2151a021734faafaf232e59d003177bb41167a1a31b6
 size 623
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2", "tokenizer_class": "GPT2Tokenizer"}
+{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "huggingartists/the-the-pigs", "tokenizer_class": "GPT2Tokenizer"}
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "best_metric": 1.7347867488861084,
-  "best_model_checkpoint": "output/the-the-pigs/checkpoint-117",
-  "epoch": 9.0,
-  "global_step": 117,
+  "best_metric": 1.621703028678894,
+  "best_model_checkpoint": "output/the-the-pigs/checkpoint-132",
+  "epoch": 11.0,
+  "global_step": 132,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -216,11 +216,45 @@
       "eval_samples_per_second": 22.327,
       "eval_steps_per_second": 3.525,
       "step": 117
+    },
+    {
+      "epoch": 10.0,
+      "learning_rate": 0.0001372,
+      "loss": 1.6342,
+      "step": 120
+    },
+    {
+      "epoch": 10.0,
+      "eval_loss": 1.6437487602233887,
+      "eval_runtime": 0.9748,
+      "eval_samples_per_second": 22.569,
+      "eval_steps_per_second": 3.078,
+      "step": 120
+    },
+    {
+      "epoch": 10.42,
+      "learning_rate": 8.635498649403298e-05,
+      "loss": 1.7365,
+      "step": 125
+    },
+    {
+      "epoch": 10.83,
+      "learning_rate": 9.190657300387535e-06,
+      "loss": 1.6443,
+      "step": 130
+    },
+    {
+      "epoch": 11.0,
+      "eval_loss": 1.621703028678894,
+      "eval_runtime": 1.0058,
+      "eval_samples_per_second": 21.873,
+      "eval_steps_per_second": 2.983,
+      "step": 132
     }
   ],
-  "max_steps": 260,
-  "num_train_epochs": 20,
-  "total_flos": 115229786112000.0,
+  "max_steps": 1308,
+  "num_train_epochs": 109,
+  "total_flos": 130384723968000.0,
   "trial_name": null,
   "trial_params": null
 }
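The new `trainer_state.json` extends the log history through step 132 and records the new best checkpoint. A small sketch (assuming the file is downloaded locally and follows the standard `Trainer` state layout with a `log_history` list) for pulling out the best metric and the eval-loss curve:

```python
# Sketch: read trainer_state.json and print the best checkpoint plus the
# eval_loss recorded at each evaluation step.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

for entry in state.get("log_history", []):
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']}: eval_loss {entry['eval_loss']:.4f}")
```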
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:115de7483970d263e0616e850a6e0d52e43c047dc3d58f7fce41ea06d8252d5f
+oid sha256:93d3101d9dcf343ede445e4a706f3bbc279ebaf47084dfddf4e639933ea0a8c3
 size 2671