AlekseyKorshuk committed on
Commit 7612f16
1 Parent(s): 7735708

huggingartists

README.md CHANGED
@@ -45,15 +45,15 @@ from datasets import load_dataset
 dataset = load_dataset("huggingartists/the-beatles")
 ```
 
- [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/1twc71z8/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
+ [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/2p2c5864/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on The Beatles's lyrics.
 
- Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/3vbolrfq) for full transparency and reproducibility.
+ Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/286vzjah) for full transparency and reproducibility.
 
- At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/3vbolrfq/artifacts) is logged and versioned.
+ At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/286vzjah/artifacts) is logged and versioned.
 
 ## How to use
 
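The `## How to use` section referenced above is not expanded in this hunk. As a minimal sketch, assuming the checkpoint is published under the `huggingartists/the-beatles` model id and loads with the standard `transformers` text-generation pipeline (the prompt is only an illustration):

```python
from transformers import pipeline

# Load the fine-tuned GPT-2 checkpoint from the Hub and sample a few continuations.
generator = pipeline("text-generation", model="huggingartists/the-beatles")
for out in generator("I am the", num_return_sequences=3):
    print(out["generated_text"])
```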
 
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "huggingartists/the-beatles",
+  "_name_or_path": "the-beatles",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
@@ -18,7 +18,9 @@
   "n_inner": null,
   "n_layer": 12,
   "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
   "summary_first_dropout": 0.1,
@@ -28,14 +30,14 @@
   "task_specific_params": {
     "text-generation": {
       "do_sample": true,
-      "max_length": 200,
-      "min_length": 100,
+      "max_length": 150,
+      "min_length": 80,
       "temperature": 1.0,
       "top_p": 0.95
     }
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.10.2",
+  "transformers_version": "4.16.2",
   "use_cache": true,
   "vocab_size": 50257
 }
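The generation defaults changed above (`max_length` 200 → 150, `min_length` 100 → 80) live in `task_specific_params` under the `text-generation` task, which the pipeline picks up as per-call defaults unless overridden. A small sketch for inspecting them, assuming the standard `AutoConfig` API and the `huggingartists/the-beatles` repo id:

```python
from transformers import AutoConfig

# Read the committed config.json from the Hub and print the fields this diff touches.
config = AutoConfig.from_pretrained("huggingartists/the-beatles")
print(config.transformers_version)                     # expected "4.16.2" after this commit
print(config.task_specific_params["text-generation"])  # {'do_sample': True, 'max_length': 150, 'min_length': 80, ...}
```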
evaluation.txt CHANGED
@@ -1 +1 @@
- {"eval_loss": 2.5396728515625, "eval_runtime": 7.0101, "eval_samples_per_second": 22.111, "eval_steps_per_second": 2.853, "epoch": 2.0}
+ {"eval_loss": 2.4675137996673584, "eval_runtime": 3.0, "eval_samples_per_second": 79.001, "eval_steps_per_second": 10.0, "epoch": 3.0}
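`eval_loss` here is the mean causal-LM cross-entropy on the held-out lyrics, so exponentiating it gives validation perplexity; on that reading, this commit moves perplexity from roughly 12.7 to 11.8:

```python
import math

# Perplexity = exp(cross-entropy), assuming eval_loss is a mean per-token value.
old_loss, new_loss = 2.5396728515625, 2.4675137996673584
print(round(math.exp(old_loss), 2), "->", round(math.exp(new_loss), 2))  # 12.68 -> 11.79
```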
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:f3f36c2f7c7ddcb5a0ba27a0a735f4eeea2faf652a081d5446fcfd3063a5e91d
+ oid sha256:dac288e6d24ad64bc0400afea52acf43fb877278427c0b94bfc66a5e74ab0118
 size 497764120
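The repo ships Flax weights alongside the PyTorch ones, so the same checkpoint should be loadable in JAX/Flax as well; a minimal sketch, assuming the standard `FlaxGPT2LMHeadModel` loader and the `huggingartists/the-beatles` repo id:

```python
from transformers import FlaxGPT2LMHeadModel

# flax_model.msgpack is the Flax serialization of the same fine-tuned GPT-2 weights.
model = FlaxGPT2LMHeadModel.from_pretrained("huggingartists/the-beatles")
print(model.config.n_layer, model.config.n_positions)  # 12 1024
```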
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:01d0c006bc4779d8568c0522e5b4dffe3f82a222541b07e498cafa70e35560b2
- size 995603825
+ oid sha256:0770016d959230c09406eebaf5541fda583a4ca2669877f7960882db3139a0f2
+ size 995604017
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:24cb962a13630d160fb8f252859dfc4a16bbf489188271190908493e98b8bd48
+ oid sha256:cb81af3c77a402600bd26dea9bb806249d28dbe73e02b04fa00c40d474e5d983
 size 510403817
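The `oid sha256:` / `size` lines above are Git LFS pointers: the repo tracks only the hash and byte count, while the binary itself lives in LFS storage. A downloaded copy can be checked against the new pointer with plain `hashlib` (the local path is illustrative):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large checkpoints do not need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Should match the pointer committed for pytorch_model.bin above.
print(sha256_of("pytorch_model.bin") ==
      "cb81af3c77a402600bd26dea9bb806249d28dbe73e02b04fa00c40d474e5d983")
```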
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:bf4704f411f7c64c230e3a475c23d3112f179d24c4827bb93cc3c981ff7942d5
+ oid sha256:5a3d0787ebc876fb912a62da619ad27191a3237d59b63f8eef2a74d4bcfbced2
 size 14567
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:f1403a00d87496c96b524036f4c8c82be5fad3d7a1c0f59e9835444c0e5d9468
+ oid sha256:5f4ff9efcd89b539e35b193ff42bd66cb0681357f0d8d3d42b3ccb073e3b235b
 size 623
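`optimizer.pt`, `scheduler.pt` and `rng_state.pth` are the extra state the `transformers` Trainer writes next to the weights so a run can be resumed exactly (via `trainer.train(resume_from_checkpoint=...)`). They are ordinary torch pickles and can be inspected once fetched locally (paths below assume the files have already been downloaded):

```python
import torch

# Optimizer and LR-scheduler state dicts saved by the Trainer checkpointing logic.
optimizer_state = torch.load("optimizer.pt", map_location="cpu")
scheduler_state = torch.load("scheduler.pt", map_location="cpu")
print(optimizer_state.keys())  # typically dict_keys(['state', 'param_groups'])
print(scheduler_state)         # e.g. {'last_epoch': ..., '_step_count': ..., ...}
```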
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "best_metric": 2.5396728515625,
-  "best_model_checkpoint": "output/the-beatles/checkpoint-250",
-  "epoch": 2.0,
-  "global_step": 250,
+  "best_metric": 2.4675137996673584,
+  "best_model_checkpoint": "output/the-beatles/checkpoint-345",
+  "epoch": 3.0,
+  "global_step": 345,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -314,11 +314,133 @@
   "eval_samples_per_second": 22.653,
   "eval_steps_per_second": 2.923,
   "step": 250
+  },
+  {
+    "epoch": 2.22,
+    "learning_rate": 0.0001218137945423232,
+    "loss": 2.2031,
+    "step": 255
+  },
+  {
+    "epoch": 2.26,
+    "learning_rate": 0.00011542314562479972,
+    "loss": 2.0681,
+    "step": 260
+  },
+  {
+    "epoch": 2.3,
+    "learning_rate": 0.0001081602700970799,
+    "loss": 2.2497,
+    "step": 265
+  },
+  {
+    "epoch": 2.35,
+    "learning_rate": 0.00010016046158835702,
+    "loss": 2.1648,
+    "step": 270
+  },
+  {
+    "epoch": 2.39,
+    "learning_rate": 9.157274139492967e-05,
+    "loss": 2.2327,
+    "step": 275
+  },
+  {
+    "epoch": 2.43,
+    "learning_rate": 8.255708249541068e-05,
+    "loss": 2.2978,
+    "step": 280
+  },
+  {
+    "epoch": 2.48,
+    "learning_rate": 7.328142955681647e-05,
+    "loss": 2.2115,
+    "step": 285
+  },
+  {
+    "epoch": 2.52,
+    "learning_rate": 6.391857044318358e-05,
+    "loss": 2.2139,
+    "step": 290
+  },
+  {
+    "epoch": 2.57,
+    "learning_rate": 5.464291750458936e-05,
+    "loss": 2.3337,
+    "step": 295
+  },
+  {
+    "epoch": 2.61,
+    "learning_rate": 4.5627258605070364e-05,
+    "loss": 2.0981,
+    "step": 300
+  },
+  {
+    "epoch": 2.65,
+    "learning_rate": 3.703953841164307e-05,
+    "loss": 2.1107,
+    "step": 305
+  },
+  {
+    "epoch": 2.7,
+    "learning_rate": 2.9039729902920186e-05,
+    "loss": 2.1893,
+    "step": 310
+  },
+  {
+    "epoch": 2.74,
+    "learning_rate": 2.177685437520028e-05,
+    "loss": 2.353,
+    "step": 315
+  },
+  {
+    "epoch": 2.78,
+    "learning_rate": 1.5386205457676823e-05,
+    "loss": 2.1984,
+    "step": 320
+  },
+  {
+    "epoch": 2.83,
+    "learning_rate": 9.986828848110892e-06,
+    "loss": 2.209,
+    "step": 325
+  },
+  {
+    "epoch": 2.87,
+    "learning_rate": 5.679304716725967e-06,
+    "loss": 2.2126,
+    "step": 330
+  },
+  {
+    "epoch": 2.91,
+    "learning_rate": 2.5438740879409566e-06,
+    "loss": 2.1803,
+    "step": 335
+  },
+  {
+    "epoch": 2.96,
+    "learning_rate": 6.389441019077178e-07,
+    "loss": 2.0247,
+    "step": 340
+  },
+  {
+    "epoch": 3.0,
+    "learning_rate": 0.0,
+    "loss": 2.3114,
+    "step": 345
+  },
+  {
+    "epoch": 3.0,
+    "eval_loss": 2.4675137996673584,
+    "eval_runtime": 2.9387,
+    "eval_samples_per_second": 80.649,
+    "eval_steps_per_second": 10.209,
+    "step": 345
   }
 ],
-  "max_steps": 250,
-  "num_train_epochs": 2,
-  "total_flos": 260246863872000.0,
+  "max_steps": 345,
+  "num_train_epochs": 3,
+  "total_flos": 359015251968000.0,
   "trial_name": null,
   "trial_params": null
 }
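Everything added to `log_history` above can be read back programmatically; a short sketch that pulls the new best checkpoint and the final training-loss entry out of the committed `trainer_state.json`:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Best eval metric and where the corresponding checkpoint was written.
print(state["best_metric"], state["best_model_checkpoint"])
# Last entry that carries a training loss (eval entries use "eval_loss" instead).
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
print(train_logs[-1])  # {'epoch': 3.0, 'learning_rate': 0.0, 'loss': 2.3114, 'step': 345}
```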
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:ce42f54e5d08f9c1684b110fb05bbd513348d13b90f70989ccfab6b0fc97e2f0
- size 2671
+ oid sha256:24f3d64f28ad5e687650bdbd509c3d4c83b36b3b61563bd6a8a2322f9b5a7fc0
+ size 3055
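`training_args.bin` is a pickled `TrainingArguments` object; the size change here is consistent with the newer `transformers` version adding fields. It can be unpickled for inspection with `torch.load` on PyTorch 1.13+ (a compatible `transformers` install is needed on the unpickling side):

```python
import torch

# Recent PyTorch releases default to weights-only loading, which rejects pickled
# dataclasses, so the flag is set explicitly here.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # TrainingArguments
print(args.num_train_epochs, args.learning_rate)
```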