marcderbauer committed
Commit 14113fb (1 parent: e96ffa9)

Upload . with huggingface_hub (#1)


- Upload . with huggingface_hub (aab8f97f45c6d330b6f4c395d3aa3f3e347d36b1)

Files changed (7)
  1. config.json +33 -0
  2. optimizer.pt +3 -0
  3. pytorch_model.bin +3 -0
  4. rng_state.pth +3 -0
  5. scheduler.pt +3 -0
  6. trainer_state.json +84 -0
  7. training_args.bin +3 -0
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "bigscience/bloom-560m",
+   "apply_residual_connection_post_layernorm": false,
+   "architectures": [
+     "BloomForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "attention_softmax_in_fp32": true,
+   "bias_dropout_fusion": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "masked_softmax_fusion": true,
+   "model_type": "bloom",
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 24,
+   "offset_alibi": 100,
+   "pad_token_id": 3,
+   "pretraining_tp": 1,
+   "seq_length": 2048,
+   "skip_bias_add": true,
+   "skip_bias_add_qkv": false,
+   "slow_but_exact": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.22.0",
+   "unk_token_id": 0,
+   "use_cache": true,
+   "vocab_size": 250880
+ }
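
A minimal sketch (not part of this commit) of how the uploaded checkpoint could be loaded with transformers. It assumes config.json and pytorch_model.bin sit together in one local checkpoint directory, and it pulls the tokenizer from the base model, since no tokenizer files are included among the seven files in this commit:

    from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

    checkpoint_dir = "."  # assumption: directory holding config.json and pytorch_model.bin

    config = AutoConfig.from_pretrained(checkpoint_dir)          # reads the config.json above
    model = AutoModelForCausalLM.from_pretrained(checkpoint_dir) # loads pytorch_model.bin
    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")  # tokenizer not in this commit

    inputs = tokenizer("Hello", return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
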
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00356c4ec79b7c77d561ecaab1a6c680ef6bf274ce641cd570e935d52cc86fe4
+ size 4473880501
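
The optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt and training_args.bin entries in this commit are Git LFS pointer files like the one above: they record only the spec version, the sha256 oid and the byte size of the real blob. A small sketch (a hypothetical helper, not part of the repo) for checking a downloaded file against its pointer:

    import hashlib
    import os

    def matches_lfs_pointer(path, expected_oid, expected_size):
        """True if the file at `path` has the sha256 and byte size recorded in a
        git-lfs pointer (the `oid sha256:...` and `size ...` lines)."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

    # e.g. for optimizer.pt, using the pointer above:
    # matches_lfs_pointer("optimizer.pt",
    #     "00356c4ec79b7c77d561ecaab1a6c680ef6bf274ce641cd570e935d52cc86fe4",
    #     4473880501)
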
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99626a59946fbcb9fad13ee1029e247697cafaf54a0c262af62314aeed3f79b6
+ size 2236953377
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c17fa06ae763635fe0359eb08c545c3b7f89e80fc0247a7b24b8a77e4708bd61
+ size 13553
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e12990764445c4d92cd3ecd547746aeb2443381908df99441dc8d7da081f15b8
+ size 627
trainer_state.json ADDED
@@ -0,0 +1,84 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 7.142857142857143,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_loss": 2.3697612285614014,
+       "eval_runtime": 53.7583,
+       "eval_samples_per_second": 4.874,
+       "eval_steps_per_second": 0.614,
+       "step": 140
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 2.350198745727539,
+       "eval_runtime": 52.6608,
+       "eval_samples_per_second": 4.975,
+       "eval_steps_per_second": 0.627,
+       "step": 280
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 2.6000664234161377,
+       "eval_runtime": 51.3966,
+       "eval_samples_per_second": 5.098,
+       "eval_steps_per_second": 0.642,
+       "step": 420
+     },
+     {
+       "epoch": 3.57,
+       "learning_rate": 1.2857142857142859e-05,
+       "loss": 1.8394,
+       "step": 500
+     },
+     {
+       "epoch": 4.0,
+       "eval_loss": 3.011274576187134,
+       "eval_runtime": 51.1881,
+       "eval_samples_per_second": 5.118,
+       "eval_steps_per_second": 0.645,
+       "step": 560
+     },
+     {
+       "epoch": 5.0,
+       "eval_loss": 3.5172500610351562,
+       "eval_runtime": 53.2278,
+       "eval_samples_per_second": 4.922,
+       "eval_steps_per_second": 0.62,
+       "step": 700
+     },
+     {
+       "epoch": 6.0,
+       "eval_loss": 3.840390682220459,
+       "eval_runtime": 53.3892,
+       "eval_samples_per_second": 4.907,
+       "eval_steps_per_second": 0.618,
+       "step": 840
+     },
+     {
+       "epoch": 7.0,
+       "eval_loss": 4.020865440368652,
+       "eval_runtime": 52.1972,
+       "eval_samples_per_second": 5.019,
+       "eval_steps_per_second": 0.632,
+       "step": 980
+     },
+     {
+       "epoch": 7.14,
+       "learning_rate": 5.7142857142857145e-06,
+       "loss": 0.4408,
+       "step": 1000
+     }
+   ],
+   "max_steps": 1400,
+   "num_train_epochs": 10,
+   "total_flos": 1857413971968000.0,
+   "trial_name": null,
+   "trial_params": null
+ }
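
trainer_state.json above logs one eval entry per epoch plus a training-loss entry every 500 steps. A short sketch (not part of the commit) that reads it and prints the eval-loss curve:

    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    # eval entries carry "eval_loss"; training-loss entries carry "loss"
    for entry in state["log_history"]:
        if "eval_loss" in entry:
            print(f"epoch {entry['epoch']:.0f}  eval_loss {entry['eval_loss']:.4f}")

By these numbers the eval loss bottoms out after epoch 2 (about 2.35) and climbs through epoch 7, while the reported training loss keeps falling.
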
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1ac49390de10588789eb996968293eaf1dabcacab3d598975996a668bbec820
+ size 3387
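
training_args.bin is, by convention, the TrainingArguments object that transformers' Trainer saves with torch.save. A hedged sketch for inspecting it; the attribute names below are standard TrainingArguments fields, but which values were actually used for this run is not recorded in the commit:

    import torch

    # weights_only=False runs the full unpickler, which this file needs;
    # only do this for files you trust (drop the argument on very old torch versions).
    args = torch.load("training_args.bin", weights_only=False)
    print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
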