moos124 committed
Commit cf45e02 · verified · 1 Parent(s): 5e64d96

Upload Kaggle TPU pretrained tiny causal LM

config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "add_cross_attention": false,
+   "attn_pdrop": 0.0,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.0,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 2048,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 2048,
+   "pad_token_id": null,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.0,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "tie_word_embeddings": true,
+   "transformers_version": "5.8.0",
+   "use_cache": false,
+   "vocab_size": 50257
+ }
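
A minimal usage sketch for the config above: it loads the checkpoint with transformers and prints the architecture fields and parameter count. The repo id moos124/tiny-lm-125m is taken from hub_model_id in training_config.json further down; treat it as an assumption if the files were cloned elsewhere.

# Minimal sketch, assuming the files in this commit live in the
# moos124/tiny-lm-125m repo (the hub_model_id from training_config.json below).
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("moos124/tiny-lm-125m")
print(config.n_layer, config.n_embd, config.n_head)  # 12 768 12

model = AutoModelForCausalLM.from_pretrained("moos124/tiny-lm-125m")
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")  # roughly 125M, matching the tiny_125m preset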
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:712620f9757e650a18a90a19d0a3fe7895cec55e742c8f33b4de14961245321c
+ size 655341431
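
The three lines above are a Git LFS pointer, not the weights themselves: the oid and size fields identify a ~655 MB object stored in LFS. A sketch of fetching the real file with huggingface_hub, the repo id again assumed from hub_model_id below:

# Sketch: resolve the LFS pointer to the actual checkpoint file.
# hf_hub_download fetches the real object and returns a local cache path.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="moos124/tiny-lm-125m",  # assumed from hub_model_id below
    filename="pytorch_model.bin",
)
print(weights_path)  # path to the ~655 MB (655,341,431-byte) file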
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "is_local": false,
+   "local_files_only": false,
+   "model_max_length": 1024,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
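
This is a stock GPT-2 tokenizer with every special token mapped to <|endoftext|>. Note that model_max_length ships as 1024 while config.json declares n_positions 2048, so long inputs would be flagged at half the model's actual context window. A sketch of loading it and overriding that limit, again assuming the moos124/tiny-lm-125m repo id:

# Sketch: load the tokenizer and align its advertised limit with the
# model's 2048-token context window (it ships as 1024 here).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("moos124/tiny-lm-125m")
tokenizer.model_max_length = 2048  # config.json: n_positions = 2048

ids = tokenizer("Hello world")["input_ids"]
print(ids, "->", tokenizer.decode(ids))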
training_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "dataset_name": "LLM360/TxT360",
+   "dataset_config": null,
+   "split": "train",
+   "text_column": "text",
+   "streaming": true,
+   "download_rows": null,
+   "shuffle_buffer": 50000,
+   "preprocessing_batch_size": 128,
+   "iterable_shards_when_downloaded": 1024,
+   "tokenizer_name": "gpt2",
+   "block_size": 2048,
+   "model_preset": "tiny_125m",
+   "n_layer": null,
+   "n_embd": null,
+   "n_head": null,
+   "resid_pdrop": 0.0,
+   "embd_pdrop": 0.0,
+   "attn_pdrop": 0.0,
+   "gradient_checkpointing": false,
+   "max_parameters": 600000000,
+   "num_tpu_processes": 1,
+   "per_device_batch_size": 8,
+   "gradient_accumulation_steps": 4,
+   "max_steps": 10000,
+   "learning_rate": 0.0003,
+   "weight_decay": 0.1,
+   "beta1": 0.9,
+   "beta2": 0.95,
+   "warmup_steps": 100,
+   "max_grad_norm": 1.0,
+   "num_workers": 0,
+   "seed": 42,
+   "log_every": 20,
+   "save_every": 100,
+   "output_dir": "/kaggle/working/tiny-lm-tpu",
+   "resume_from": null,
+   "push_to_hub": true,
+   "hub_model_id": "moos124/tiny-lm-125m"
+ }
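
The batch fields above pin down the run's token budget exactly; the arithmetic below uses only values from this file.

# Sketch: derive the effective token budget implied by training_config.json.
# Pure arithmetic on the values shown above; no external dependencies.
per_device_batch_size = 8
gradient_accumulation_steps = 4
num_tpu_processes = 1
block_size = 2048
max_steps = 10_000

sequences_per_step = per_device_batch_size * gradient_accumulation_steps * num_tpu_processes
tokens_per_step = sequences_per_step * block_size
total_tokens = tokens_per_step * max_steps

print(sequences_per_step)  # 32 sequences per optimizer step
print(tokens_per_step)     # 65,536 tokens per step
print(total_tokens)        # 655,360,000, i.e. ~0.66B tokens over the run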