diao committed on
Commit
e60baa8
1 Parent(s): 6b62929

Uploaded models

README.md CHANGED
@@ -1,3 +1,57 @@
 ---
-license: apache-2.0
+license: mit
+tags:
+- generated_from_trainer
+datasets:
+- lmflow_instruction
+model-index:
+- name: 046_inst-tuning_model-gpt_neo2.7B_num-epoch-5_init-lr-2e-5_bf-16_blocksize768
+  results: []
 ---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# 046_inst-tuning_model-gpt_neo2.7B_num-epoch-5_init-lr-2e-5_bf-16_blocksize768
+
+This model is a fine-tuned version of [EleutherAI/gpt-neo-2.7B](https://huggingface.co/EleutherAI/gpt-neo-2.7B) on the lmflow_instruction dataset.
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 2e-05
+- train_batch_size: 8
+- eval_batch_size: 1
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 8
+- total_train_batch_size: 64
+- total_eval_batch_size: 8
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- num_epochs: 5.0
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.27.0.dev0
+- Pytorch 2.0.0+cu117
+- Datasets 2.10.1
+- Tokenizers 0.13.2
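The card's usage sections are still placeholders, so here is a minimal loading sketch based on the files in this commit (GPTNeoForCausalLM architecture, GPT2Tokenizer, bfloat16 weights, and the sampling defaults from config.json). The repo id and the prompt text are assumptions, not anything this commit documents:

```python
# Minimal usage sketch. The repo id below is an assumption pieced together
# from the commit author and model name; substitute the real Hub path.
# The bfloat16 weights need roughly 6 GB of device memory.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "diao/046_inst-tuning_model-gpt_neo2.7B_num-epoch-5_init-lr-2e-5_bf-16_blocksize768"  # hypothetical

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

# Sampling settings mirror task_specific_params in config.json below.
inputs = tokenizer("Explain what instruction tuning is.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64, do_sample=True, temperature=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```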
all_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.9227586579802852,
+    "train_runtime": 27558.1748,
+    "train_samples": 76238,
+    "train_samples_per_second": 13.832,
+    "train_steps_per_second": 0.216
+}
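These numbers are internally consistent with the README hyperparameters (total_train_batch_size 64 across 8 GPUs); a quick arithmetic check, assuming partial batches are dropped:

```python
# Cross-check the reported throughput against the README hyperparameters.
train_samples = 76238
num_epochs = 5.0
train_runtime = 27558.1748      # seconds
total_train_batch_size = 64     # 8 per device * 8 devices

print(train_samples * num_epochs / train_runtime)   # ~13.832 samples/s, matches
steps = (train_samples // total_train_batch_size) * num_epochs  # drop-last estimate
print(steps / train_runtime)                        # ~0.216 steps/s, matches
```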
config.json ADDED
@@ -0,0 +1,82 @@
+{
+  "_name_or_path": "EleutherAI/gpt-neo-2.7B",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPTNeoForCausalLM"
+  ],
+  "attention_dropout": 0,
+  "attention_layers": [
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local"
+  ],
+  "attention_types": [
+    [
+      [
+        "global",
+        "local"
+      ],
+      16
+    ]
+  ],
+  "bos_token_id": 50256,
+  "embed_dropout": 0,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "hidden_size": 2560,
+  "initializer_range": 0.02,
+  "intermediate_size": null,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neo",
+  "num_heads": 20,
+  "num_layers": 32,
+  "resid_dropout": 0,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50,
+      "temperature": 0.9
+    }
+  },
+  "tokenizer_class": "GPT2Tokenizer",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.27.0.dev0",
+  "use_cache": true,
+  "vocab_size": 50257,
+  "window_size": 256
+}
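The verbose attention_layers list is the compact attention_types spec expanded: the pattern ["global", "local"] repeated 16 times covers all 32 layers. A small sketch of that expansion:

```python
# Expand the compact attention_types spec into the per-layer schedule,
# reproducing the attention_layers list in config.json above.
attention_types = [[["global", "local"], 16]]

attention_layers = []
for pattern, repeats in attention_types:
    attention_layers.extend(pattern * repeats)

assert len(attention_layers) == 32                      # matches num_layers
assert attention_layers[:4] == ["global", "local", "global", "local"]
```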
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.27.0.dev0"
+}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9980ee64803ab8f6eae8eca50e36c7b185799d4c6d2a8d65d0a200853a84bb33
+size 5436910620
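As a rough plausibility check, the pointer size matches a ~2.7B-parameter model serialized in bfloat16 (2 bytes per parameter), plus a little buffer overhead:

```python
# Rough size check for the bfloat16 checkpoint; the parameter count is the
# model family's nominal figure, not an exact count.
nominal_params = 2.7e9
print(nominal_params * 2 / 1e9, "GB")  # ~5.4 GB vs. the 5,436,910,620 bytes above
```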
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 2048,
+  "pad_token": null,
+  "special_tokens_map_file": null,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
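Note that "pad_token" is null, which is typical for GPT-2-style tokenizers; anything that batches inputs with padding needs a pad token first. Reusing the EOS token is the common workaround (general practice, not something this commit prescribes):

```python
from transformers import AutoTokenizer

# The base tokenizer is loaded here only for illustration; this commit ships
# the same GPT2Tokenizer files alongside the fine-tuned weights.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # reuse <|endoftext|> for padding

batch = tokenizer(["short", "a somewhat longer input"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape)
```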
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.9227586579802852,
+    "train_runtime": 27558.1748,
+    "train_samples": 76238,
+    "train_samples_per_second": 13.832,
+    "train_steps_per_second": 0.216
+}
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47ab44df49fbd6a633ab2953fb1c4cd2ef8dc68ef55688575596db2732081500
+size 4859
vocab.json ADDED
The diff for this file is too large to render. See raw diff