KimByeongSu committed
Commit
2d39f50
1 Parent(s): d225e29

gpt-neo-125m-cs-finetuning-10000-2

README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: mit
+ base_model: EleutherAI/gpt-neo-125m
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: gpt-neo-125m-cs-finetuning-10000-2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gpt-neo-125m-cs-finetuning-10000-2
+
+ This model is a fine-tuned version of [EleutherAI/gpt-neo-125m](https://huggingface.co/EleutherAI/gpt-neo-125m) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 3.3213
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | No log        | 1.0   | 130  | 3.3903          |
+ | No log        | 2.0   | 260  | 3.3344          |
+ | No log        | 3.0   | 390  | 3.3213          |
+
+
+ ### Framework versions
+
+ - Transformers 4.36.2
+ - Pytorch 1.13.1+cu117
+ - Datasets 2.14.6
+ - Tokenizers 0.15.0
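For reference, a minimal sketch of how the hyperparameters above map onto `transformers.TrainingArguments` and `Trainer`; the output directory, the datasets, and the per-epoch evaluation strategy are assumptions not recorded in the card (the Adam betas/epsilon listed are the library defaults, so no explicit optimizer argument is needed):

```python
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

# Base model named in the card; the fine-tuning datasets are not recorded,
# so train_dataset/eval_dataset below are hypothetical placeholders.
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125m")

training_args = TrainingArguments(
    output_dir="gpt-neo-125m-cs-finetuning-10000-2",  # assumed output path
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3.0,
    evaluation_strategy="epoch",  # assumption: the results table logs one eval per epoch
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,  # hypothetical: not recorded in the card
    eval_dataset=eval_dataset,    # hypothetical: not recorded in the card
)
trainer.train()
```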
config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "_name_or_path": "EleutherAI/gpt-neo-125m",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPTNeoForCausalLM"
+   ],
+   "attention_dropout": 0,
+   "attention_layers": [
+     "global",
+     "local",
+     "global",
+     "local",
+     "global",
+     "local",
+     "global",
+     "local",
+     "global",
+     "local",
+     "global",
+     "local"
+   ],
+   "attention_types": [
+     [
+       [
+         "global",
+         "local"
+       ],
+       6
+     ]
+   ],
+   "bos_token_id": 50256,
+   "classifier_dropout": 0.1,
+   "embed_dropout": 0,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": null,
+   "layer_norm_epsilon": 1e-05,
+   "max_position_embeddings": 2048,
+   "model_type": "gpt_neo",
+   "num_heads": 12,
+   "num_layers": 12,
+   "resid_dropout": 0,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.2",
+   "use_cache": true,
+   "vocab_size": 50257,
+   "window_size": 256
+ }
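The `attention_types` entry above is shorthand that `GPTNeoConfig` expands into the flat `attention_layers` list: the `["global", "local"]` pattern repeated 6 times, one entry per layer. A small sketch of that expansion:

```python
# Expand [[["global", "local"], 6]] into the 12-entry attention_layers list:
# each (pattern, repeats) pair contributes pattern * repeats layer types.
attention_types = [[["global", "local"], 6]]

attention_layers = []
for pattern, repeats in attention_types:
    attention_layers.extend(pattern * repeats)

print(attention_layers)
# ['global', 'local', 'global', 'local', ...] -- 12 entries, matching num_layers
```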
emissions.csv ADDED
@@ -0,0 +1,3 @@
+ timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+ 2024-03-26T17:25:38,codecarbon,9c97083e-b232-454a-9222-602cf20281a7,168.14643454551697,0.0046201703461238,2.7477064016324496e-05,42.5,166.799,11.905189990997316,0.0019848614114854,0.0075454761418133,0.0005555868708181,0.0100859244241168,South Korea,KOR,gyeonggi-do,,,Windows-10-10.0.19045-SP0,3.9.18,2.2.3,20,12th Gen Intel(R) Core(TM) i7-12700,1,1 x NVIDIA GeForce RTX 3060,127.1377,37.4331,31.74717330932617,machine,N,1.0
+ 2024-03-26T18:05:30,codecarbon,9f994406-2a0a-4112-bee8-9320dbd4e771,168.16500115394592,0.004624389146684945,2.749911762229031e-05,42.5,167.838,11.905189990997316,0.001985116079449654,0.007554412805462876,0.0005556052653468027,0.010095134150259331,South Korea,KOR,gyeonggi-do,,,Windows-10-10.0.19045-SP0,3.9.18,2.2.3,20,12th Gen Intel(R) Core(TM) i7-12700,1,1 x NVIDIA GeForce RTX 3060,127.1377,37.4331,31.747173309326172,machine,N,1.0
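The column layout (and the `codecarbon_version` field, 2.2.3) identifies this as codecarbon output; each of the two rows records one tracked run of about 168 seconds. A minimal sketch of how such a file is produced, assuming the tracker wraps the training call:

```python
from codecarbon import EmissionsTracker

# Appends one row to emissions.csv in the working directory when stopped.
tracker = EmissionsTracker(project_name="codecarbon")
tracker.start()
try:
    trainer.train()  # hypothetical: the fine-tuning run being measured
finally:
    emissions_kg = tracker.stop()  # total emissions in kg CO2-eq

print(f"{emissions_kg:.6f} kg CO2-eq")
```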
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.36.2"
+ }
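A minimal sketch of loading the checkpoint and generating with these token ids; the repository id is inferred from the committer and model name, so treat it as an assumption:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repository id (committer name + model name from the card).
repo_id = "KimByeongSu/gpt-neo-125m-cs-finetuning-10000-2"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("Example prompt:", return_tensors="pt")  # arbitrary prompt
outputs = model.generate(
    **inputs,
    max_new_tokens=40,
    pad_token_id=50256,  # bos/eos id from generation_config.json
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```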
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56965b2f0eb79041031341f91bbb9f31edbd72ebad977cb4a1576468a1acf09b
+ size 500811336
runs/Mar26_17-22-46_kbs/events.out.tfevents.1711441370.kbs.26488.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:729ce84e98e72e976b9caff43a2d670b70a42c5358e975aa442cfc0d19b65f72
+ size 5983
runs/Mar26_17-22-46_kbs/events.out.tfevents.1711441542.kbs.26488.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d39921ddd9ef95628307cff8a85da14508286628e7219b4f0d415665eaf23146
+ size 311
runs/Mar26_18-02-38_kbs/events.out.tfevents.1711443761.kbs.26488.16 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf0e88d54667cac287e1222a2222e7d33970edcb4e60a96513f18023d8de0053
+ size 5983
runs/Mar26_18-02-38_kbs/events.out.tfevents.1711443949.kbs.26488.17 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1523a487c4b27617e39e1e7b5ba1ab2130a7d7ad87622ab2aa47401b83525cfc
+ size 311
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 2048,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
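As the config above shows, the GPT-2-style tokenizer reuses `<|endoftext|>` (id 50256) for bos, eos, unk, and pad, with a 2048-token limit. A short sketch of what that means for batched encoding, again using the assumed repository id:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "KimByeongSu/gpt-neo-125m-cs-finetuning-10000-2"  # assumed repository id
)

batch = tokenizer(
    ["short example", "a somewhat longer example sentence"],
    padding=True,         # pads the shorter sequence with id 50256
    truncation=True,
    max_length=2048,      # model_max_length from the config
    return_tensors="pt",
)
print(batch["input_ids"].shape)
print(tokenizer.pad_token_id)  # 50256, i.e. <|endoftext|>
```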
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:002cdf2e314687f84f90b5e918189983d1b05441033accef7e29212e179ef8ad
+ size 4283
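`training_args.bin` is the pickled `TrainingArguments` object the Trainer saved alongside the checkpoint. A sketch of inspecting it, with the usual caveat that unpickling executes code and should only be done on trusted files:

```python
import torch

# Unpickles a transformers.TrainingArguments object; trusted files only.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.seed)
# Expected from the card: 2e-05 3.0 42
```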
vocab.json ADDED
The diff for this file is too large to render. See raw diff