Sultannn committed
Commit 53a0e3f
1 Parent(s): aafe56c

Training in progress epoch 0

README.md CHANGED
```diff
@@ -13,9 +13,9 @@ probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Train Loss: 5.9875
-- Validation Loss: 6.2857
-- Epoch: 3
+- Train Loss: 6.9373
+- Validation Loss: 6.3278
+- Epoch: 0
 
 ## Model description
 
@@ -34,22 +34,19 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- optimizer: {'inner_optimizer': {'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 0.0007, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 0.0007, 'decay_steps': 28524, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 700, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
+- optimizer: {'inner_optimizer': {'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 0.0006, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 0.0006, 'decay_steps': 33845, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 700, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
 - training_precision: mixed_float16
 
 ### Training results
 
 | Train Loss | Validation Loss | Epoch |
 |:----------:|:---------------:|:-----:|
-| 6.9124 | 6.3751 | 0 |
-| 6.1343 | 6.1381 | 1 |
-| 5.9190 | 6.1139 | 2 |
-| 5.9875 | 6.2857 | 3 |
+| 6.9373 | 6.3278 | 0 |
 
 
 ### Framework versions
 
-- Transformers 4.19.1
-- TensorFlow 2.8.0
-- Datasets 2.2.1
+- Transformers 4.21.3
+- TensorFlow 2.8.2
+- Datasets 2.4.0
 - Tokenizers 0.12.1
```
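The serialized optimizer dict in the hunk above corresponds to transformers' TF training utilities. The repo does not ship training code, so the following is only a sketch of how a config with these exact values is typically produced:

```python
import tensorflow as tf
from transformers import create_optimizer

# Mixed-float16 training; when a model is compiled under this policy, Keras
# wraps the optimizer in a dynamic LossScaleOptimizer, which accounts for the
# 'dynamic': True and 'initial_scale': 32768.0 fields in the serialized dict.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

# AdamWeightDecay with linear warmup followed by linear (power=1.0) polynomial
# decay to 0, matching the new serialized values in this commit: peak learning
# rate 6e-4, 33845 total steps, 700 warmup steps, weight decay 0.01.
optimizer, lr_schedule = create_optimizer(
    init_lr=6e-4,
    num_train_steps=33845,
    num_warmup_steps=700,
    weight_decay_rate=0.01,
)
```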
added_tokens.json CHANGED
```diff
@@ -1 +1,3 @@
-{"<|endoftext|>": 35000}
+{
+  "<|endoftext|>": 35000
+}
```
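added_tokens.json records tokens appended after the base vocabulary; here `<|endoftext|>` sits at id 35000, the first id past the trained vocab. A hedged sketch of how such an entry typically comes about (the local path is the name_or_path recorded in tokenizer_config.json):

```python
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("./GPT-2-puisi")

# A special token absent from the base vocabulary is appended with the next
# free id and serialized to added_tokens.json on save_pretrained().
tokenizer.add_special_tokens({"eos_token": "<|endoftext|>"})
print(tokenizer.convert_tokens_to_ids("<|endoftext|>"))  # -> 35000
```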
config.json CHANGED
```diff
@@ -10,7 +10,7 @@
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
   "model_type": "gpt2",
-  "n_ctx": 256,
+  "n_ctx": 512,
   "n_embd": 768,
   "n_head": 12,
   "n_inner": null,
@@ -31,7 +31,7 @@
       "max_length": 50
     }
   },
-  "transformers_version": "4.19.1",
+  "transformers_version": "4.21.3",
   "use_cache": true,
   "vocab_size": 35000
 }
```
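The substantive change here is `n_ctx`, GPT-2's maximum context window, doubled from 256 to 512; the tokenizer's truncation length further down is raised in lockstep. A quick check against a local checkout (path assumed):

```python
from transformers import GPT2Config

# Load the updated config from a local clone of this repo.
config = GPT2Config.from_pretrained("./GPT-2-puisi")
print(config.n_ctx)       # 512 after this commit
print(config.vocab_size)  # 35000
```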
special_tokens_map.json CHANGED
```diff
@@ -1 +1,24 @@
-{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
```
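The per-token flags serialized above mirror the tokenizers library's AddedToken options, which control how the token is matched in raw text. A small illustration:

```python
from tokenizers import AddedToken

# single_word: only match at word boundaries; lstrip/rstrip: swallow adjacent
# whitespace when matching; normalized: run the normalizer before matching.
eos = AddedToken(
    "<|endoftext|>",
    single_word=False,
    lstrip=False,
    rstrip=False,
    normalized=True,
)
```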
tf_model.h5 CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d291bdb27b8b6d9981e06cfb8c452fc82cd2df4f42c15c20717506f77c5a2ac
+oid sha256:f0ead25dc55e0f1e33a8e74677d900a0db217b8fcfe86a7b57f5c4c3037ac64b
 size 451065960
```
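tf_model.h5 is a Git LFS pointer file: only the sha256 and size appear in the diff, while the actual 451 MB weights live in LFS storage. One way to verify a downloaded copy against the new pointer (a sketch):

```python
import hashlib

# Hash the materialized LFS file in 1 MiB chunks to avoid loading ~451 MB at once.
h = hashlib.sha256()
with open("tf_model.h5", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

expected = "f0ead25dc55e0f1e33a8e74677d900a0db217b8fcfe86a7b57f5c4c3037ac64b"
print(h.hexdigest() == expected)  # True for an intact download
```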
tokenizer.json CHANGED
```diff
@@ -2,7 +2,7 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length": 256,
+    "max_length": 512,
     "strategy": "LongestFirst",
     "stride": 0
   },
```
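This truncation block is the fast tokenizer's default: with `max_length` raised to 512 and direction `Right`, encodings keep the first 512 tokens, matching the new `n_ctx`. The equivalent call from the transformers side (path assumed):

```python
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("./GPT-2-puisi")

# Right-truncate to the new 512-token window from config.json's n_ctx.
ids = tok("a long poem " * 500, truncation=True, max_length=512)["input_ids"]
assert len(ids) <= 512
```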
tokenizer_config.json CHANGED
```diff
@@ -1 +1,34 @@
-{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "max_len": 256, "special_tokens_map_file": null, "name_or_path": "./GPT-2-puisi", "errors": "replace", "pad_token": null, "add_bos_token": false, "tokenizer_class": "GPT2Tokenizer"}
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "max_len": 512,
+  "name_or_path": "./GPT-2-puisi",
+  "pad_token": null,
+  "special_tokens_map_file": null,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
```
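Taken together, the commit raises the context window to 512 (`max_len`, `n_ctx`, and the tokenizer truncation all move together), refreshes the framework pins, and uploads the epoch-0 weights. A minimal smoke test of the resulting checkpoint; the hub repo id below is illustrative, inferred from the committer name and `name_or_path`:

```python
from transformers import AutoTokenizer, TFGPT2LMHeadModel

repo = "Sultannn/GPT-2-puisi"  # assumed repo id; substitute the real one
tokenizer = AutoTokenizer.from_pretrained(repo)
model = TFGPT2LMHeadModel.from_pretrained(repo)

inputs = tokenizer("hujan turun di", return_tensors="tf")
# max_length=50 mirrors the text-generation defaults in config.json;
# the sampling flags here are illustrative, not from the repo.
outputs = model.generate(**inputs, max_length=50, do_sample=True, top_k=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```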