Okyx committed
Commit 277097b
1 Parent(s): 9946d5a

Upload TFBertForTokenClassification

Files changed (3):
  1. README.md +6 -20
  2. config.json +5 -1
  3. tf_model.h5 +2 -2
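
This commit uploads a TFBertForTokenClassification checkpoint. A minimal sketch of loading it for inference follows; the repo id is a placeholder (the actual repository name is not shown on this page), and the sample sentence is purely illustrative.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForTokenClassification

# "Okyx/bert-finetuned-ner" is a hypothetical repo id; substitute the
# repository this commit was actually pushed to.
repo_id = "Okyx/bert-finetuned-ner"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = TFBertForTokenClassification.from_pretrained(repo_id)

# Tag a sample sentence (text is only illustrative).
inputs = tokenizer("Example product listing text", return_tensors="tf")
logits = model(**inputs).logits                      # (1, seq_len, num_labels)
pred_ids = tf.argmax(logits, axis=-1)[0].numpy()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].numpy().tolist())
print([(tok, model.config.id2label[int(i)]) for tok, i in zip(tokens, pred_ids)])
```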
README.md CHANGED
@@ -14,9 +14,7 @@ probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Train Loss: 0.0087
-- Validation Loss: 0.2588
-- Epoch: 9
+
 
 ## Model description
 
@@ -35,28 +33,16 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- optimizer: {'inner_optimizer': {'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 2890, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
+- optimizer: {'inner_optimizer': {'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 22700, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
 - training_precision: mixed_float16
 
 ### Training results
 
-| Train Loss | Validation Loss | Epoch |
-|:----------:|:---------------:|:-----:|
-| 0.5009     | 0.2286          | 0     |
-| 0.1843     | 0.1930          | 1     |
-| 0.1116     | 0.1836          | 2     |
-| 0.0686     | 0.1865          | 3     |
-| 0.0430     | 0.2524          | 4     |
-| 0.0311     | 0.2881          | 5     |
-| 0.0186     | 0.2460          | 6     |
-| 0.0156     | 0.2437          | 7     |
-| 0.0127     | 0.2700          | 8     |
-| 0.0087     | 0.2588          | 9     |
 
 
 ### Framework versions
 
-- Transformers 4.22.1
-- TensorFlow 2.8.2
-- Datasets 2.5.1
-- Tokenizers 0.12.1
+- Transformers 4.24.0
+- TensorFlow 2.9.2
+- Datasets 2.6.1
+- Tokenizers 0.13.2
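
The serialized optimizer dict above is Keras' saved form of an AdamWeightDecay optimizer with a linear (PolynomialDecay, power=1.0) schedule, wrapped in a dynamic loss-scale optimizer by the mixed_float16 policy. A rough, non-authoritative way to reproduce a matching setup with transformers' `create_optimizer` helper, assuming the updated step count (22700) and no warmup (warmup is not recorded in the serialized config):

```python
import tensorflow as tf
from transformers import create_optimizer

# With this policy set, Keras wraps the optimizer in a dynamic LossScaleOptimizer
# at compile time, which is where the 'dynamic': True / 'initial_scale': 32768.0
# fields in the serialized config come from.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

# Linear decay from 2e-05 to 0 over 22700 steps, weight_decay_rate=0.01,
# matching the dict above. num_warmup_steps=0 is an assumption.
optimizer, lr_schedule = create_optimizer(
    init_lr=2e-05,
    num_train_steps=22700,
    num_warmup_steps=0,
    weight_decay_rate=0.01,
)
```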
config.json CHANGED
@@ -13,6 +13,8 @@
     "0": "O",
     "1": "B-Brand",
     "10": "I-TM",
+    "11": "B-Color",
+    "12": "I-Color",
     "2": "I-Brand",
     "3": "B-Model",
     "4": "I-Model",
@@ -27,11 +29,13 @@
   "label2id": {
     "B-Brand": "1",
     "B-CC": "7",
+    "B-Color": "11",
     "B-Model": "3",
     "B-TM": "9",
     "B-Tipe": "5",
     "I-Brand": "2",
     "I-CC": "8",
+    "I-Color": "12",
     "I-Model": "4",
     "I-TM": "10",
     "I-Tipe": "6",
@@ -44,7 +48,7 @@
   "num_hidden_layers": 12,
   "pad_token_id": 0,
   "position_embedding_type": "absolute",
-  "transformers_version": "4.22.1",
+  "transformers_version": "4.24.0",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 28996
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7dd0afd67966a8f4adf4668b57b2e0e8307a7843264002c1965f82b94115b55
-size 431198260
+oid sha256:b1a1939fe81972589c6743fbff06d57d2427a59b478b4b22d618c6983a5690a1
+size 431204412
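
tf_model.h5 is stored through Git LFS, so the file tracked in the repository is only a pointer (spec version, sha256 oid, byte size). A small sketch for verifying a locally downloaded tf_model.h5 against the updated pointer; the local path is an assumption:

```python
import hashlib
from pathlib import Path

# Values from the new LFS pointer in this commit.
EXPECTED_SHA256 = "b1a1939fe81972589c6743fbff06d57d2427a59b478b4b22d618c6983a5690a1"
EXPECTED_SIZE = 431204412

path = Path("tf_model.h5")  # local download location is an assumption

assert path.stat().st_size == EXPECTED_SIZE, "size does not match the LFS pointer"

sha256 = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

assert sha256.hexdigest() == EXPECTED_SHA256, "oid does not match the LFS pointer"
print("tf_model.h5 matches the LFS pointer")
```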