nikoslefkos committed
Commit 846cbb8 • 1 Parent(s): 6f41197
Training in progress epoch 0
Browse files:
- README.md +54 -0
- config.json +102 -0
- special_tokens_map.json +7 -0
- tf_model.h5 +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +13 -0
- vocab.txt +0 -0
README.md
ADDED
@@ -0,0 +1,54 @@
---
license: apache-2.0
base_model: distilbert-base-cased
tags:
- generated_from_keras_callback
model-index:
- name: nikoslefkos/nerbert_ontonotes
  results: []
---

<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->

# nikoslefkos/nerbert_ontonotes

This model is a fine-tuned version of [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.2213
- Validation Loss: 0.1077
- Epoch: 0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 9360, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
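The optimizer dict above describes transformers' `AdamWeightDecay` wrapped around a linear (`power: 1.0`) `PolynomialDecay` schedule. A minimal sketch, assuming zero warmup steps (none appears in the logged config), of rebuilding an equivalent optimizer with the library's `create_optimizer` helper:

```python
from transformers import create_optimizer

# Sketch only, not the repo's training code. Values are read off the
# optimizer dict above; num_warmup_steps=0 is an assumption.
optimizer, lr_schedule = create_optimizer(
    init_lr=3e-5,            # initial_learning_rate of the PolynomialDecay
    num_train_steps=9360,    # decay_steps; power=1.0 decays linearly to 0.0
    num_warmup_steps=0,      # assumed: no warmup listed
    weight_decay_rate=0.01,  # AdamWeightDecay's decoupled weight decay
)
```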
### Training results

| Train Loss | Validation Loss | Epoch |
|:----------:|:---------------:|:-----:|
| 0.2213     | 0.1077          | 0     |

### Framework versions

- Transformers 4.31.0
- TensorFlow 2.13.0
- Datasets 2.14.0
- Tokenizers 0.13.3
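The usage sections of this card are still placeholders, so here is a hedged inference sketch rather than official usage: the repo id comes from this commit, `framework="tf"` is assumed because the commit ships TensorFlow weights (tf_model.h5), and the sample sentence is illustrative.

```python
from transformers import pipeline

# Illustrative only; everything except the repo id is an assumption.
ner = pipeline(
    "token-classification",
    model="nikoslefkos/nerbert_ontonotes",
    framework="tf",                  # matches the tf_model.h5 weights
    aggregation_strategy="simple",   # merge B-/I- subword tags into spans
)

print(ner("Barack Obama visited Paris in August 2015."))
```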
config.json
ADDED
@@ -0,0 +1,102 @@
{
  "_name_or_path": "distilbert-base-cased",
  "activation": "gelu",
  "architectures": [
    "DistilBertForTokenClassification"
  ],
  "attention_dropout": 0.1,
  "dim": 768,
  "dropout": 0.1,
  "hidden_dim": 3072,
  "id2label": {
    "0": "O",
    "1": "B-CARDINAL",
    "2": "B-DATE",
    "3": "I-DATE",
    "4": "B-PERSON",
    "5": "I-PERSON",
    "6": "B-NORP",
    "7": "B-GPE",
    "8": "I-GPE",
    "9": "B-LAW",
    "10": "I-LAW",
    "11": "B-ORG",
    "12": "I-ORG",
    "13": "B-PERCENT",
    "14": "I-PERCENT",
    "15": "B-ORDINAL",
    "16": "B-MONEY",
    "17": "I-MONEY",
    "18": "B-WORK_OF_ART",
    "19": "I-WORK_OF_ART",
    "20": "B-FAC",
    "21": "B-TIME",
    "22": "I-CARDINAL",
    "23": "B-LOC",
    "24": "B-QUANTITY",
    "25": "I-QUANTITY",
    "26": "I-NORP",
    "27": "I-LOC",
    "28": "B-PRODUCT",
    "29": "I-TIME",
    "30": "B-EVENT",
    "31": "I-EVENT",
    "32": "I-FAC",
    "33": "B-LANGUAGE",
    "34": "I-PRODUCT",
    "35": "I-ORDINAL",
    "36": "I-LANGUAGE"
  },
  "initializer_range": 0.02,
  "label2id": {
    "B-CARDINAL": 1,
    "B-DATE": 2,
    "B-EVENT": 30,
    "B-FAC": 20,
    "B-GPE": 7,
    "B-LANGUAGE": 33,
    "B-LAW": 9,
    "B-LOC": 23,
    "B-MONEY": 16,
    "B-NORP": 6,
    "B-ORDINAL": 15,
    "B-ORG": 11,
    "B-PERCENT": 13,
    "B-PERSON": 4,
    "B-PRODUCT": 28,
    "B-QUANTITY": 24,
    "B-TIME": 21,
    "B-WORK_OF_ART": 18,
    "I-CARDINAL": 22,
    "I-DATE": 3,
    "I-EVENT": 31,
    "I-FAC": 32,
    "I-GPE": 8,
    "I-LANGUAGE": 36,
    "I-LAW": 10,
    "I-LOC": 27,
    "I-MONEY": 17,
    "I-NORP": 26,
    "I-ORDINAL": 35,
    "I-ORG": 12,
    "I-PERCENT": 14,
    "I-PERSON": 5,
    "I-PRODUCT": 34,
    "I-QUANTITY": 25,
    "I-TIME": 29,
    "I-WORK_OF_ART": 19,
    "O": 0
  },
  "max_position_embeddings": 512,
  "model_type": "distilbert",
  "n_heads": 12,
  "n_layers": 6,
  "output_past": true,
  "pad_token_id": 0,
  "qa_dropout": 0.1,
  "seq_classif_dropout": 0.2,
  "sinusoidal_pos_embds": false,
  "tie_weights_": true,
  "transformers_version": "4.31.0",
  "vocab_size": 28996
}
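The `id2label` table above (37 OntoNotes-style BIO tags) is what maps each token's argmax logit to a tag string. A sketch, assuming the checkpoint is loaded straight from this repo, of applying that mapping by hand:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForTokenClassification

# Sketch of assumed usage: decode raw logits with config.id2label.
repo = "nikoslefkos/nerbert_ontonotes"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = TFAutoModelForTokenClassification.from_pretrained(repo)

inputs = tokenizer("Apple opened a store in London.", return_tensors="tf")
logits = model(**inputs).logits                  # shape: (1, seq_len, 37)
pred_ids = tf.argmax(logits, axis=-1)[0].numpy()

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].numpy().tolist())
print([(tok, model.config.id2label[int(i)]) for tok, i in zip(tokens, pred_ids)])
```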
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
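These are the standard BERT-style special tokens; a quick sketch of assumed usage showing where they land in an encoded sequence:

```python
from transformers import AutoTokenizer

# Assumed usage: the tokenizer wraps each sequence in [CLS] ... [SEP]
# automatically, drawing the markers from the map above.
tokenizer = AutoTokenizer.from_pretrained("nikoslefkos/nerbert_ontonotes")

ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
# expected: ['[CLS]', 'Hello', 'world', '[SEP]']
```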
tf_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66dda3a250c9c832e5de13c5e879f60e8b868e03269de888106b06bd4af10831
size 261004560
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,13 @@
{
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": false,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "DistilBertTokenizer",
  "unk_token": "[UNK]"
}
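With `do_lower_case` set to false the tokenizer preserves case, which matters for NER. A sketch of assumed usage (the bundled tokenizer.json gives a fast tokenizer, so `word_ids()` is available) of aligning subword tokens back to source words, the usual step when assigning per-word NER labels:

```python
from transformers import AutoTokenizer

# Assumed usage: AutoTokenizer loads the fast tokenizer from tokenizer.json,
# which exposes word_ids() for subword-to-word alignment.
tokenizer = AutoTokenizer.from_pretrained("nikoslefkos/nerbert_ontonotes")

enc = tokenizer("The EU signed the accord.")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
print(enc.word_ids())  # one word index per subword; None marks specials
```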
vocab.txt
ADDED
The diff for this file is too large to render.