Ruth committed on
Commit 92894c1
1 parent: f35e9bf

Training in progress epoch 0

README.md ADDED
@@ -0,0 +1,53 @@
---
license: mit
tags:
- generated_from_keras_callback
model-index:
- name: Ruth/gelectra-large-germeval_14
  results: []
---

<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->

# Ruth/gelectra-large-germeval_14

This model is a fine-tuned version of [deepset/gelectra-large](https://huggingface.co/deepset/gelectra-large) on an unknown dataset (presumably GermEval 2014, given the model name and the NER label set in `config.json`).
It achieves the following results after the first training epoch:
- Train Loss: 0.1998
- Validation Loss: 0.0986
- Epoch: 0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 12750, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
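
The serialized optimizer dict above corresponds to what transformers' TF helper `create_optimizer` produces: `AdamWeightDecay` with a linear (power 1.0) polynomial decay from 2e-5 to 0 over 12750 steps. A minimal sketch of how an equivalent optimizer could be rebuilt; `num_warmup_steps=0` is an assumption, since the recorded schedule shows no warmup phase:

```python
from transformers import create_optimizer

# Rebuild an AdamWeightDecay optimizer matching the config above:
# linear polynomial decay 2e-5 -> 0.0 over 12750 steps, weight decay 0.01.
optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,
    num_train_steps=12750,
    num_warmup_steps=0,  # assumption: the serialized schedule has no warmup
    weight_decay_rate=0.01,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```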
### Training results

| Train Loss | Validation Loss | Epoch |
|:----------:|:---------------:|:-----:|
| 0.1998     | 0.0986          | 0     |


### Framework versions

- Transformers 4.18.0
- TensorFlow 2.6.2
- Datasets 1.18.0
- Tokenizers 0.12.1
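
Since the commit ships TensorFlow weights (`tf_model.h5`) and a token-classification head (see `config.json` below), the checkpoint can be exercised with the standard transformers pipeline. A minimal usage sketch, assuming the repo id `Ruth/gelectra-large-germeval_14` is publicly resolvable; the example sentence is illustrative:

```python
from transformers import pipeline

# Token-classification (NER) pipeline over the fine-tuned checkpoint.
# framework="tf" because this commit only adds TensorFlow weights.
ner = pipeline(
    "token-classification",
    model="Ruth/gelectra-large-germeval_14",
    framework="tf",
    aggregation_strategy="simple",  # merge B-/I- word pieces into spans
)

print(ner("Angela Merkel besuchte die Universität Leipzig."))
```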
config.json ADDED
@@ -0,0 +1,83 @@
{
  "_name_or_path": "deepset/gelectra-large",
  "architectures": [
    "ElectraForTokenClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "embedding_size": 1024,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "id2label": {
    "0": "O",
    "1": "B-LOC",
    "10": "I-ORGderiv",
    "11": "B-ORGpart",
    "12": "I-ORGpart",
    "13": "B-OTH",
    "14": "I-OTH",
    "15": "B-OTHderiv",
    "16": "I-OTHderiv",
    "17": "B-OTHpart",
    "18": "I-OTHpart",
    "19": "B-PER",
    "2": "I-LOC",
    "20": "I-PER",
    "21": "B-PERderiv",
    "22": "I-PERderiv",
    "23": "B-PERpart",
    "24": "I-PERpart",
    "3": "B-LOCderiv",
    "4": "I-LOCderiv",
    "5": "B-LOCpart",
    "6": "I-LOCpart",
    "7": "B-ORG",
    "8": "I-ORG",
    "9": "B-ORGderiv"
  },
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "label2id": {
    "B-LOC": "1",
    "B-LOCderiv": "3",
    "B-LOCpart": "5",
    "B-ORG": "7",
    "B-ORGderiv": "9",
    "B-ORGpart": "11",
    "B-OTH": "13",
    "B-OTHderiv": "15",
    "B-OTHpart": "17",
    "B-PER": "19",
    "B-PERderiv": "21",
    "B-PERpart": "23",
    "I-LOC": "2",
    "I-LOCderiv": "4",
    "I-LOCpart": "6",
    "I-ORG": "8",
    "I-ORGderiv": "10",
    "I-ORGpart": "12",
    "I-OTH": "14",
    "I-OTHderiv": "16",
    "I-OTHpart": "18",
    "I-PER": "20",
    "I-PERderiv": "22",
    "I-PERpart": "24",
    "O": "0"
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "electra",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "summary_activation": "gelu",
  "summary_last_dropout": 0.1,
  "summary_type": "first",
  "summary_use_proj": true,
  "transformers_version": "4.18.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 31102
}
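
The `id2label` map above encodes the 25 BIO tags of the GermEval 2014 NER scheme: `O` plus `B-`/`I-` pairs for four coarse classes (LOC, ORG, OTH, PER) and their `deriv`/`part` subtypes. A minimal sketch for reading the label set back out with `AutoConfig`, assuming the repo id from the model card:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Ruth/gelectra-large-germeval_14")

# id2label keys are ints after loading; sort numerically to list the tags.
for idx in sorted(config.id2label):
    print(idx, config.id2label[idx])

assert config.num_labels == 25  # O + 12 entity types x (B-, I-)
```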
logs/train/events.out.tfevents.1651755288.bb3a84e6bfa0.600.0.v2 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:50033703211b78bcbcb9360304e79471e1b323e07a5882b3ec1836efb208aab8
size 5929636
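
This file, like the other binary artifacts below, is stored as a Git LFS pointer: a three-line stub giving the spec version, the SHA-256 of the actual object, and its size in bytes, while the object itself lives in LFS storage. A minimal sketch of checking a downloaded object against its pointer; the local path is hypothetical:

```python
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Check a downloaded LFS object against the oid/size in its pointer."""
    data = Path(path).read_bytes()
    return (
        len(data) == expected_size
        and hashlib.sha256(data).hexdigest() == expected_sha256
    )

# Hypothetical local copy of the event file described by the pointer above.
print(verify_lfs_object(
    "events.out.tfevents.1651755288.bb3a84e6bfa0.600.0.v2",
    "50033703211b78bcbcb9360304e79471e1b323e07a5882b3ec1836efb208aab8",
    5929636,
))
```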
logs/train/events.out.tfevents.1651755309.bb3a84e6bfa0.profile-empty ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:636351bad79ef39abb765cecc1b4d4b9eddb74deadb9644fe6b4b121a218b866
size 40
logs/train/plugins/profile/2022_05_05_12_55_09/bb3a84e6bfa0.input_pipeline.pb ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:475859e514fe1aae7840519430949f3223aadbc12efd1e14031271a67b478747
size 3052
logs/train/plugins/profile/2022_05_05_12_55_09/bb3a84e6bfa0.kernel_stats.pb ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f24f8069b5deef04f185f5f629ce74968acbd414316d010ba85c5240bcdc1155
size 317283
logs/train/plugins/profile/2022_05_05_12_55_09/bb3a84e6bfa0.memory_profile.json.gz ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea2eabafe9e161358a7b1cabc84a6c2e071f5a4b1600a3bbf40224a6e3e016c6
size 52462
logs/train/plugins/profile/2022_05_05_12_55_09/bb3a84e6bfa0.overview_page.pb ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2d6d99e937aee22fae161432cc66d64cd5ba932bb5836627a072c0068830e661
size 5407
logs/train/plugins/profile/2022_05_05_12_55_09/bb3a84e6bfa0.tensorflow_stats.pb ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ba0902ebdd093d5dedbe04cf321d00d09f7acbae1eca5aa02ee236de5b1d04d
size 197959
logs/train/plugins/profile/2022_05_05_12_55_09/bb3a84e6bfa0.trace.json.gz ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:76cc8ef7a8c2b7cfb2b75aad6a93df01f5e3bce170807b05899edb3c68aac38a
size 556049
logs/train/plugins/profile/2022_05_05_12_55_09/bb3a84e6bfa0.xplane.pb ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9dbbf057319ff3598772c6bd255cfd4ed80fd4609d9d05393006a6d2de07da6
size 4562720
logs/validation/events.out.tfevents.1651755959.bb3a84e6bfa0.600.1.v2 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f690e6edeba1bd10fa4ec5e55d1d373bfe6a5c07b8307371025a8fdb581b64f
size 194
special_tokens_map.json ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:abe806f44adfd27b42d32e6dd170df247f73f21cc9054b44cd641760fc27d426
size 1339399480
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": false, "max_len": 512, "special_tokens_map_file": null, "name_or_path": "deepset/gelectra-large", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "ElectraTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render.