LanguageSavvy committed on
Commit fe800a0
1 Parent(s): d6e3f83

Training in progress epoch 0

README.md CHANGED
@@ -15,8 +15,8 @@ probably proofread and complete it, then remove this comment. -->
  
  This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Train Loss: 2.0343
- - Validation Loss: 1.8209
+ - Train Loss: 2.0197
+ - Validation Loss: 1.8531
  - Epoch: 0
  
  ## Model description
@@ -43,12 +43,12 @@ The following hyperparameters were used during training:
  
  | Train Loss | Validation Loss | Epoch |
  |:----------:|:---------------:|:-----:|
- | 2.0343 | 1.8209 | 0 |
+ | 2.0197 | 1.8531 | 0 |
  
  
  ### Framework versions
  
- - Transformers 4.34.0
- - TensorFlow 2.13.0
- - Datasets 2.14.5
+ - Transformers 4.35.1
+ - TensorFlow 2.14.0
+ - Datasets 2.14.7
  - Tokenizers 0.14.1
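Since the card describes a TensorFlow checkpoint fine-tuned from distilroberta-base, a minimal loading sketch might look like the following. The repo id is a placeholder (the card does not state it); everything else uses standard Transformers APIs.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForMaskedLM

repo_id = "LanguageSavvy/fine-tuned-distilroberta"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = TFAutoModelForMaskedLM.from_pretrained(repo_id)

inputs = tokenizer("Paris is the <mask> of France.", return_tensors="tf")
logits = model(**inputs).logits

# Locate the <mask> position and decode the top prediction there.
mask_pos = int(tf.argmax(
    tf.cast(inputs["input_ids"][0] == tokenizer.mask_token_id, tf.int32)
))
predicted_id = int(tf.argmax(logits[0, mask_pos]))
print(tokenizer.decode([predicted_id]))
```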
config.json CHANGED
@@ -19,7 +19,7 @@
  "num_hidden_layers": 6,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
- "transformers_version": "4.34.0",
+ "transformers_version": "4.35.1",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 50265
special_tokens_map.json CHANGED
@@ -2,7 +2,13 @@
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
- "mask_token": "<mask>",
+ "mask_token": {
+   "content": "<mask>",
+   "lstrip": true,
+   "normalized": false,
+   "rstrip": false,
+   "single_word": false
+ },
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:93e047120970464a0f40f18d69b5cc098c61d889e832659c5b83263d1aa64ee0
+ oid sha256:b1792b1bcdc7d40468d6efd320855c1a363177b17222b73a8f72f51949f3e0ce
  size 484838604
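tf_model.h5 is stored as a Git LFS pointer, so only the sha256 changes here; the weights themselves live in LFS. A download can be checked against the pointer like this (assuming the file sits in the current directory):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so large weight files fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected value taken from the new LFS pointer above.
expected = "b1792b1bcdc7d40468d6efd320855c1a363177b17222b73a8f72f51949f3e0ce"
assert sha256_of("tf_model.h5") == expected
```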
tokenizer.json CHANGED
@@ -9,7 +9,7 @@
  "single_word": false,
  "lstrip": false,
  "rstrip": false,
- "normalized": false,
+ "normalized": true,
  "special": true
  },
  {
@@ -18,7 +18,7 @@
  "single_word": false,
  "lstrip": false,
  "rstrip": false,
- "normalized": false,
+ "normalized": true,
  "special": true
  },
  {
@@ -27,7 +27,7 @@
  "single_word": false,
  "lstrip": false,
  "rstrip": false,
- "normalized": false,
+ "normalized": true,
  "special": true
  },
  {
@@ -36,7 +36,7 @@
  "single_word": false,
  "lstrip": false,
  "rstrip": false,
- "normalized": false,
+ "normalized": true,
  "special": true
  },
  {
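Each hunk above flips the `normalized` flag on an added special token; with `normalized: true` the token is matched after the tokenizer's normalizer runs, rather than against the raw input. One way to inspect these flags without opening the JSON, shown on the public base model (requires transformers >= 4.34, where fast tokenizers expose `added_tokens_decoder`):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("distilroberta-base")
for token_id, added in sorted(tok.added_tokens_decoder.items()):
    print(token_id, added.content, "normalized =", added.normalized)
```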
tokenizer_config.json CHANGED
@@ -4,7 +4,7 @@
  "0": {
   "content": "<s>",
   "lstrip": false,
-  "normalized": false,
+  "normalized": true,
   "rstrip": false,
   "single_word": false,
   "special": true
@@ -12,7 +12,7 @@
  "1": {
   "content": "<pad>",
   "lstrip": false,
-  "normalized": false,
+  "normalized": true,
   "rstrip": false,
   "single_word": false,
   "special": true
@@ -20,7 +20,7 @@
  "2": {
   "content": "</s>",
   "lstrip": false,
-  "normalized": false,
+  "normalized": true,
   "rstrip": false,
   "single_word": false,
   "special": true
@@ -28,7 +28,7 @@
  "3": {
   "content": "<unk>",
   "lstrip": false,
-  "normalized": false,
+  "normalized": true,
   "rstrip": false,
   "single_word": false,
   "special": true
@@ -42,7 +42,6 @@
   "special": true
   }
  },
- "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",