kajamo committed
Commit
6b28a82
1 Parent(s): b05ee89

Model save

README.md CHANGED
@@ -15,17 +15,6 @@ should probably proofread and complete it, then remove this comment. -->
 # model_23
 
 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
-It achieves the following results on the evaluation set:
-- eval_loss: 0.9087
-- eval_accuracy: 0.6419
-- eval_precision: 0.6553
-- eval_recall: 0.6419
-- eval_f1: 0.6273
-- eval_runtime: 42.4195
-- eval_samples_per_second: 288.664
-- eval_steps_per_second: 18.058
-- epoch: 2.0
-- step: 6124
 
 ## Model description
 
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:78de1a9c08ae8ba475cf10f5f4a13205c3ec7f33772f6e4d74bead2ad5d98abe
+oid sha256:c53e5f2b8b2523bf7b95d877d4e8b029a0bf8dc61e80375cf1a526c4b6bb8978
 size 2521452
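Only the LFS pointer's oid changes here: the adapter weights were retrained and re-uploaded at the same size. Together with the README, this indicates a PEFT adapter trained on top of distilbert-base-uncased rather than a full fine-tune. A minimal loading sketch, assuming the Hub repo id is kajamo/model_23, the task is sequence classification (suggested by the removed accuracy/precision/recall/f1 metrics), and that transformers and peft are installed:

from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Assumed Hub repo id for this commit; adjust to the actual path.
ADAPTER_REPO = "kajamo/model_23"

# Base model the adapter was trained against (the number of labels is not
# recorded in this commit; the default of 2 is an assumption).
base = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")

# Attach the weights stored in adapter_model.safetensors.
model = PeftModel.from_pretrained(base, ADAPTER_REPO)

tokenizer = AutoTokenizer.from_pretrained(ADAPTER_REPO)
inputs = tokenizer("example input", return_tensors="pt")
logits = model(**inputs).logits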
special_tokens_map.json CHANGED
@@ -1,37 +1,7 @@
 {
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "[UNK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
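This change collapses each special token from an AddedToken-style dict to a plain string; both forms resolve to the same tokens when loaded through transformers. A quick check, assuming the same kajamo/model_23 repo id as above:

from transformers import AutoTokenizer

# Assumed repo id; a local checkout of this repo behaves the same way.
tok = AutoTokenizer.from_pretrained("kajamo/model_23")

# Special tokens come back as strings regardless of whether
# special_tokens_map.json stored them as dicts or strings.
print(tok.cls_token, tok.sep_token, tok.pad_token, tok.unk_token, tok.mask_token)
# [CLS] [SEP] [PAD] [UNK] [MASK]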
tokenizer_config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "[PAD]",