harr committed on
Commit 4e5658d
1 Parent(s): 1f4a542
README.md CHANGED
@@ -9,7 +9,7 @@ metrics:
 - recall
 - f1
 - accuracy
-model_index:
+model-index:
 - name: distilbert-base-uncased-finetuned-ingredients
   results:
   - task:
@@ -19,10 +19,19 @@ model_index:
       name: ingredients_yes_no
       type: ingredients_yes_no
       args: IngredientsYesNo
-    metric:
-      name: Accuracy
+    metrics:
+    - name: Precision
+      type: precision
+      value: 0.9878658101356174
+    - name: Recall
+      type: recall
+      value: 0.9985569985569985
+    - name: F1
+      type: f1
+      value: 0.9931826336562611
+    - name: Accuracy
       type: accuracy
-      value: 0.9929481628108375
+      value: 0.9928244463689224
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,11 +41,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the ingredients_yes_no dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0262
-- Precision: 0.9914
+- Loss: 0.0280
+- Precision: 0.9879
 - Recall: 0.9986
-- F1: 0.9950
-- Accuracy: 0.9929
+- F1: 0.9932
+- Accuracy: 0.9928
 
 ## Model description
 
@@ -67,14 +76,14 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
-| No log | 1.0 | 186 | 0.0566 | 0.9814 | 0.9885 | 0.9849 | 0.9827 |
-| No log | 2.0 | 372 | 0.0375 | 0.9864 | 0.9978 | 0.9921 | 0.9887 |
-| 0.113 | 3.0 | 558 | 0.0262 | 0.9914 | 0.9986 | 0.9950 | 0.9929 |
+| No log | 1.0 | 186 | 0.0612 | 0.9807 | 0.9899 | 0.9853 | 0.9821 |
+| No log | 2.0 | 372 | 0.0390 | 0.9836 | 0.9935 | 0.9885 | 0.9890 |
+| 0.1179 | 3.0 | 558 | 0.0280 | 0.9879 | 0.9986 | 0.9932 | 0.9928 |
 
 
 ### Framework versions
 
-- Transformers 4.9.2
+- Transformers 4.10.0
 - Pytorch 1.9.0+cu102
 - Datasets 1.11.0
 - Tokenizers 0.10.3
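The README change above replaces the older `model_index` key and its single `metric` entry with the `model-index` / `metrics` list layout, so each reported value (precision, recall, F1, accuracy) is now a separate name/type/value entry in the card's YAML front matter. As a rough illustration only, the sketch below reads that front matter back and prints the reported metrics; it assumes PyYAML is available and that `README.md` sits in the current directory, neither of which is part of this commit.

```python
# Minimal sketch (not part of the commit): parse the YAML front matter of the
# updated README.md and print the metrics from the new `model-index` block.
import yaml

with open("README.md", encoding="utf-8") as f:
    text = f.read()

# The front matter sits between the first two `---` lines.
front_matter = text.split("---")[1]
card = yaml.safe_load(front_matter)

result = card["model-index"][0]["results"][0]
for metric in result["metrics"]:
    print(f'{metric["name"]} ({metric["type"]}): {metric["value"]}')
    # e.g. "Precision (precision): 0.9878658101356174" per the diff above
```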
config.json CHANGED
@@ -9,19 +9,19 @@
   "dropout": 0.1,
   "hidden_dim": 3072,
   "id2label": {
-    "0": "LABEL_0",
-    "1": "LABEL_1",
-    "2": "LABEL_2",
-    "3": "LABEL_3",
-    "4": "LABEL_4"
+    "0": "ADD-B",
+    "1": "ADD-C",
+    "2": "REM-B",
+    "3": "REM-C",
+    "4": "O"
   },
   "initializer_range": 0.02,
   "label2id": {
-    "LABEL_0": 0,
-    "LABEL_1": 1,
-    "LABEL_2": 2,
-    "LABEL_3": 3,
-    "LABEL_4": 4
+    "ADD-B": 0,
+    "ADD-C": 1,
+    "O": 4,
+    "REM-B": 2,
+    "REM-C": 3
   },
   "max_position_embeddings": 512,
   "model_type": "distilbert",
@@ -33,6 +33,6 @@
   "sinusoidal_pos_embds": false,
   "tie_weights_": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.9.2",
+  "transformers_version": "4.10.0",
   "vocab_size": 30522
 }
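With the config change above, the placeholder names `LABEL_0`–`LABEL_4` are replaced by the task's actual tag set (`ADD-B`, `ADD-C`, `REM-B`, `REM-C`, `O`), so predictions surface with readable labels. The sketch below shows what that looks like through the standard token-classification pipeline; the repo id is an assumption inferred from the committer and model name, and the input sentence is invented for illustration.

```python
# Illustrative only: run the fine-tuned model and observe that predictions now
# carry the readable tag names from id2label (ADD-B, ADD-C, REM-B, REM-C, O)
# instead of LABEL_0..LABEL_4.
# NOTE: the repo id below is assumed from the committer/model name; adjust it
# to the actual Hub path if it differs.
from transformers import pipeline

tagger = pipeline(
    "token-classification",
    model="harr/distilbert-base-uncased-finetuned-ingredients",
)

# Hypothetical input; the ingredients_yes_no dataset itself is not shown here.
for pred in tagger("Replace the butter with two tablespoons of olive oil"):
    print(pred["word"], pred["entity"])  # e.g. "butter REM-B", "olive ADD-B"
```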
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1bcc6d955021a9eb564d36424ab859bb3d5b4626151a33ddecb94a01108be957
+oid sha256:503cb238903261f0f8cac9dd517c723bb2ad363be5a5667f66e46e79222bb854
 size 265506293
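The weights themselves live in Git LFS, so the diff only shows the pointer file changing: a new sha256 oid for the retrained weights, with the file size unchanged. As a small sketch, a locally downloaded copy of the file can be checked against that oid; the local path is an assumption, and the expected digest is copied from the pointer above.

```python
# Sketch: verify a locally downloaded pytorch_model.bin against the sha256 oid
# recorded in the new Git LFS pointer above.
import hashlib

EXPECTED = "503cb238903261f0f8cac9dd517c723bb2ad363be5a5667f66e46e79222bb854"

sha = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:          # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print("match:", sha.hexdigest() == EXPECTED)
```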
runs/Aug31_14-06-20_9e2c9a21c510/1630418899.7356794/events.out.tfevents.1630418899.9e2c9a21c510.77.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54c5ae0df9f76cbecc1e35f73e9279d94661873be116a973fc3a8f5a6e0b7570
+size 4230
runs/Aug31_14-06-20_9e2c9a21c510/events.out.tfevents.1630418899.9e2c9a21c510.77.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c072085bc1e577550d6cc60a7b7bd00bde809af9c50d37bc906a6ea77ebfd86d
+size 5008
runs/Aug31_14-06-20_9e2c9a21c510/events.out.tfevents.1630418975.9e2c9a21c510.77.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e888ced6dc5aff64f7830bf1021f691713947601ee052e01106ee315e977154
+size 512
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d9046051e46038bd0e8b3f706ed7ec1b37e298023c3382bf38fcff9dc52b1622
+oid sha256:a682fa36f6065ff2cf83b4397ce8c7b995377546727d42eea2aa951aed9cdcb1
 size 2671