helenai committed
Commit fcff3e3
1 Parent(s): eca1a12

Add distilbert SST2 NNCF model

README.md ADDED
@@ -0,0 +1,38 @@
---
language: en
license: apache-2.0
datasets:
- sst2
- glue
tags:
- openvino
---

## distilbert-base-uncased-finetuned-sst-2-english

[distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) quantized with NNCF post-training quantization (PTQ) and exported to OpenVINO IR.

**Model Description:** This model reaches an accuracy of 90.0 on the SST-2 validation set. See [ov_config.json](./ov_config.json) for the quantization config.

## Usage example

You can use this model with the Transformers *pipeline* API:

```python
from transformers import AutoTokenizer, pipeline
from optimum.intel.openvino import OVModelForSequenceClassification

model_id = "helenai/distilbert-base-uncased-finetuned-sst-2-english-ov-int8"

# Load the INT8 OpenVINO IR and the matching tokenizer from the Hub
model = OVModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# The OpenVINO model drops into a standard Transformers pipeline
cls_pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)

text = "He's a dreadful magician."
outputs = cls_pipe(text)
print(outputs)
```

Example output:

```bash
[{'label': 'NEGATIVE', 'score': 0.9929909706115723}]
```
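The IR files below (ov_model.xml / ov_model.bin) can also be run with the OpenVINO runtime directly, skipping `optimum`. A minimal sketch, assuming a local clone of this repo and that the IR takes `input_ids` and `attention_mask` in that order (the input ordering is an assumption, not confirmed by this commit):

```python
from openvino.runtime import Core
from transformers import AutoTokenizer

model_id = "helenai/distilbert-base-uncased-finetuned-sst-2-english-ov-int8"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Read the IR (ov_model.bin is located automatically next to the .xml) and compile for CPU
core = Core()
compiled = core.compile_model(core.read_model("ov_model.xml"), "CPU")

inputs = tokenizer("He's a dreadful magician.", return_tensors="np")
# Assumes inputs are (input_ids, attention_mask); output 0 holds the classification logits
logits = compiled([inputs["input_ids"], inputs["attention_mask"]])[compiled.output(0)]
print(logits.argmax(-1))  # 0 -> NEGATIVE, 1 -> POSITIVE, per config.json id2label
```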
config.json ADDED
@@ -0,0 +1,33 @@
{
  "_name_or_path": "distilbert-base-uncased-finetuned-sst-2-english",
  "activation": "gelu",
  "architectures": [
    "DistilBertForSequenceClassification"
  ],
  "attention_dropout": 0.1,
  "dim": 768,
  "dropout": 0.1,
  "finetuning_task": "sst-2",
  "hidden_dim": 3072,
  "id2label": {
    "0": "NEGATIVE",
    "1": "POSITIVE"
  },
  "initializer_range": 0.02,
  "label2id": {
    "NEGATIVE": 0,
    "POSITIVE": 1
  },
  "max_position_embeddings": 512,
  "model_type": "distilbert",
  "n_heads": 12,
  "n_layers": 6,
  "output_past": true,
  "pad_token_id": 0,
  "qa_dropout": 0.1,
  "seq_classif_dropout": 0.2,
  "sinusoidal_pos_embds": false,
  "tie_weights_": true,
  "transformers_version": "4.22.2",
  "vocab_size": 30522
}
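The `id2label` / `label2id` maps in this config are what the pipeline in the README uses to turn logits into the `NEGATIVE`/`POSITIVE` strings. A quick way to inspect them, using the repo id from the README:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("helenai/distilbert-base-uncased-finetuned-sst-2-english-ov-int8")
print(config.id2label)                              # {0: 'NEGATIVE', 1: 'POSITIVE'}
print(config.n_layers, config.n_heads, config.dim)  # 6 12 768
```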
ov_config.json ADDED
@@ -0,0 +1,49 @@
{
  "_commit_hash": null,
  "compression": {
    "algorithm": "quantization",
    "ignored_scopes": [
      "{re}.*Embeddings.*",
      "{re}.*__add___[0-1]",
      "{re}.*layer_norm_0",
      "{re}.*matmul_1",
      "{re}.*__truediv__*"
    ],
    "initializer": {
      "batchnorm_adaptation": {
        "num_bn_adaptation_samples": 0
      },
      "range": {
        "num_init_samples": 300,
        "type": "mean_min_max"
      }
    },
    "overflow_fix": "disable",
    "preset": "mixed",
    "scope_overrides": {
      "activations": {
        "{re}.*matmul_0": {
          "mode": "symmetric"
        }
      }
    }
  },
  "input_info": [
    {
      "sample_size": [
        8,
        256
      ],
      "type": "long"
    },
    {
      "sample_size": [
        8,
        256
      ],
      "type": "long"
    }
  ],
  "optimum_version": "1.4.0",
  "transformers_version": "4.22.2"
}
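This is an NNCF quantization config: 300 `mean_min_max` calibration samples, a `mixed` preset with a symmetric override for `matmul_0` activations, embeddings and other sensitive ops excluded via `ignored_scopes`, and `input_info` declaring two 8x256 long-integer inputs for tracing. The exact quantization script is not part of this commit; as a hedged sketch, a comparable INT8 PTQ flow with optimum-intel's documented `OVQuantizer` API (which may differ from the optimum 1.4.0 flow recorded above) looks like this:

```python
from functools import partial
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from optimum.intel.openvino import OVQuantizer

model_id = "distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

def preprocess_fn(examples, tokenizer):
    # Pad/truncate to the 256-token sample_size declared in input_info above
    return tokenizer(examples["sentence"], padding="max_length", max_length=256, truncation=True)

quantizer = OVQuantizer.from_pretrained(model)
# 300 samples matches num_init_samples in the range initializer above
calibration_dataset = quantizer.get_calibration_dataset(
    "glue",
    dataset_config_name="sst2",
    preprocess_function=partial(preprocess_fn, tokenizer=tokenizer),
    num_samples=300,
    dataset_split="train",
)
quantizer.quantize(calibration_dataset=calibration_dataset, save_directory="ov-int8")
```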
ov_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5051f5e3c6a1c8cd9f51755c8691299c9938daa2ef94def5b709950c630677c
size 138817460
ov_model.xml ADDED
The diff for this file is too large to render. See raw diff
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
{
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "name_or_path": "distilbert-base-uncased-finetuned-sst-2-english",
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "special_tokens_map_file": null,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "DistilBertTokenizer",
  "unk_token": "[UNK]"
}
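With `do_lower_case` set to true and `model_max_length` 512, the tokenizer lowercases input and caps sequences at 512 tokens. A quick check, using the repo id from the README:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("helenai/distilbert-base-uncased-finetuned-sst-2-english-ov-int8")

print(tokenizer.tokenize("He's a DREADFUL magician."))  # casing is normalized away
print(tokenizer.model_max_length)                       # 512
```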
vocab.txt ADDED
The diff for this file is too large to render. See raw diff