echarlaix HF staff committed on
Commit
9de6363
1 Parent(s): b7ae963

Upload model

Browse files
Files changed (5) hide show
  1. README.md +32 -0
  2. config.json +35 -0
  3. eval_results.json +7 -0
  4. pytorch_model.bin +3 -0
  5. quantization.yml +14 -0
README.md CHANGED
@@ -1,3 +1,35 @@
1
  ---
 
2
  license: apache-2.0
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ language: en
3
  license: apache-2.0
4
+ datasets:
5
+ - sst2
6
+ - glue
7
+ metrics:
8
+ - accuracy
9
+ tags:
10
+ - text-classification
11
+ - int8
12
  ---
13
+
14
+ # Dynamically quantized DistilBERT base uncased finetuned SST-2
15
+
16
+ ## Table of Contents
17
+ - [Model Details](#model-details)
18
+ - [How to Get Started With the Model](#how-to-get-started-with-the-model)
19
+
20
+ ## Model Details
21
+ **Model Description:** This model is a [DistilBERT](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) fine-tuned on SST-2 dynamically quantized with [optimum-intel](https://github.com/huggingface/optimum-intel) through the usage of [Intel® Neural Compressor](https://github.com/intel/neural-compressor).
22
+ - **Model Type:** Text Classification
23
+ - **Language(s):** English
24
+ - **License:** Apache-2.0
25
+ - **Parent Model:** For more details on the original model, we encourage users to check out [this](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) model card.
26
+
27
+ ## How to Get Started With the Model
28
+
29
+ To load the quantized model, you can do as follows:
30
+
31
+ ```python
32
+ from optimum.intel.neural_compressor.quantization import IncQuantizedModelForSequenceClassification
33
+
34
+ model = IncQuantizedModelForSequenceClassification.from_pretrained("Intel/distilbert-base-uncased-finetuned-sst-2-english-int8-dynamic")
35
+ ```
config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "distilbert-base-uncased-finetuned-sst-2-english",
3
+ "activation": "gelu",
4
+ "architectures": [
5
+ "DistilBertForSequenceClassification"
6
+ ],
7
+ "attention_dropout": 0.1,
8
+ "dim": 768,
9
+ "dropout": 0.1,
10
+ "finetuning_task": "sst-2",
11
+ "hidden_dim": 3072,
12
+ "id2label": {
13
+ "0": "NEGATIVE",
14
+ "1": "POSITIVE"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "label2id": {
18
+ "NEGATIVE": 0,
19
+ "POSITIVE": 1
20
+ },
21
+ "max_position_embeddings": 512,
22
+ "model_type": "distilbert",
23
+ "n_heads": 12,
24
+ "n_layers": 6,
25
+ "output_past": true,
26
+ "pad_token_id": 0,
27
+ "problem_type": "single_label_classification",
28
+ "qa_dropout": 0.1,
29
+ "seq_classif_dropout": 0.2,
30
+ "sinusoidal_pos_embds": false,
31
+ "tie_weights_": true,
32
+ "torch_dtype": "int8",
33
+ "transformers_version": "4.19.4",
34
+ "vocab_size": 30522
35
+ }
eval_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
1
+ {
2
+ "eval_accuracy": 0.9071100917431193,
3
+ "eval_loss": 0.3806319534778595,
4
+ "eval_runtime": 26.6222,
5
+ "eval_samples_per_second": 32.755,
6
+ "eval_steps_per_second": 4.094
7
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:980e9ed4055e37d54ba12c52383eb6b41bf8a4c11dde676aebab0d80e8d16531
3
+ size 139416577
quantization.yml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ device: cpu
2
+ model:
3
+ framework: pytorch
4
+ name: bert
5
+ quantization:
6
+ approach: post_training_dynamic_quant
7
+ tuning:
8
+ accuracy_criterion:
9
+ relative: 0.02
10
+ exit_policy:
11
+ max_trials: 4
12
+ timeout: 0
13
+ random_seed: 9527
14
+ version: 1.0