helenai committed on
Commit
8e2663d
1 Parent(s): e3219aa

commit files to HF hub

Files changed (1)
  1. ptq_ner.py +30 -0
ptq_ner.py ADDED
@@ -0,0 +1,30 @@
+ from functools import partial
+ from optimum.intel.openvino import OVQuantizer, OVModelForTokenClassification
+ from transformers import AutoTokenizer, AutoModelForTokenClassification
+
+ # model_id = "elastic/distilbert-base-uncased-finetuned-conll03-english"
+ # model_id = "xlm-roberta-large-finetuned-conll03-english"
+ model_id = "dbmdz/bert-large-cased-finetuned-conll03-english"
+ model = AutoModelForTokenClassification.from_pretrained(model_id)
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ # tokenizer.pad_token_id=0
+
+ def preprocess_fn(examples, tokenizer):
+     return tokenizer(
+         examples["tokens"], padding="max_length", max_length=128, truncation=True, is_split_into_words=True
+     )
+
+ quantizer = OVQuantizer.from_pretrained(model)
+ calibration_dataset = quantizer.get_calibration_dataset(
+     "conll2003",
+     preprocess_function=partial(preprocess_fn, tokenizer=tokenizer),
+     num_samples=300,
+     dataset_split="validation",
+     preprocess_batch=True,
+ )
+ # The directory where the quantized model will be saved
+ save_dir = f"{model_id}_ov_int8"
+ # Apply static quantization and save the resulting model in the OpenVINO IR format
+ quantizer.quantize(calibration_dataset=calibration_dataset, save_directory=save_dir)
+ # Load the quantized model
+ optimized_model = OVModelForTokenClassification.from_pretrained(save_dir)
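
For reference, the reloaded INT8 model can be used as a drop-in replacement in a standard transformers pipeline. A minimal sketch, not part of the committed script (the input sentence is only illustrative):

    from transformers import pipeline

    # Run NER inference with the quantized OpenVINO model loaded above
    ner_pipeline = pipeline("token-classification", model=optimized_model, tokenizer=tokenizer)
    print(ner_pipeline("My name is Sarah and I live in London."))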