Update README.md
README.md
@@ -8,9 +8,17 @@ license: mit
 This model is the dynamically quantized version of the model
 (https://akdeniz27/bert-base-turkish-cased-ner)
 
-# How to use:
+# How to use:
+First install "optimum[onnxruntime]":
+!pip install "optimum[onnxruntime]"
+
+and import "ORTModelForTokenClassification":
+
+from transformers import AutoTokenizer, pipeline
+from optimum.onnxruntime import ORTModelForTokenClassification
+
 ```
-
+model = ORTModelForTokenClassification.from_pretrained("akdeniz27/bert-base-turkish-cased-ner-quantized", file_name="model_quantized.onnx")
 tokenizer = AutoTokenizer.from_pretrained("akdeniz27/bert-base-turkish-cased-ner-quantized")
 ner = pipeline('ner', model=model, tokenizer=tokenizer, aggregation_strategy="first")
 ner("your text here")
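For convenience, the usage added in this commit can be collected into one runnable snippet. This is a minimal sketch based on the lines added above; the repository id and the `model_quantized.onnx` file name come from the diff, while the sample Turkish sentence is only an illustration of the expected input.

```python
# Requires the ONNX Runtime backend for Optimum:
#   pip install "optimum[onnxruntime]"
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForTokenClassification

repo_id = "akdeniz27/bert-base-turkish-cased-ner-quantized"

# Load the dynamically quantized ONNX weights (file name as given in the README).
model = ORTModelForTokenClassification.from_pretrained(repo_id, file_name="model_quantized.onnx")
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# aggregation_strategy="first" merges sub-word tokens into whole-entity spans.
ner = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first")

# Illustrative input only; returns a list of dicts with entity_group, score, word, start, end.
print(ner("Mustafa Kemal Atatürk 19 Mayıs 1919'da Samsun'a çıktı."))
```

Run on CPU, this should behave like the original `pipeline` call shown in the diff, with ONNX Runtime executing the quantized graph instead of the PyTorch model.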