Teja-Gollapudi's picture
Update README.md
8841d1a
metadata
license: apache-2.0

Int-8 dynamic quantized version of [VMware/tinyroberta-mrqa](https://huggingface.co/VMware/tinyroberta-mrqa).

# Usage example: load the int-8 quantized ONNX model and run extractive QA.
from optimum.onnxruntime import ORTModelForQuestionAnswering
from transformers import pipeline, AutoTokenizer

model_name = 'VMware/tinyroberta-quantized-mrqa'
tokenizer = AutoTokenizer.from_pretrained(model_name)
# file_name selects the quantized ONNX graph within the model repository
# (the repo ships the quantized weights under this specific filename).
quantized_model = ORTModelForQuestionAnswering.from_pretrained(model_name, file_name="model_quantize.onnx")

qa_model = pipeline('question-answering', model=quantized_model, tokenizer=tokenizer)

# A concrete question/context pair — empty strings (as in the original
# snippet) give the pipeline nothing to extract from, so the demo was a no-op.
qa_input = {
    'question': 'What is ONNX Runtime?',
    'context': 'ONNX Runtime is a cross-platform engine for running machine-learning models exported to the ONNX format.'
}

qa_answers = qa_model(qa_input)
print(qa_answers['answer'])