# File size: 1,305 bytes
# Revision: 2edcf1d
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
# Convert a transformers checkpoint to an ONNX model.
def convert(path, onnx_path):
    """Export a sequence-classification checkpoint to ONNX and save it.

    BUGFIX: the original signature was ``convert(path, onnx_path, onnx_path)``,
    which is a SyntaxError in Python (duplicate argument name). Callers pass
    a source path and a destination path, so two parameters are correct.

    Parameters
    ----------
    path : str
        Local directory or Hub id of the source transformers checkpoint.
    onnx_path : str
        Destination directory that receives the exported ONNX model
        and its tokenizer files.
    """
    # from_transformers=True triggers the PyTorch -> ONNX export on load.
    onnx_model = ORTModelForSequenceClassification.from_pretrained(path, from_transformers=True)
    tokenizer = AutoTokenizer.from_pretrained(path)
    onnx_model.save_pretrained(onnx_path)
    tokenizer.save_pretrained(onnx_path)
# Load a plain transformers model and wrap it in a pipeline.
def load_model(model_name):
    """Build a text-classification pipeline from a transformers checkpoint.

    Parameters
    ----------
    model_name : str
        Local directory or Hub id of the checkpoint to load.

    Returns
    -------
    A ``transformers`` text-classification pipeline.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
    # Smoke-test the pipeline on a sample sentence before returning it.
    print(classifier('这是一个简单的demo,用来防止忘记'))
    return classifier
# Load an exported ONNX model and wrap it in a pipeline.
def load_onnx_model(onnx_path):
    """Build a text-classification pipeline backed by an ONNX Runtime model.

    Parameters
    ----------
    onnx_path : str
        Directory holding a previously exported ONNX model + tokenizer.

    Returns
    -------
    A ``transformers`` text-classification pipeline (with truncation enabled).
    """
    model = ORTModelForSequenceClassification.from_pretrained(onnx_path)
    tok = AutoTokenizer.from_pretrained(onnx_path)
    detector = pipeline("text-classification", model=model, tokenizer=tok, truncation=True)
    # Smoke-test on a sample sentence before handing the pipeline back.
    print(detector('这是一个简单的demo,用来防止忘记'))
    return detector