xlmr-tatoeba / README.md
bigpang's picture
Create README.md
2edcf1d verified

from optimum.onnxruntime import ORTModelForSequenceClassification from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

转换 ONNX 模型 (convert the checkpoint to an ONNX model)

def convert(path, onnx_path):
    """Export a transformers sequence-classification checkpoint to ONNX.

    Args:
        path: source checkpoint directory or Hub model id.
        onnx_path: directory where the exported ONNX model and its
            tokenizer are saved.
    """
    # BUG FIX: the original signature was `convert(path, onnx_path, onnx_path)`,
    # a duplicate parameter name, which is a SyntaxError in Python.
    # `from_transformers=True` triggers the PyTorch -> ONNX export on load.
    onnx_model = ORTModelForSequenceClassification.from_pretrained(path, from_transformers=True)
    tokenizer = AutoTokenizer.from_pretrained(path)

    onnx_model.save_pretrained(onnx_path)
    tokenizer.save_pretrained(onnx_path)

加载模型,用 pipeline 包装 (load the original model and wrap it in a pipeline)

def load_model(model_name):
    """Load a HF sequence-classification checkpoint and wrap it in a pipeline.

    Args:
        model_name: checkpoint directory or Hub model id.

    Returns:
        A ready-to-use "text-classification" pipeline.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
    # Run one sample sentence as a smoke test so breakage is visible at load time.
    print(classifier('这是一个简单的demo,用来防止忘记'))
    return classifier

加载 ONNX 模型,用 pipeline 包装 (load the ONNX model and wrap it in a pipeline)

def load_onnx_model(onnx_path):
    """Load an exported ONNX classifier and wrap it in a pipeline.

    Args:
        onnx_path: directory containing the ONNX model and tokenizer.

    Returns:
        A "text-classification" pipeline backed by ONNX Runtime
        (truncation enabled for long inputs).
    """
    tokenizer = AutoTokenizer.from_pretrained(onnx_path)
    model = ORTModelForSequenceClassification.from_pretrained(onnx_path)
    detector = pipeline(
        "text-classification",
        model=model,
        tokenizer=tokenizer,
        truncation=True,
    )
    # Run one sample sentence as a smoke test so breakage is visible at load time.
    print(detector('这是一个简单的demo,用来防止忘记'))
    return detector