import io
from typing import Dict, List, Any

import librosa
import numpy as np
import torch
from PIL import Image
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer


class EndpointHandler:
    def __init__(self, path: str = ""):
        """
        Initialize the handler. This loads the tokenizer and model required for
        inference: the `ronai-multimodal-perceiver-tsx` checkpoint at `path`,
        used for multimodal input handling.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = ORTModelForSequenceClassification.from_pretrained(path)
        # Inference runs by calling the model directly in __call__; a
        # transformers `pipeline` expects raw strings and cannot consume the
        # pre-tokenized tensors produced by `preprocess`.

    def preprocess(self, data: Dict[str, Any]) -> Any:
        """
        Preprocess input data based on its modality.
        This handler supports text, image, and audio data.
        """
        inputs = data.get("inputs", None)

        if isinstance(inputs, str):
            # Text: tokenize into model-ready PyTorch tensors.
            return self.tokenizer(inputs, return_tensors="pt")

        elif isinstance(inputs, Image.Image):
            # Image: convert to an array and add a batch dimension -> (1, H, W, C).
            image = np.array(inputs)
            return torch.tensor(image).unsqueeze(0)

        elif isinstance(inputs, np.ndarray):
            # Raw arrays (e.g. waveforms or point clouds): add a batch dimension.
            return torch.tensor(inputs).unsqueeze(0)

        elif isinstance(inputs, bytes):
            # Encoded audio: librosa needs a path or file-like object, so wrap
            # the raw bytes in an in-memory buffer, then build a mel
            # spectrogram with shape (1, 1, n_mels, time).
            audio, sr = librosa.load(io.BytesIO(inputs), sr=None)
            mel_spectrogram = librosa.feature.melspectrogram(y=audio, sr=sr)
            return torch.tensor(mel_spectrogram).unsqueeze(0).unsqueeze(0)

        else:
            raise ValueError(
                "Unsupported input type. Must be str (text), PIL.Image (image), "
                "np.ndarray (audio, point clouds, etc.), or bytes (encoded audio)."
            )

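    # Illustrative request payloads accepted by `preprocess` (the file names
    # below are placeholders):
    #   {"inputs": "a sentence to classify"}        # text  -> BatchEncoding
    #   {"inputs": Image.open("photo.png")}         # image -> (1, H, W, C) tensor
    #   {"inputs": np.load("points.npy")}           # array -> batched tensor
    #   {"inputs": open("clip.wav", "rb").read()}   # audio -> (1, 1, n_mels, T) tensor
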
    def postprocess(self, outputs: Any) -> List[Dict[str, Any]]:
        """
        Post-process the model output into a human-readable format.
        For classification, this returns the predicted label and its score.
        """
        logits = outputs.logits
        probabilities = torch.nn.functional.softmax(logits, dim=-1)
        predicted_class_id = probabilities.argmax().item()
        score = probabilities[0, predicted_class_id].item()
        return [{"label": self.model.config.id2label[predicted_class_id], "score": score}]

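    # Example return value (the labels depend on the checkpoint's id2label map):
    #   [{"label": "POSITIVE", "score": 0.98}]
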
    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Handle an incoming request: preprocess the input, run inference, and
        return the results.

        Args:
            data (Dict[str, Any]): The input data for inference.
                - data["inputs"] may be a string (text), a PIL.Image (image),
                  an np.ndarray (audio, point clouds, etc.), or bytes
                  (encoded audio).

        Returns:
            A list of dictionaries containing the model's prediction.
        """
        preprocessed_data = self.preprocess(data)

        if isinstance(preprocessed_data, torch.Tensor):
            # Non-text modalities: the multimodal checkpoint is assumed to
            # accept a raw tensor as its positional input.
            outputs = self.model(preprocessed_data)
        else:
            # Text: unpack the tokenizer output (input_ids, attention_mask, ...).
            outputs = self.model(**preprocessed_data)

        return self.postprocess(outputs)
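

# --- Hypothetical local smoke test (illustrative only, not part of the
# deployed handler). Assumes an exported checkpoint directory "./model"
# containing the ONNX weights and tokenizer files; the path and the sample
# audio file are placeholders.
if __name__ == "__main__":
    handler = EndpointHandler(path="./model")

    # Text request
    print(handler({"inputs": "The service responded quickly and accurately."}))

    # Audio request from raw encoded bytes (e.g. a WAV file read from disk)
    with open("sample.wav", "rb") as f:
        print(handler({"inputs": f.read()}))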