from typing import Any, Dict, List

import torch
from transformers import pipeline


class EndpointHandler():
    def __init__(self, path: str = ""):
        # Use the first GPU if available, otherwise fall back to CPU.
        device = 0 if torch.cuda.is_available() else "cpu"
        self.pipe = pipeline(
            task="automatic-speech-recognition",
            model=path,
            chunk_length_s=30,
            device=device,
        )
        # self.pipe.model.config.forced_decoder_ids = self.pipe.model.processor.get_decoder_prompt_ids(language="Slovenian", task="transcribe")
        # self.pipe.model.generation_config.forced_decoder_ids = self.pipe.model.config.forced_decoder_ids

    def __call__(self, data: Dict[str, Any], **kwargs) -> List[Dict[str, Any]]:
        """
        data args:
            inputs (:obj:`str` | `bytes` | `np.array`): audio to transcribe (path/URL, raw bytes, or waveform array)
        kwargs:
            language (:obj:`str`, defaults to "sl"): language to force during transcription
        Return:
            A :obj:`list` | `dict`: will be serialized and returned
        """
        inputs = data.pop("inputs", data)
        language = kwargs.get("language", "sl")
        # print("inputs", inputs)
        prediction = self.pipe(
            inputs,
            generate_kwargs={"language": language, "task": "transcribe"},
        )
        return prediction
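

# Minimal local-testing sketch, not part of the Inference Endpoints contract:
# in production the toolkit instantiates EndpointHandler and calls it with the
# decoded request payload. The checkpoint name ("openai/whisper-small") and the
# audio file ("sample.flac") below are placeholder assumptions for illustration.
if __name__ == "__main__":
    handler = EndpointHandler(path="openai/whisper-small")  # assumed Whisper checkpoint
    with open("sample.flac", "rb") as f:  # assumed local audio file
        audio_bytes = f.read()
    # The ASR pipeline accepts raw bytes and decodes them with ffmpeg.
    result = handler({"inputs": audio_bytes}, language="sl")
    print(result)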