import librosa
import numpy as np

from .init import pipe

TASK = "transcribe"
BATCH_SIZE = 8
LIMIT = 60  # maximum clip length to keep, in seconds
SAMPLING_RATE = 16000


class A2T:
    def __init__(self, mic):
        self.mic = mic

    def __transcribe(self, inputs, task: str = None):
        if inputs is None:
            raise Exception("Inputs is None")

        transcribed_text = pipe(
            inputs,
            batch_size=BATCH_SIZE,
            generate_kwargs={"task": task},
            return_timestamps=True,
        )["text"]
        return transcribed_text

    def __preprocess(self, raw: np.ndarray, sampling_rate: int):
        # Scale int16 PCM samples to float32 in [-1.0, 1.0].
        chunk = raw.astype(np.float32, order="C") / 32768.0
        print(f"Chunk : {chunk} max chunk : {np.max(chunk)}")

        # Down-mix multi-channel audio to mono.
        if chunk.ndim > 1:
            chunk = librosa.to_mono(chunk.T)

        # Resample to the model's expected rate if the mic rate differs.
        if sampling_rate != SAMPLING_RATE:
            chunk = librosa.resample(chunk, orig_sr=sampling_rate, target_sr=SAMPLING_RATE)

        # Keep at most LIMIT seconds of audio.
        chunk = chunk[: SAMPLING_RATE * LIMIT]
        return chunk

    def predict(self):
        try:
            if self.mic is not None:
                raw = self.mic.get_array_of_samples()
                chunk = np.array(raw, dtype=np.int16)
                sampling_rate = self.mic.frame_rate
                audio = self.__preprocess(raw=chunk, sampling_rate=sampling_rate)
                print(
                    f"audio : {audio} \n shape : {audio.shape} \n max : {np.max(audio)} \n"
                    f" shape of chunk : {chunk.shape} \n sampling rate : {sampling_rate} \n"
                    f" max chunk : {np.max(chunk)} \n chunk : {chunk}"
                )
            else:
                raise Exception("Please provide audio")

            if isinstance(audio, np.ndarray):
                inputs = {"array": audio, "sampling_rate": pipe.feature_extractor.sampling_rate}
                return self.__transcribe(inputs=inputs, task=TASK)
            else:
                raise Exception("Audio is not a np.ndarray")
        except Exception as e:
            return f"Oops, some kind of error: {e}"
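

# Usage sketch (not part of the original module, shown only as commented-out code):
# `mic` is expected to expose `get_array_of_samples()` and `frame_rate`, which matches
# pydub's `AudioSegment` API. The file path and import path below are hypothetical
# assumptions for illustration; adapt them to the actual package layout.
#
#   from pydub import AudioSegment
#   from .a2t import A2T  # hypothetical module path for this file
#
#   segment = AudioSegment.from_wav("sample.wav")  # hypothetical input file
#   a2t = A2T(mic=segment)
#   print(a2t.predict())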