from typing import Union

from datasets import load_dataset
from transformers import WhisperProcessor, WhisperForConditionalGeneration
from transformers import pipeline
import torch
import soundfile as sf
import uuid
import sys 
sys.path.append('E:\\ffmpeg\\ffmpeg-2023-05-22-git-877ccaf776-full_build\\bin')
sys.path.append('E:\\ffmpeg\\ffmpeg-2023-05-22-git-877ccaf776-full_build\\bin\\ffmpeg.exe')

# Speech-to-text pipeline: OpenAI Whisper (base checkpoint) on CPU.
# Long inputs are transcribed in 100-second chunks.
ASR_MODEL = "openai/whisper-base"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=ASR_MODEL,
    chunk_length_s=100,
    device="cpu",
)



# Small-talk chatbot backed by Microsoft's DialoGPT-medium model.
# NOTE(review): the "conversational" pipeline task was removed in recent
# transformers releases — confirm the pinned transformers version supports it.
chatbot = pipeline(
    model="microsoft/DialoGPT-medium",
    task="conversational",
)

# Alternative chatbot using Facebook's distilled BlenderBot (400M params).
# NOTE(review): name looks like a typo of "facebookChat", but it is kept
# unchanged in case other modules reference it.
facebootChat = pipeline(
    model="facebook/blenderbot-400M-distill",
    task="conversational",
)



if __name__ == '__main__':
    # Transcribe the saved recording and print the recognized text.
    result = pipe('save_record/blob.wav')
    print(result['text'])
