import numpy as np
import gradio as gr
from transformers import (
    WhisperProcessor,
    WhisperForConditionalGeneration,
    AutoTokenizer,
    AutoModelForCausalLM,
    pipeline,
)

class InteractiveChat:
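    """Voice chat loop: transcribe microphone audio with Whisper, generate a
    reply with Zephyr, and speak the reply aloud."""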

    def __init__(self):
        # Speech-to-text: Whisper turns microphone audio into a transcript.
        self.whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-large")
        self.whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")

        # Text generation: Zephyr produces the chat reply.
        self.zephyr_tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
        self.zephyr_model = AutoModelForCausalLM.from_pretrained(
            "HuggingFaceH4/zephyr-7b-beta", device_map="auto"
        )
        self.zephyr_pipeline = pipeline(
            "text-generation",
            model=self.zephyr_model,
            tokenizer=self.zephyr_tokenizer,
        )

    def generate_response(self, input_data):
        # Gradio delivers microphone audio as (sample_rate, int16 numpy array).
        sample_rate, audio = input_data
        audio = audio.astype(np.float32) / 32768.0  # scale int16 samples to [-1, 1]

        # Whisper's feature extractor expects 16 kHz audio; resample first if
        # the microphone delivers a different rate.
        input_features = self.whisper_processor(
            audio, sampling_rate=sample_rate, return_tensors="pt"
        ).input_features
        predicted_ids = self.whisper_model.generate(input_features)
        transcription = self.whisper_processor.batch_decode(
            predicted_ids, skip_special_tokens=True
        )[0]
        
        response = self.get_zephyr_response(transcription)
        
        self.speak(response)
        return response

    def get_zephyr_response(self, transcription):
        # Reuse the Zephyr pipeline built in __init__ instead of loading a
        # default model on every call.
        outputs = self.zephyr_pipeline(transcription, max_new_tokens=256)
        return outputs[0]["generated_text"]

    def speak(self, text):
        # SpeechClient is a placeholder for whatever text-to-speech client the
        # deployment provides; it is not defined or imported in this file.
        speech_client = SpeechClient()
        speech_client.synthesize(text)


chat = InteractiveChat()

interface = gr.Interface(
    fn=chat.generate_response,
    inputs=gr.Audio(sources=["microphone"], type="numpy"),
    outputs=gr.Textbox(),
)

interface.launch()