ahricat committed on
Commit
133ee24
1 Parent(s): 25d1ea3

Update app.py

Files changed (1)
  app.py +49 -16
app.py CHANGED
@@ -1,20 +1,53 @@
- import gradio as gr
-
- with gr.Blocks() as demo:
-     model_choice = gr.Dropdown(["openai/whisper-large", "HuggingFaceH4/zephyr-7b-beta"],
-                                label="Choose Model", value="openai/whisper-large")
-     input_data = gr.Audio(source="microphone", type="filepath", label="Speak Your Message")  # Note: type is now "filepath"
-     output_text = gr.Textbox(label="Transcription and Response")
-
-     def generate_response(audio_path, model_name):
-         hf_interface = gr.Interface.load(model_name)  # Load the model directly from the Hub
-         if model_name == "openai/whisper-large":
-             transcription = hf_interface(audio_path)  # Handle transcription
-         else:  # Zephyr
-             transcription = hf_interface(audio_path)[0]["generated_text"]  # Extract transcription from zephyr
-             response = hf_interface(transcription)[0]["generated_text"]  # Get Zephyr's response
          return transcription, response
-
-     input_data.change(generate_response, inputs=[input_data, model_choice], outputs=output_text)
-
- demo.launch()
+ import gradio as gr
+ import numpy as np
+ import pygame
+ from gtts import gTTS
+ from huggingface_hub import snapshot_download
+ from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                           WhisperForConditionalGeneration, WhisperProcessor, pipeline)
+
+
+ class InteractiveChat:
+     def __init__(self, model_name="openai/whisper-large", tts_choice="OpenVoice", **kwargs):
+         self.whisper_processor = WhisperProcessor.from_pretrained(model_name)
+         self.whisper_model = WhisperForConditionalGeneration.from_pretrained(model_name)
+         self.zephyr_tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
+         self.zephyr_model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-beta", device_map="auto")
+         self.zephyr_pipeline = pipeline("text-generation", model=self.zephyr_model, tokenizer=self.zephyr_tokenizer)
+         self.tts_choice = tts_choice
+
+     def generate_response(self, input_data):
+         # gr.Audio(type="numpy") delivers (sample_rate, int16 samples); Whisper expects
+         # float32 audio at 16 kHz (assumed here; see the resampling note after the diff).
+         sample_rate, samples = input_data
+         samples = samples.astype(np.float32) / 32768.0
+         input_features = self.whisper_processor(samples, sampling_rate=16_000, return_tensors="pt").input_features
+         predicted_ids = self.whisper_model.generate(input_features)
+         transcription = self.whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
+
+         # Use the transcription as input for Zephyr
+         response = self.zephyr_pipeline(transcription, max_length=1000)[0]["generated_text"]
          return transcription, response
+
+     def speak(self, text):
+         try:
+             if self.tts_choice == "OpenVoice":  # note: this branch actually loads facebook/mms-tts-eng
+                 model_path = snapshot_download("facebook/mms-tts-eng")
+                 pipe = pipeline("text-to-speech", model=model_path)
+                 result = pipe(text)  # returns {"audio": ndarray, "sampling_rate": int}
+                 audio_array = (result["audio"].squeeze() * 32767).astype(np.int16)
+                 pygame.mixer.init(frequency=result["sampling_rate"], channels=1)
+                 sound = pygame.sndarray.make_sound(audio_array)
+                 sound.play()
+                 pygame.time.delay(int(sound.get_length() * 1000))
+             else:  # gTTS
+                 tts = gTTS(text=text, lang='en')
+                 tts.save("response.mp3")
+                 pygame.mixer.init()
+                 pygame.mixer.music.load("response.mp3")
+                 pygame.mixer.music.play()
+                 while pygame.mixer.music.get_busy():
+                     pygame.time.Clock().tick(10)
+         except Exception as e:
+             print("Error occurred during speech generation:", e)
+
+
+ with gr.Blocks() as demo:
+     model_choice = gr.Dropdown(["openai/whisper-large"], label="Whisper Model", value="openai/whisper-large")
+     tts_choice = gr.Radio(["OpenVoice", "gTTS"], label="TTS Engine", value="OpenVoice")
+     input_data = gr.Audio(source="microphone", type="numpy", label="Speak Your Message")
+     output_text = gr.Textbox(label="Transcription and Response")
+     chat_state = gr.State(InteractiveChat())
+
+     # Rebuild the chat object when either setting changes; gr.State holds the live instance
+     # so the audio handler receives the object rather than a dropdown string.
+     model_choice.change(lambda m, t: InteractiveChat(m, t), inputs=[model_choice, tts_choice], outputs=chat_state)
+     tts_choice.change(lambda t, m: InteractiveChat(m, t), inputs=[tts_choice, model_choice], outputs=chat_state)
+
+     def on_audio(audio, chat):
+         transcription, response = chat.generate_response(audio)
+         chat.speak(response)  # Speak the response
+         return f"{transcription}\n\n{response}"
+
+     input_data.change(on_audio, inputs=[input_data, chat_state], outputs=output_text)
+
+ demo.launch()
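
Review note: `generate_response` passes `sampling_rate=16_000` to the processor, but `gr.Audio(type="numpy")` records at the browser's native rate, typically 44.1 or 48 kHz. A minimal preprocessing sketch that could sit in front of the Whisper call; the `to_whisper_input` helper and the librosa dependency are assumptions, not part of this commit:

    # Hypothetical helper, not in the commit: normalize Gradio mic audio for Whisper.
    import librosa
    import numpy as np

    def to_whisper_input(sample_rate: int, samples: np.ndarray) -> np.ndarray:
        """Convert (sample_rate, int16 samples) from gr.Audio to mono float32 at 16 kHz."""
        audio = samples.astype(np.float32) / 32768.0      # int16 PCM -> floats in [-1, 1]
        if audio.ndim == 2:                               # stereo -> mono
            audio = audio.mean(axis=1)
        if sample_rate != 16_000:                         # Whisper models expect 16 kHz input
            audio = librosa.resample(audio, orig_sr=sample_rate, target_sr=16_000)
        return audio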
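
For a quick check outside the Gradio UI, the class can be exercised directly. A sketch, assuming soundfile is installed and a 16 kHz mono hello.wav exists; both are assumptions, not part of the commit:

    # Hypothetical local smoke test; "hello.wav" is a placeholder path.
    import soundfile as sf

    chat = InteractiveChat(tts_choice="gTTS")   # gTTS branch avoids loading the TTS pipeline
    samples, sample_rate = sf.read("hello.wav", dtype="int16")
    transcription, response = chat.generate_response((sample_rate, samples))
    print("Heard:", transcription)
    print("Reply:", response)
    chat.speak(response)                        # plays response.mp3 via pygame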