ALVHB95 committed
Commit c74538d · 1 Parent(s): 48b6d70
Files changed (1):
  1. app.py +40 -18
app.py CHANGED
@@ -119,7 +119,39 @@ qa_chain = ConversationalRetrievalChain.from_llm(
     output_key = 'output',
 )
 
-def chat_interface(question,history):
+from transformers import pipeline
+import soundfile as sf
+import gradio as gr
+from gradio_client import Client
+
+# Load ASR pipeline
+asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-large")
+
+def chat_interface(question, audio_input=None, history=None):
+    if audio_input is not None:
+        # Function to transcribe the audio input
+        def transcribe_audio(audio):
+            sample_rate, audio_data = audio
+            file_name = "recorded_audio.wav"
+            # Write audio data to a file
+            sf.write(file_name, audio_data, sample_rate)
+            # Transcribe audio using ASR model
+            transcript = asr_pipe(file_name)["text"]
+            return transcript
+
+        # Transcribe the audio input
+        transcribed_text = transcribe_audio(audio_input)
+
+        # Call the chatbot API with the transcribed text
+        client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
+        response = client.predict(
+            transcribed_text,
+            api_name="/chat"
+        )
+
+        return response
+
+    # Original chatbot logic
     result = qa_chain.invoke({'question': question})
     output_string = result['output']
 
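Note: the new transcribe_audio helper round-trips the microphone capture through a temporary .wav file. The transformers ASR pipeline can also consume the raw array directly, which skips the disk write. A minimal sketch (the helper name transcribe_audio_in_memory is illustrative, not part of this commit), assuming gr.Audio hands back its default (sample_rate, int16 numpy array) tuple:

import numpy as np
from transformers import pipeline

asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-large")

def transcribe_audio_in_memory(audio):
    # gr.Audio with type="numpy" yields a (sample_rate, np.ndarray) tuple
    sample_rate, audio_data = audio
    # Whisper expects mono float32: collapse stereo channels, rescale int16 PCM
    if audio_data.ndim > 1:
        audio_data = audio_data.mean(axis=1)
    audio_data = audio_data.astype(np.float32) / 32768.0
    # The pipeline accepts a {"sampling_rate": ..., "raw": ...} dict directly
    return asr_pipe({"sampling_rate": sample_rate, "raw": audio_data})["text"]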
@@ -134,26 +166,16 @@ def chat_interface(question,history):
 
     # Extract the answer value between double quotes
     answer_value = answer_part[quote_index + 1:answer_part.find('"', quote_index + 1)]
-
-    return answer_value
 
+    return answer_value
 
-chatbot_gradio_app = gr.ChatInterface(
+chatbot_gradio_app = gr.Interface(
     fn=chat_interface,
-    additional_inputs=gr.Audio(sources=["microphone"]),
-    additional_inputs_accordion=gr.Accordion(label="Audio aquí ;)", open=True)
-    title=custom_title
+    inputs=[
+        gr.Textbox(lines=3, label="Type your message here"),
+        gr.Audio(label="Record your voice", type="microphone")  # Change type to "microphone"
+    ],
+    outputs=gr.Textbox(label="Bot's Response"),
 )
 
-# chatbot_gradio_app = gr.Interface(
-#     fn=chat_interface,
-#     inputs=[
-#         gr.Textbox(lines=3, label="Type your message here"),
-#         gr.Audio(label="Record your voice")
-#     ],
-#     outputs=gr.Textbox(label="Bot's Response"),
-# )
-
-
-chatbot_gradio_app.queue()
 chatbot_gradio_app.launch()
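Note: type="microphone" is not a valid value for gr.Audio, and the inline comment is misleading. The type parameter selects the format the function receives ("numpy" or "filepath"); the capture source is configured separately. A sketch of the same wiring with valid arguments, reusing the commit's chat_interface and assuming Gradio 4.x (3.x takes source="microphone" instead of sources):

import gradio as gr

chatbot_gradio_app = gr.Interface(
    fn=chat_interface,
    inputs=[
        gr.Textbox(lines=3, label="Type your message here"),
        # sources picks the capture device; type="numpy" keeps the
        # (sample_rate, array) tuple that transcribe_audio expects
        gr.Audio(sources=["microphone"], type="numpy", label="Record your voice"),
    ],
    outputs=gr.Textbox(label="Bot's Response"),
)

chatbot_gradio_app.launch()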
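Note: the remote call assumes the /chat endpoint of that Space accepts a single positional string. gradio_client can print an endpoint's actual signature before you depend on it (assuming the Space is still live):

from gradio_client import Client

client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
# Lists each named endpoint with the parameters and return values it expects,
# a quick way to verify what predict(..., api_name="/chat") should be given
client.view_api()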