Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -26,8 +26,8 @@ import urllib.request
|
|
26 |
import sqlite3
|
27 |
import pandas as pd
|
28 |
import pandasql as ps
|
29 |
-
import
|
30 |
-
import
|
31 |
|
32 |
|
33 |
def clean(value):
|
@@ -253,43 +253,46 @@ def g_sheet_log(myinput, output):
|
|
253 |
).execute()
|
254 |
|
255 |
openai.api_key = st.secrets["OPENAI_KEY"]
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
-
RECORD_SECONDS = 5
|
261 |
|
262 |
def record_audio():
|
263 |
-
|
264 |
-
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
|
272 |
-
#
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
#
|
279 |
-
stream.
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
#
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
wf.
|
289 |
-
wf.
|
290 |
-
|
291 |
-
#
|
292 |
-
|
|
|
|
|
|
|
|
|
293 |
|
294 |
|
295 |
def openai_response(PROMPT):
|
@@ -625,6 +628,7 @@ elif Usage == 'Random Questions':
|
|
625 |
transcription = openai.Audio.transcribe("whisper-1", file)
|
626 |
result = transcription["text"]
|
627 |
st.write(f"Fetched from audio - {result}")
|
|
|
628 |
question = result
|
629 |
response = openai.Completion.create(
|
630 |
model="text-davinci-003",
|
|
|
26 |
import sqlite3
|
27 |
import pandas as pd
|
28 |
import pandasql as ps
|
29 |
+
import sounddevice as sd
|
30 |
+
import soundfile as sf
|
31 |
|
32 |
|
33 |
def clean(value):
|
|
|
253 |
).execute()
|
254 |
|
255 |
openai.api_key = st.secrets["OPENAI_KEY"]
|
256 |
+
# Recording parameters consumed by record_audio() below.
duration = 5             # capture length in seconds
fs = 44100               # sample rate in Hz (CD-quality)
channels = 1             # mono input
filename = "output.wav"  # destination path for the captured audio
|
|
|
260 |
|
261 |
def record_audio():
    """Record ``duration`` seconds of audio and save it as a WAV file.

    Uses the module-level settings ``duration``, ``fs``, ``channels`` and
    ``filename``. ``sd.rec`` returns immediately, so ``sd.wait()`` blocks
    until the whole buffer has been captured before writing it out.

    Returns:
        str: path of the written WAV file (the module-level ``filename``).
    """
    myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
    sd.wait()  # block until the asynchronous recording completes
    sf.write(filename, myrecording, fs)
    return filename
|
296 |
|
297 |
|
298 |
def openai_response(PROMPT):
|
|
|
628 |
transcription = openai.Audio.transcribe("whisper-1", file)
|
629 |
result = transcription["text"]
|
630 |
st.write(f"Fetched from audio - {result}")
|
631 |
+
|
632 |
question = result
|
633 |
response = openai.Completion.create(
|
634 |
model="text-davinci-003",
|