AutoGeneralAI committed on
Commit
62bf2a7
1 Parent(s): 80ed3b2

Synced repo using 'sync_with_huggingface' Github Action

Browse files
Files changed (2) hide show
  1. app.py +46 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import openai, subprocess
3
+ import os
4
+ # import config
5
+ # openai.api_key = config.OPENAI_API_KEY
6
# Conversation history, seeded with the therapist system prompt.
# transcribe() mutates this module-level list, appending user and
# assistant turns as the session progresses.
messages = [
    dict(
        role="system",
        content='You are a therapist. Respond to all input in 25 words or less.',
    )
]
9
def transcribe(key, audio):
    """Transcribe a recorded clip with Whisper, get a chat reply, and return
    the running conversation transcript.

    Parameters
    ----------
    key : str
        OpenAI API key entered by the user in the UI.
    audio : str
        Filesystem path to the recorded audio (Gradio ``type="filepath"``).

    Returns
    -------
    str
        The conversation so far, one ``role: content`` paragraph per turn,
        with the system prompt excluded.
    """
    openai.api_key = key
    global messages

    # The Whisper endpoint infers format from the extension; Gradio's temp
    # file has none, so rename it to a .wav path first.
    audio_filename_with_extension = audio + '.wav'
    os.rename(audio, audio_filename_with_extension)

    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(audio_filename_with_extension, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    messages.append({"role": "user", "content": transcript["text"]})

    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    # subprocess.call(["say", system_message['content']])  # optional macOS text-to-speech
    print("output: " + system_message['content'] + "\n")

    # Render every non-system turn as "role: content" separated by blank lines.
    chat_transcript = ""
    for message in messages:
        if message['role'] != 'system':
            chat_transcript += message['role'] + ": " + message['content'] + "\n\n"

    return chat_transcript
35
+
36
# ui = gr.Interface(fn=transcribe, inputs=["text", gr.Audio(source="microphone", type="filepath")], outputs="text").launch()

# API-key field: masked input so the key is not displayed on screen.
keyTxt = gr.Textbox(
    show_label=True,
    placeholder="Your API-key...",  # no interpolation needed, so not an f-string
    type="password",
    visible=True,
    label="API-Key",
)

# Bug fix: the original did `ui = gr.Interface(...).launch()` and then called
# `ui.launch()` again — but launch() does not return the Interface, so the
# second call raised. Build the interface once, launch it once.
ui = gr.Interface(
    fn=transcribe,
    inputs=[keyTxt, gr.Audio(source="microphone", type="filepath")],
    outputs="text",
)
ui.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ openai
2
+ gradio