FranklinWillemen committed on
Commit
f41a51e
1 Parent(s): b4103dd
Files changed (1) hide show
  1. app.py → discourse.py +7 -28
app.py → discourse.py RENAMED
@@ -1,16 +1,14 @@
1
- import gradio as gr
2
  import openai
3
- import config
4
  import os
5
  import azure.cognitiveservices.speech as speechsdk
6
 
7
  openai.api_key = os.environ['OPEN_AI_KEY']
8
 
9
  speech_config = speechsdk.SpeechConfig(subscription=os.environ['AZURE_SPEECH_KEY'], region="westeurope")
10
- #audio_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
11
  speech_config.speech_synthesis_voice_name = "nl-NL-ColetteNeural"
12
  speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
13
 
 
14
  context = [{"role": "system", "content": 'You are a clever and helpfull conversationalist. \
15
  Respond to all input in 25 words or less. \
16
  Do not mention that you are an AI or service of any kind.'}]
@@ -29,7 +27,7 @@ def gen_voice(response, response_filename):
29
  stream = speechsdk.AudioDataStream(reponse_audio)
30
  stream.save_to_wav_file(response_filename)
31
 
32
- def conversation(audio:str):
33
  transcript = transcribe("whisper-1", audio)
34
  context.append({"role": "user", "content": transcript['text']})
35
 
@@ -38,28 +36,9 @@ def conversation(audio:str):
38
 
39
  gen_voice(response, "voice.wav")
40
 
41
- chat_transcript = ""
42
- for message in context:
43
- if message['role'] != 'system':
44
- chat_transcript += message['role'] + ": " + message['content'] + "\n\n"
45
-
46
- return "voice.wav"
47
-
48
- # set a custom theme
49
- theme = gr.themes.Default().set(
50
- body_background_fill="#000000",
51
- )
52
-
53
- with gr.Blocks(theme=theme) as ui:
54
- # advisor image input and microphone input
55
- #advisor = gr.Image(value=config.TARS_LOGO).style(width=config.LOGO_IMAGE_WIDTH, height=config.LOGO_IMAGE_HEIGHT)
56
- audio_input = gr.Audio(source="microphone", type="filepath")
57
- audio_output = gr.Audio()
58
-
59
- # text transcript output and audio
60
- # text_output = gr.Textbox(label="Transcript")
61
-
62
- btn = gr.Button("Run")
63
- btn.click(fn=conversation, inputs=audio_input, outputs=[audio_output])
64
 
65
- ui.launch()
 
 
1
  import openai
 
2
  import os
3
  import azure.cognitiveservices.speech as speechsdk
4
 
5
  openai.api_key = os.environ['OPEN_AI_KEY']
6
 
7
  speech_config = speechsdk.SpeechConfig(subscription=os.environ['AZURE_SPEECH_KEY'], region="westeurope")
 
8
  speech_config.speech_synthesis_voice_name = "nl-NL-ColetteNeural"
9
  speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
10
 
11
+
12
  context = [{"role": "system", "content": 'You are a clever and helpfull conversationalist. \
13
  Respond to all input in 25 words or less. \
14
  Do not mention that you are an AI or service of any kind.'}]
 
27
  stream = speechsdk.AudioDataStream(reponse_audio)
28
  stream.save_to_wav_file(response_filename)
29
 
30
+ def respond(audio:str):
31
  transcript = transcribe("whisper-1", audio)
32
  context.append({"role": "user", "content": transcript['text']})
33
 
 
36
 
37
  gen_voice(response, "voice.wav")
38
 
39
+ # chat_transcript = ""
40
+ # for message in context:
41
+ # if message['role'] != 'system':
42
+ # chat_transcript += message['role'] + ": " + message['content'] + "\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
+ return "voice.wav"