Solshine committed on
Commit e1c809c
1 Parent(s): 24f736a

Added temp file

Files changed (1)
  1. app.py +37 -23
app.py CHANGED
@@ -5,6 +5,7 @@ from query import VectaraQuery
 import os
 from transformers import pipeline
 import numpy as np
+import tempfile
 
 import streamlit as st
 from PIL import Image
@@ -28,6 +29,9 @@ As a Natural Farming Fertilizers Assistant, you will assist the user with any fa
 User prompt:
 """
 
+#temporary file system created: used to text-to-speech
+fp = tempfile.TemporaryFile()
+
 def launch_bot():
     def generate_response(question):
         response = vq.submit_query(question)
@@ -106,31 +110,41 @@ def launch_bot():
         audio_result = st.button("Convert to Audio 🔊")
         if audio_result:
             with st.chat_message("text-to-speech"):
-                with st.spinner("Thinking..."):
                 #text-to-speech
-                print("Calling in Text-to-speech via suno/bark-small")
-                synthesiser = pipeline("text-to-speech", "suno/bark-small")
-
-                speech = synthesiser(response, forward_params={"do_sample": True})
-
-                scipy.io.wavfile.write("bark_out.wav", rate=speech["sampling_rate"], data=speech["audio"])
-                # ST interface for audio
-                print("Now we try to display the audio file in the app")
-                audio_file = open('bark_out.wav', 'rb')
-                audio_bytes = audio_file.read()
+                print("Calling in Text-to-speech via suno/bark-small")
+
+                synthesiser = pipeline("text-to-speech", "suno/bark-small")
 
-                audio_convert = st.audio(audio_bytes, format='audio/wav')
-                st.session_state.messages.append(audio_convert)
-
-                # sample_rate = 44100 # 44100 samples per second
-                # seconds = 2 # Note duration of 2 seconds
-                # frequency_la = 440 # Our played note will be 440 Hz
-                # Generate array with seconds*sample_rate steps, ranging between 0 and seconds
-                # t = np.linspace(0, seconds, seconds * sample_rate, False)
-                # Generate a 440 Hz sine wave
-                # note_la = np.sin(frequency_la * t * 2 * np.pi)
-
-                # st.audio(note_la, sample_rate=sample_rate)
+                speech = synthesiser(response, forward_params={"do_sample": True})
+
+                # write to temp file: the scipy wav write bark_out.wav
+                fp.write(scipy.io.wavfile.write("bark_out.wav", rate=speech["sampling_rate"], data=speech["audio"]))
+
+                # ST interface for audio
+                print("Now we try to display the audio file in the app")
+
+                # Temp file access
+                fp.seek(0)
+                audio_file = fp.read('bark_out.wav', 'rb')
+
+                audio_bytes = audio_file.read()
+
+                audio_convert = st.audio(audio_bytes, format='audio/wav')
+                st.session_state.messages.append(audio_convert)
+                sample_rate = 44100 # 44100 samples per second
+                seconds = 2 # Note duration of 2 seconds
+
+                frequency_la = 440 # Our played note will be 440 Hz
+
+                # Generate array with seconds*sample_rate steps, ranging between 0 and seconds
+                t = np.linspace(0, seconds, seconds * sample_rate, False)
+
+                # Generate a 440 Hz sine wave
+                note_la = np.sin(frequency_la * t * 2 * np.pi)
+                st.audio(note_la, sample_rate=sample_rate)
+
+                # Close and delete temp file
+                fp.close()
 
 if __name__ == "__main__":
     launch_bot()
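
Review note (not part of the commit): the new temp-file path has two likely runtime problems. scipy.io.wavfile.write returns None and writes directly to the path or file object it is given, so fp.write(scipy.io.wavfile.write(...)) would raise a TypeError, and a file object's read() only takes an optional byte count, so fp.read('bark_out.wav', 'rb') is not a valid call. Below is a minimal sketch of how the intended flow could look, assuming the Bark pipeline output described in the Transformers docs (a dict with "audio" and "sampling_rate"); the helper name speak_response and the use of NamedTemporaryFile are illustrative, not part of app.py.

import tempfile

import numpy as np
import scipy.io.wavfile
import streamlit as st
from transformers import pipeline

def speak_response(response: str) -> None:
    # Generate speech with Bark; the pipeline returns {"audio": ..., "sampling_rate": ...}.
    synthesiser = pipeline("text-to-speech", "suno/bark-small")
    speech = synthesiser(response, forward_params={"do_sample": True})

    # scipy.io.wavfile.write accepts an open file handle, so the WAV bytes can go
    # straight into a temporary file instead of a bark_out.wav left on disk.
    with tempfile.NamedTemporaryFile(suffix=".wav") as fp:
        scipy.io.wavfile.write(fp, rate=speech["sampling_rate"],
                               data=np.squeeze(speech["audio"]))  # squeeze a possible batch dimension
        fp.seek(0)
        st.audio(fp.read(), format="audio/wav")

Creating the temporary file inside the handler also sidesteps the module-level fp, which is shared across Streamlit reruns and would be unusable after the fp.close() at the end of the button handler.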