Attempt to make response go to audio via pipeline
app.py CHANGED
@@ -3,11 +3,13 @@ import toml
 from omegaconf import OmegaConf
 from query import VectaraQuery
 import os
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 
 import streamlit as st
 from PIL import Image
 
+model = pipeline("sentiment-analysis") #needs finishing
+
 master_prompt = """
 As a Natural Farming Fertilizers Assistant, you will assist the user with any farming related question, always willing to answer any question and provide useful organic farming advice in the following format.
 ' ' '
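A side note on the new module-level line: a task-only pipeline("sentiment-analysis") call downloads a default text-classification checkpoint (DistilBERT fine-tuned on SST-2) the moment app.py is imported, and it is unrelated to text-to-speech; the author flags this with "#needs finishing". A minimal sketch of what that call actually does, with a hypothetical input sentence:

from transformers import pipeline

# A task-only pipeline() loads a default checkpoint; for "sentiment-analysis"
# that is a sentiment classifier, not a speech model.
classifier = pipeline("sentiment-analysis")
result = classifier("Compost tea improved my tomato yield.")  # hypothetical input
print(result)  # e.g. [{'label': 'POSITIVE', 'score': 0.99}]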
@@ -93,27 +95,36 @@ def launch_bot():
             #call in Mistral
             prompt3 = master_prompt + prompt2 + "context:" + response
             print("Called in Mistral")
-
-
-
-            model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
-
-            tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
-
-            #from mistral docs: prompt = "My favourite condiment is"
-
-
-            model_inputs = tokenizer([prompt3], return_tensors="pt").to(device)
-
-            model.to(device)
-
-
-            generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
-            st.write("Mistral:" + tokenizer.batch_decode(generated_ids)[0])
+            # ADD IN LLM
+            # st.write("Mistral:") #Needs finishing
             # else:
-
+            st.write(response)
             message = {"role": "assistant", "content": response}
             st.session_state.messages.append(message)
+
+    # If the assistant has most recently responded, create audio of the response
+    if st.session_state.messages[-1]["role"] == "assistant":
+        #text-to-speech
+        pipe = pipeline(model="suno/bark-small")
+        output = pipe()
+
+        audio = output[response]
+        sampling_rate = output["sampling_rate"]
+        # ST interface for audio
+        audio_file = audio
+        audio_bytes = audio_file.read()
+
+        st.audio(audio_bytes, format='audio/ogg')
+
+        sample_rate = 44100  # 44100 samples per second
+        seconds = 2  # Note duration of 2 seconds
+        frequency_la = 440  # Our played note will be 440 Hz
+        # Generate array with seconds*sample_rate steps, ranging between 0 and seconds
+        t = np.linspace(0, seconds, seconds * sample_rate, False)
+        # Generate a 440 Hz sine wave
+        note_la = np.sin(frequency_la * t * 2 * np.pi)
+
+        st.audio(note_la, sample_rate=sample_rate)
 
 if __name__ == "__main__":
     launch_bot()
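For reference, the Mistral block removed above never defined device, so the .to(device) calls would have raised a NameError before the commented-out st.write ever ran. A minimal sketch of that block with the device derived from torch, keeping the original checkpoint name; the prompt string is a stand-in taken from the "from mistral docs" comment, where the app would use prompt3:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# `device` was undefined in the removed code; derive it from torch instead
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1").to(device)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

prompt3 = "My favourite condiment is"  # stand-in; the app builds this from master_prompt + context
model_inputs = tokenizer([prompt3], return_tensors="pt").to(device)
generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
print(tokenizer.batch_decode(generated_ids)[0])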
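The committed text-to-speech block has a few issues as written: pipe() is called with no text, output[response] indexes the result dict with the response string rather than the "audio" key, the returned NumPy array has no .read() method, and numpy itself is never imported in this file. A minimal working sketch, assuming the Transformers text-to-speech pipeline returns a dict with "audio" and "sampling_rate" keys (as it does for Bark) and that st.audio is given a NumPy array with an explicit sample_rate; the response string is a stand-in for the chat response:

import numpy as np
import streamlit as st
from transformers import pipeline

pipe = pipeline("text-to-speech", model="suno/bark-small")

response = "Compost needs a carbon-to-nitrogen ratio of about 30:1."  # stand-in for the chat response
output = pipe(response)               # pass the text to synthesize
audio = np.squeeze(output["audio"])   # index by the literal "audio" key
sampling_rate = output["sampling_rate"]

# st.audio can play a raw NumPy array directly; no file object or .read() is needed
st.audio(audio, sample_rate=sampling_rate)

This st.audio(array, sample_rate=...) form is the same one the committed 440 Hz sine-wave test at the end of the block relies on, so once numpy is imported that test tone should play as-is.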