import streamlit as st
from gradio_client import Client
from st_audiorec import st_audiorec
from gtts import gTTS
import os
# Constants
TITLE = "𝗙𝗹𝗮𝘄𝗹𝗲𝘀𝘀𝗔𝗜 💬"
DESCRIPTION = """
----
ғʟᴀᴡʟᴇssᴀɪ | ᴄʜᴀᴛɢᴘᴛ's ɴɪɢʜᴛᴍᴀʀᴇ 😈
----
"""
# Initialize client
whisper_client = Client("https://hf-audio-whisper-large-v3.hf.space/")
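# Note (assumption, not stated in the original source): the hf-audio/whisper-large-v3
# Space is expected to expose a single "/predict" endpoint that takes an audio
# filepath and a task string ("transcribe"), which is how transcribe() below calls it.
# If that Space changes its API, the transcribe() call will need to be updated.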
# Function to convert text to speech using gTTS
def text_to_speech(text, lang='en'):
    tts = gTTS(text=text, lang=lang, slow=False)
    tts.save("response.mp3")
    return "response.mp3"
# Function to transcribe audio
def transcribe(wav_path):
    return whisper_client.predict(
        wav_path,
        "transcribe",
        api_name="/predict"
    )
# Prediction function
def predict(message, system_prompt='Your name is FlawlessAI. You were developed by Achyuth. Your developer is a 14-year-old boy who is interested in AI and machine learning. Do not reveal the age of your developer unless the user asks for it. When the user asks for information, skip exclamations and get straight to the point. Give short, meaningful, to-the-point answers. Never miscalculate: check and verify every calculation before giving the user the answer. Provide accurate and correct information. If the user writes to you in any language, reply in that same language. For example, if the user says "నమస్తే", reply "నమస్తే! నేను మీ అగ్రిట్యూర్ని. ఈ రోజు మీకు సహాయం చేయడానికి నేను ఇక్కడ ఉన్నాను. ఈ రోజు విషయం ఏమిటి? ఏదైనా విషయంలో ఏమైనా సందేహాలు ఉన్నాయా?". Always reply in the same language as the user.', temperature=0.5, max_new_tokens=2048, top_p=0.5, repetition_penalty=1.2):
    with st.status("Starting client"):
        client = Client("https://huggingface-projects-llama-2-7b-chat.hf.space/")
        st.write("Chat client ready")
    with st.status("Requesting OpenGPT"):
        st.write("Requesting API")
        response = client.predict(
            message,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            500,  # kept from the original call; assumed to be the endpoint's top_k slot
            repetition_penalty,
            api_name="/chat"
        )
        st.write("Done ✅")
    return response
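# Illustrative usage (assumption): predict() can also be called outside the chat UI,
# e.g. predict(message="What is 2 + 2?") should return a short text answer, provided
# the remote Space is awake and its /chat endpoint still accepts the positional
# arguments in the order used above.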
# Streamlit UI
st.title(TITLE)
st.write(DESCRIPTION)

if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=("🧑💻" if message["role"] == 'human' else '🦙')):
        st.markdown(message["content"])
textinput = st.chat_input("Ask 𝗢𝗽𝗲𝗻𝗚𝗣𝗧 anything...")
wav_audio_data = st_audiorec()

# React to recorded audio
if wav_audio_data is not None:
    with st.status("Transcribing audio..."):
        # Save the recording, then transcribe it
        with open("audio.wav", "wb") as f:
            f.write(wav_audio_data)
        prompt = transcribe("audio.wav")
        st.write("Transcribed the recorded audio ✔")

    # Display user message in chat message container
    st.chat_message("human", avatar="🧑💻").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    # Generate the assistant response
    response = predict(message=prompt)
    with st.chat_message("assistant", avatar='🦙'):
        st.markdown(response)
        # Convert AI response to speech
        speech_file = text_to_speech(response)
        # Play the generated speech
        st.audio(speech_file, format="audio/mp3")
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
# React to typed user input
if prompt := textinput:
    # Display user message in chat message container
    st.chat_message("human", avatar="🧑💻").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    response = predict(message=prompt)
    # Convert AI response to speech
    speech_file = text_to_speech(response)
    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='🦙'):
        st.markdown(response)
        # Play the generated speech
        st.audio(speech_file, format="audio/mp3")
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})