Update app.py
app.py
CHANGED
@@ -1,66 +1,107 @@
-from transformers import pipeline
import os
import logging
-from google.cloud import aiplatform
-
-from google.oauth2 import service_account

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

-# Function to set up Google Cloud credentials
-def setup_google_cloud_credentials():
-    google_credentials_path = ...
-    if os.path.exists(google_credentials_path):
-        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = google_credentials_path
-        logging.info(f"Google Cloud credentials set from {google_credentials_path}")
-    else:
-        raise FileNotFoundError(f"Google Cloud credentials file not found at {google_credentials_path}")
-
-# Initialize the client for Google Cloud AI Platform
-def init_google_ai_platform(api_key):
-    aiplatform.init(api_key=api_key)
-
-# Function to generate a response using Hugging Face
-def generate_hugging_face_response(prompt):
-    # Load a Hugging Face model for text generation
-    generator = pipeline('text-generation', model='gpt2')
-    # Generate text based on the prompt
-    response = generator(prompt, max_length=50, num_return_sequences=1)
-    return response[0]['generated_text']
-
-# Function to generate a response using Google Cloud AI Platform
-def generate_google_ai_response(prompt):
-    response = aiplatform.Model.predict(
-        model_name="gemini-1.5-flash",
-        instances=[{"prompt": prompt}]
-    )
-    return response.predictions[0]['content']
-
-# Main function to run the application
-def main():
-    try:
-        # Set up Google Cloud credentials
-        setup_google_cloud_credentials()
-        ...

    except Exception as e:
-        logger.error(f"Error: {e}")

if __name__ == "__main__":
    main()
+import streamlit as st
+from transformers import pipeline
+import whisper
+from gtts import gTTS
+import tempfile
import os
import logging
+from pydub import AudioSegment
+import openai
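+# Note: AudioSegment and openai are imported but not used anywhere below.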

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

+# Load Hugging Face model for text generation (instead of Google Cloud)
+# Cache the pipeline so it is not re-created on every Streamlit rerun
+@st.cache_resource
+def load_hf_model():
+    # Load a model for heart health-related questions
+    return pipeline("text-generation", model="gpt2")
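+# Note: gpt2 is a small general-purpose model with no medical fine-tuning,
+# so chatbot answers are generic text continuations, not health advice.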

+# Load Whisper model for transcription
+@st.cache_resource
+def load_whisper_model():
+    return whisper.load_model("base")
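+# "base" trades accuracy for speed; larger checkpoints such as "small" or
+# "medium" transcribe better but take longer to load and run.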

+# Function to generate response using Hugging Face model
+def generate_hf_response(model, prompt):
+    result = model(prompt, max_length=100, num_return_sequences=1)
+    return result[0]["generated_text"]
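+# Note: max_length counts prompt plus completion tokens, and the pipeline
+# returns the prompt followed by the continuation, so the reply echoes the
+# user's question at the start.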

+# Function to process audio input using Whisper and Hugging Face
+def process_audio(audio_file, hf_model, whisper_model):
+    try:
+        # Whisper expects a file path (it decodes via ffmpeg), so write the
+        # uploaded bytes to a temporary file before transcribing
+        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(audio_file.name)[1]) as tmp:
+            tmp.write(audio_file.read())
+        # Transcribe audio using Whisper
+        result = whisper_model.transcribe(tmp.name)
+        user_text = result['text']
+        logger.info(f"Transcription successful: {user_text}")
+    except Exception as e:
+        logger.error(f"Error in transcribing audio: {e}")
+        return "Error in transcribing audio.", None

+    try:
+        # Generate response using Hugging Face model
+        response_text = generate_hf_response(hf_model, user_text)
+        logger.info(f"Generated response: {response_text}")
+    except Exception as e:
+        logger.error(f"Error in generating response: {e}")
+        return "Error in generating response.", None

+    try:
+        # Convert the response text to speech; use a new temp file rather than
+        # reassigning the audio_file parameter
+        tts = gTTS(text=response_text, lang='en')
+        tts_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
+        tts.save(tts_file.name)
+        logger.info("Text-to-speech conversion successful.")
    except Exception as e:
+        logger.error(f"Error in text-to-speech conversion: {e}")
+        return "Error in text-to-speech conversion.", None
+
+    return response_text, tts_file.name
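+# Note: gTTS calls Google's hosted text-to-speech endpoint, so this step
+# needs outbound network access.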

+# Main application layout
+def main():
+    st.title("Heart Health & Audio Processing App 🫀🎙️ (Hugging Face Edition)")
+
+    # Load models
+    hf_model = load_hf_model()
+    whisper_model = load_whisper_model()
+
+    # Two tabs: one for the chatbot and one for audio processing
+    tab1, tab2 = st.tabs(["Heart Health Chatbot", "Audio Processing"])

+    # Tab 1: Heart Health Chatbot
+    with tab1:
+        st.header("Chat with Heart Health Specialist")
+
+        if "history" not in st.session_state:
+            st.session_state.history = []
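+        # session_state persists across Streamlit reruns, so the history
+        # list survives each interaction within a browser session.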
+
+        user_input = st.text_input("Ask about heart health:", placeholder="Type here...")
+
+        if st.button("Send") and user_input:
+            bot_response = generate_hf_response(hf_model, user_input)
+            st.session_state.history.append({"role": "user", "content": user_input})
+            st.session_state.history.append({"role": "bot", "content": bot_response})
+
+        for chat in st.session_state.history:
+            if chat["role"] == "user":
+                st.write(f"**You:** {chat['content']}")
+            else:
+                st.write(f"**Bot:** {chat['content']}")

+    # Tab 2: Audio Processing
+    with tab2:
+        st.header("Audio Processing with Whisper and Hugging Face")
+
+        uploaded_audio = st.file_uploader("Upload an audio file for transcription and response", type=["mp3", "wav", "ogg"])
+
+        if uploaded_audio:
+            with st.spinner("Processing audio..."):
+                response_text, audio_file_path = process_audio(uploaded_audio, hf_model, whisper_model)
+
+            if response_text:
+                st.write(f"**Response:** {response_text}")
+            # audio_file_path is None when a processing step failed, so guard
+            # the player instead of passing None to st.audio
+            if audio_file_path:
+                st.audio(audio_file_path)

+# Run the app
if __name__ == "__main__":
    main()
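A requirements.txt along these lines should cover the new imports (the actual file is not part of this commit, so treat the list as an assumption):

    streamlit
    transformers
    torch
    openai-whisper
    gTTS
    pydub
    openai

Whisper and pydub also need the ffmpeg binary available in the Space image (for example via packages.txt).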