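# Speech-to-text + sentiment demo: transcribes microphone audio with wav2vec2,
# classifies the transcript's sentiment, and logs each result to Firestore.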
import streamlit as st
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime
from transformers import pipeline
import gradio as gr

def get_db_firestore():
    # Initialize the Firebase Admin SDK from the service-account key file and return a Firestore client.
    cred = credentials.Certificate('test.json')
    firebase_admin.initialize_app(cred, {'projectId': 'clinical-nlp-b9117'})
    db = firestore.client()
    return db

def upsertoftheminute(collection, document, firefield, first, last, born):
    # Upsert one document using the module-level Firestore client `db`;
    # 'born' is stored as today's date (YYYY-MM-DD), so the passed-in value is effectively ignored.
    date_time = str(datetime.datetime.today()).split()[0]
    doc_ref = db.collection(collection).document(document)
    doc_ref.set({'firefield': firefield, 'first': first, 'last': last, 'born': date_time})

def selectCollectionDocument(collection, document):
    # Read the document back and display its id and contents via Streamlit.
    doc_ref = db.collection(collection).document(document)
    doc = doc_ref.get()
    st.write("The id is: ", doc.id)
    st.write("The contents are: ", doc.to_dict())

db = get_db_firestore()
upsertoftheminute('TimeSeries', 'DocumentofMinute', 'TestUser1', '🧠🌳Yggdrasil🌳🧠', 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave', 2022)
selectCollectionDocument('TimeSeries', 'DocumentofMinute')
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")

def transcribe(audio):
    # The ASR pipeline returns a dict like {"text": "..."}; return just the transcript.
    text = asr(audio)["text"]
    return text

# Transcription-only Interface (assumes the current Gradio audio-input API); it is defined but not
# launched here, so the combined Blocks demo at the bottom of the file serves as the app.
transcribe_interface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
)

# Default text-classification pipeline (a binary POSITIVE/NEGATIVE sentiment model).
classifier = pipeline("text-classification")

def speech_to_text(speech):
    # Transcribe the audio and log the transcript to Firestore.
    text = asr(speech)["text"]
    upsertoftheminute('TimeSeries', 'DocumentofMinuteText', 'TestUser1', '🧠🌳Yggdrasil🌳🧠', text, 2022)
    return text

def text_to_sentiment(text):
    # Classify the text (output like [{'label': 'POSITIVE', 'score': 0.99}]) and log the label to Firestore.
    sentiment = classifier(text)[0]["label"]
    upsertoftheminute('TimeSeries', 'DocumentofMinuteSentiment', 'TestUser1', '🧠🌳Yggdrasil🌳🧠', sentiment, 2022)
    return sentiment

demo = gr.Blocks()
with demo:
    audio_file = gr.Audio(type="filepath")
    text = gr.Textbox()
    label = gr.Label()
    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")
    # Button 1: audio -> transcript; Button 2: transcript -> sentiment label.
    b1.click(speech_to_text, inputs=audio_file, outputs=text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)

demo.launch()
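
# A minimal dependency sketch for this app (an assumption; exact versions are not part of this file):
#   streamlit
#   firebase-admin
#   transformers
#   torch            # backend required by the transformers pipelines
#   gradio
# The Firestore service-account key is expected at test.json alongside the script.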