File size: 2,075 Bytes
215cca8
 
 
 
d8b9844
eb829ce
 
 
215cca8
 
 
 
 
 
 
 
d8b9844
215cca8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb829ce
 
 
 
 
215cca8
eb829ce
 
 
215cca8
 
 
eb829ce
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import streamlit as st
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime
from transformers import pipeline
import gradio as gr

@st.experimental_singleton
def get_db_firestore():
    """Create (or reuse) the Firebase app and return a Firestore client.

    Cached by Streamlit so the app is initialized once per session. The
    explicit get_app() guard also protects against the ValueError that
    firebase_admin raises when initialize_app() is called twice (e.g. on
    a Streamlit rerun before the singleton cache is populated).

    Returns:
        A firestore.Client bound to project 'clinical-nlp-b9117'.
    """
    try:
        firebase_admin.get_app()  # raises ValueError when no default app exists yet
    except ValueError:
        cred = credentials.Certificate('test.json')  # service-account key file
        firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117'})
    return firestore.client()

def upsertoftheminute(collection, document, firefield, first, last, born, database=None):
    """Create or overwrite a Firestore document stamped with today's date.

    Parameters:
        collection: Firestore collection name.
        document: Document id to upsert within the collection.
        firefield: Value stored under the 'firefield' key.
        first: Value stored under the 'first' key.
        last: Value stored under the 'last' key.
        born: Accepted for call-site compatibility but NOT stored — the
            'born' field is always overwritten with today's date. This
            mirrors the original behavior; NOTE(review): confirm the
            parameter being ignored is intentional.
        database: Optional Firestore client; falls back to the
            module-level `db` when omitted, so existing callers work
            unchanged.
    """
    client = db if database is None else database
    # ISO date (YYYY-MM-DD); equivalent to str(datetime.datetime.today()).split()[0]
    date_time = datetime.date.today().isoformat()
    doc_ref = client.collection(collection).document(document)
    doc_ref.set({u'firefield': firefield, u'first': first, u'last': last, u'born': date_time})
      
def selectCollectionDocument(collection, document, database=None):
    """Fetch one Firestore document and render its id and contents via Streamlit.

    Parameters:
        collection: Firestore collection name.
        document: Document id to read.
        database: Optional Firestore client; falls back to the
            module-level `db` when omitted, keeping existing callers
            working.

    Returns:
        The document contents as a dict (None when the document does not
        exist, per Firestore's DocumentSnapshot.to_dict()). The original
        returned None and callers ignore the return, so this is
        backward compatible.
    """
    client = db if database is None else database
    doc = client.collection(collection).document(document).get()
    st.write("The id is: ", doc.id)
    st.write("The contents are: ", doc.to_dict())
    return doc.to_dict()
    
    
# Module-level bootstrap (runs on import): build the cached Firestore
# client, write a demo record, then read it back and render it in the
# Streamlit app.
db = get_db_firestore()
upsertoftheminute(u'TimeSeries', u'DocumentofMinute', u'TestUser1', u'🧠🌳Yggdrasil🌳🧠', u'https://huggingface.co/spaces/awacke1/FirestorePersistence', 2022)
selectCollectionDocument(u"TimeSeries", u"DocumentofMinute")



# Hugging Face pipelines: wav2vec2 for speech recognition, and the
# library's default model for text classification (both are downloaded on
# first run; loading happens once at module import).
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
classifier = pipeline("text-classification")

def speech_to_text(speech):
    """Transcribe an audio file and persist the transcript to Firestore.

    Runs the module-level ASR pipeline on `speech` (a file path), stores
    the recognized text via upsertoftheminute, and returns it.
    """
    result = asr(speech)
    transcript = result["text"]
    upsertoftheminute(u'TimeSeries', u'DocumentofMinuteText', u'TestUser1', u'🧠🌳Yggdrasil🌳🧠', transcript, 2022)
    return transcript

def text_to_sentiment(text):
    """Classify `text`, persist the predicted label, and return it.

    Uses the module-level `classifier` pipeline; the top prediction's
    label is written to Firestore via upsertoftheminute.
    """
    prediction = classifier(text)[0]
    label = prediction["label"]
    upsertoftheminute(u'TimeSeries', u'DocumentofMinuteSentiment', u'TestUser1', u'🧠🌳Yggdrasil🌳🧠', label, 2022)
    return label

# Gradio UI: upload/record audio, transcribe it to the textbox, then
# classify the textbox contents into the label widget.
demo = gr.Blocks()

with demo:
    # Widgets: audio input (passed to the ASR pipeline as a file path),
    # a textbox for the transcript, and a label for the sentiment result.
    audio_file = gr.Audio(type="filepath")
    text = gr.Textbox()
    label = gr.Label()

    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")

    # Wire buttons to the handlers above; each click also writes a
    # record to Firestore as a side effect of the handler.
    b1.click(speech_to_text, inputs=audio_file, outputs=text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)

demo.launch()