awacke1 commited on
Commit
33325d0
1 Parent(s): 7fe34cd

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -0
app.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import firebase_admin
3
+ from firebase_admin import credentials
4
+ from firebase_admin import firestore
5
+ import datetime
6
+ from transformers import pipeline
7
+ import gradio as gr
8
+
9
@st.experimental_singleton
def get_db_firestore():
    """Create and cache the Firestore client for this app.

    Wrapped in st.experimental_singleton so the Firebase app is only
    initialized once per Streamlit server process (firebase_admin raises
    if initialize_app is called twice for the same default app).

    Returns:
        A firestore client bound to project 'clinical-nlp-b9117'.
    """
    # Service-account key path is relative to the working directory —
    # TODO confirm 'test.json' is present in the deployed Space.
    cred = credentials.Certificate('test.json')
    firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117'})
    return firestore.client()
15
+
16
+
17
# Module-level singletons shared by all handlers below: the Firestore
# client and the speech-recognition pipeline are built once at import time.
db = get_db_firestore()
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
19
+
20
def transcribe(audio):
    """Run speech recognition on an audio file and return the text.

    Args:
        audio: path to an audio file (gradio 'filepath' input).

    Returns:
        str: the transcription produced by the wav2vec2 ASR pipeline.
    """
    return asr(audio)["text"]
23
+
24
# Default text-classification pipeline from transformers; used to label
# the sentiment of recognized speech.
classifier = pipeline("text-classification")
25
+
26
def speech_to_text(speech):
    """Transcribe *speech* (an audio file path) to text via the ASR pipeline.

    NOTE(review): functionally identical to transcribe(); kept as a separate
    entry point because the UI wires this name to a button.
    """
    recognized = asr(speech)["text"]
    return recognized
29
+
30
def text_to_sentiment(text):
    """Return the sentiment label predicted for *text*.

    Only the label of the top prediction (e.g. 'POSITIVE') is returned;
    the confidence score is discarded.
    """
    top_prediction = classifier(text)[0]
    return top_prediction["label"]
33
+
34
def upsert(text):
    """Save *text* to the Text2SpeechSentimentSave collection and echo it back.

    The document id is the current timestamp, so each call creates a new
    document rather than overwriting an old one.

    Returns:
        The stored document, re-read via select() for display in the UI.
    """
    date_time = str(datetime.datetime.today())
    doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
    doc_ref.set({
        u'firefield': 'Recognize Speech',
        u'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave',
        u'last': text,
        u'born': date_time,
    })
    # Browse saved data: https://console.firebase.google.com/u/0/project/clinical-nlp-b9117/firestore/data/~2FStreamlitSpaces
    saved = select('Text2SpeechSentimentSave', date_time)
    return saved
41
+
42
def select(collection, document):
    """Fetch a single Firestore document and describe it for display.

    Args:
        collection: Firestore collection name.
        document: document id within that collection.

    Returns:
        str: "The id is: <id> The contents are: <dict>".

    Fix: the original assigned ``("The id is: ", doc.id)`` — a trailing
    comma builds a *tuple*, not a concatenated string — left ``docid``
    unused, and returned a raw tuple. We now return one readable string
    that includes both the id and the contents.
    """
    doc_ref = db.collection(collection).document(document)
    doc = doc_ref.get()
    return f"The id is: {doc.id} The contents are: {doc.to_dict()}"
48
+
49
def selectall(text):
    """Return every document in Text2SpeechSentimentSave, one per line.

    Args:
        text: unused; present so this handler can be wired directly to a
            gradio Button whose input is the text box.

    Returns:
        str: "<doc id> => <doc dict>" lines joined by newlines.

    Fixes: the original grew a string with ``+=`` (quadratic) and used no
    separator at all, so successive documents ran together; dead
    commented-out code removed.
    """
    docs = db.collection('Text2SpeechSentimentSave').stream()
    return '\n'.join(f'{doc.id} => {doc.to_dict()}' for doc in docs)
59
+
60
# --- Gradio UI --------------------------------------------------------------
demo = gr.Blocks()

with demo:
    # NOTE(review): gr.inputs.* is the legacy pre-3.x gradio API — confirm
    # the pinned gradio version before migrating to gr.Audio(...).
    #audio_file = gr.Audio(type="filepath")
    audio_file = gr.inputs.Audio(source="microphone", type="filepath")
    text = gr.Textbox()
    label = gr.Label()
    saved = gr.Textbox()
    savedAll = gr.Textbox()

    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")
    b3 = gr.Button("Save Speech to Text")
    b4 = gr.Button("Retrieve All")

    # Wire each button to its handler; results land in the widgets above.
    b1.click(speech_to_text, inputs=audio_file, outputs=text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)
    b3.click(upsert, inputs=text, outputs=saved)
    b4.click(selectall, inputs=text, outputs=savedAll)

demo.launch(share=True)