awacke1 committed on
Commit caa3d15
1 Parent: 71f3396

Update app.py

Files changed (1)
  1. app.py +3 -24
app.py CHANGED
@@ -19,11 +19,9 @@ def get_db_firestore():
     db = firestore.client()
     return db
 
-
 db = get_db_firestore()
 asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
 
-
 MODEL_NAMES = [
 #    "en/ek1/tacotron2",
     "en/ljspeech/tacotron2-DDC",
@@ -58,8 +56,6 @@ for MODEL_NAME in MODEL_NAMES:
     )
     MODELS[MODEL_NAME] = synthesizer
 
-
-
 def transcribe(audio):
     text = asr(audio)["text"]
     return text
@@ -77,8 +73,8 @@ def text_to_sentiment(text):
 def upsert(text):
     date_time =str(datetime.datetime.today())
     doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
-    doc_ref.set({u'firefield': 'Recognize Speech', u'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave', u'last': text, u'born': date_time,})
-    saved = select('Text2SpeechSentimentSave', date_time)
+    doc_ref.set({u'firefield': 'Recognize Speech', u'first': 'https://huggingface.co/spaces/awacke1/TTS-STT-Blocks/', u'last': text, u'born': date_time,})
+    saved = select('TTS-STT', date_time)
     # check it here: https://console.firebase.google.com/u/0/project/clinical-nlp-b9117/firestore/data/~2FStreamlitSpaces
     return saved
 
@@ -135,21 +131,4 @@ with demo:
     b4.click(selectall, inputs=text, outputs=savedAll)
     b5.click(tts, inputs=[text,TTSchoice], outputs=audio)
 
-demo.launch(share=True)
-
-
-
-
-#iface = gr.Interface(
-#    fn=tts,
-#    inputs=[
-#        gr.inputs.Textbox( label="Input", default="Hello, how are you?", ),
-#        gr.inputs.Radio( label="Pick a TTS Model", choices=MODEL_NAMES, ),
-#    ],
-#    outputs=gr.outputs.Audio(label="Output"),
-#    title="🐸💬 - Coqui TTS",
-#    theme="huggingface",
-#    description="🐸💬 - a deep learning toolkit for Text-to-Speech, battle-tested in research and production",
-#    article="more info at https://github.com/coqui-ai/TTS",
-#)
-#iface.launch()
+demo.launch(share=True)
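
For context, the Firestore save/read pair that the + lines above repoint can be sketched roughly as follows. This is a minimal sketch, not the committed app.py: the select helper is not part of this diff, so its body and the firebase_admin setup here are assumptions inferred from its call site and from the db = firestore.client() context line in the first hunk.

import datetime
import firebase_admin
from firebase_admin import firestore

firebase_admin.initialize_app()  # assumption: default Google credentials are configured
db = firestore.client()          # mirrors get_db_firestore() shown in the first hunk

def upsert(text):
    # write one document keyed by timestamp, as in the + lines of this diff
    date_time = str(datetime.datetime.today())
    doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
    doc_ref.set({
        u'firefield': 'Recognize Speech',
        u'first': 'https://huggingface.co/spaces/awacke1/TTS-STT-Blocks/',
        u'last': text,
        u'born': date_time,
    })
    # the diff reads back from 'TTS-STT', a different collection than the one written above
    return select('TTS-STT', date_time)

def select(collection_name, document_id):
    # hypothetical reader matching the call above: fetch one document as a dict
    snapshot = db.collection(collection_name).document(document_id).get()
    return snapshot.to_dict() if snapshot.exists else None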