awacke1 committed on
Commit b3f66a0
1 Parent(s): 11065bd

Update app.py

Files changed (1)
  1. app.py +11 -17
app.py CHANGED
@@ -36,7 +36,7 @@ db = get_db_firestore()
 asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
 
 # create Text Classification pipeline
-#classifier = pipeline("text-classification")
+classifier = pipeline("text-classification")
 
 # create text generator pipeline
 story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
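Note: with the classifier pipeline re-enabled, the sentiment step runs on whatever default model transformers picks for the text-classification task. A minimal sketch of the call pattern, assuming only what the diff shows (the sample text and printout are illustrative):

from transformers import pipeline

# No model is pinned in the diff, so transformers falls back to its default
# English sentiment model for the "text-classification" task.
classifier = pipeline("text-classification")

result = classifier("I loved this story!")
# The pipeline returns a list of dicts, e.g. [{'label': 'POSITIVE', 'score': 0.99}]
print(result[0]["label"], result[0]["score"])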
@@ -73,9 +73,6 @@ def selectall(text):
   docs = db.collection('Text2SpeechSentimentSave').stream()
   doclist=''
   for doc in docs:
-  #docid=doc.id
-  #dict=doc.to_dict()
-  #doclist+=doc.to_dict()
   r=(f'{doc.id} => {doc.to_dict()}')
   doclist += r
   return doclist
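Note: selectall() now builds its result from the f-string line alone; the commented-out intermediate variables are gone. A self-contained sketch of the underlying Firestore read pattern, assuming a google-cloud-firestore client with credentials already configured (the collection name comes from the diff):

from google.cloud import firestore

db = firestore.Client()  # project and credentials are assumed to be configured

doclist = ''
for doc in db.collection('Text2SpeechSentimentSave').stream():
    # doc.id is the document key, doc.to_dict() its field payload
    doclist += f'{doc.id} => {doc.to_dict()}'
print(doclist)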
@@ -141,19 +138,19 @@ demo = gr.Blocks()
 with demo:
 
   audio_file = gr.inputs.Audio(source="microphone", type="filepath")
-  # text = gr.Textbox()
-  # label = gr.Label()
-  # saved = gr.Textbox()
-  # savedAll = gr.Textbox()
-  # audio = gr.Audio(label="Output", interactive=False)
+  text = gr.Textbox()
+  label = gr.Label()
+  saved = gr.Textbox()
+  savedAll = gr.Textbox()
+  audio = gr.Audio(label="Output", interactive=False)
 
   b1 = gr.Button("Recognize Speech")
-  # b2 = gr.Button("Classify Sentiment")
-  # b3 = gr.Button("Save Speech to Text")
+  b2 = gr.Button("Classify Sentiment")
+  b3 = gr.Button("Save Speech to Text")
   # b4 = gr.Button("Retrieve All")
-
-  # b2.click(text_to_sentiment, inputs=text, outputs=label)
-  # b3.click(upsert, inputs=text, outputs=saved)
+  b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text )
+  b2.click(text_to_sentiment, inputs=text, outputs=label)
+  b3.click(upsert, inputs=text, outputs=saved)
   # b4.click(selectall, inputs=text, outputs=savedAll)
 
   input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
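Note: the re-enabled components and the new b1/b2/b3 bindings follow the standard Gradio Blocks pattern: each click sends the inputs component's value to a callback and writes the return value into the outputs component. A runnable sketch under the Gradio 3.x API the diff targets, with placeholder callbacks standing in for the app's real speech_to_text, text_to_sentiment, and upsert helpers:

import gradio as gr

# Placeholder callbacks; the real app supplies its own speech_to_text,
# text_to_sentiment, and upsert functions.
def speech_to_text(audio_path):
    return f"transcript of {audio_path}"

def text_to_sentiment(text):
    return {"POSITIVE": 0.9, "NEGATIVE": 0.1}

def upsert(text):
    return f"saved: {text}"

with gr.Blocks() as demo:
    audio_file = gr.Audio(source="microphone", type="filepath")
    text = gr.Textbox()
    label = gr.Label()
    saved = gr.Textbox()

    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")
    b3 = gr.Button("Save Speech to Text")

    b1.click(speech_to_text, inputs=audio_file, outputs=text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)
    b3.click(upsert, inputs=text, outputs=saved)

demo.launch()

In app.py itself, b1 writes into input_start_text rather than a plain Textbox, so the recognized speech seeds the story generator below.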
@@ -170,9 +167,6 @@ with demo:
   output_interpolation = gr.Video(label="Generated Video")
 
   # Bind functions to buttons
-
-  b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text )
-
   button_gen_story.click(fn=generate_story, inputs=[input_story_type , input_start_text], outputs=output_generated_story)
   button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
   button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)
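Note: with the duplicate binding removed, b1.click now sits with the other button wiring higher in the layout, and the story chain picks up the transcript from input_start_text. A hypothetical sketch of a generate_story helper built on the story_gen pipeline named in the diff; the genre-prefix prompt format and the generation parameters are assumptions, not code taken from app.py:

from transformers import pipeline

story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")

def generate_story(story_type, start_text):
    # Assumed prompt format: the model card for this GPT-2 variant describes a
    # "<BOS> <genre>" prefix; treat this as an illustration only.
    prompt = f"<BOS> <{story_type}> {start_text}"
    out = story_gen(prompt, max_length=150, num_return_sequences=1)
    return out[0]["generated_text"]

print(generate_story("sci_fi", "The ship drifted past the last beacon."))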
 