awacke1 committed on
Commit
a8505b9
1 Parent(s): bb1f58a

Create app.py

Files changed (1)
  1. app.py +214 -0
app.py ADDED
@@ -0,0 +1,214 @@
+ import gradio as gr
+ from transformers import pipeline
+ import io, base64
+ from PIL import Image
+ import numpy as np
+ import tensorflow as tf
+ import mediapy
+ import os
+ import sys
+ from huggingface_hub import snapshot_download
+
+ import streamlit as st
+ import firebase_admin
+ from firebase_admin import credentials
+ from firebase_admin import firestore
+ import datetime
+
+
+ # Load the Cloud Firestore client, which establishes a connection to the dataset where we persist data.
+ @st.experimental_singleton
+ def get_db_firestore():
+     cred = credentials.Certificate('test.json')
+     firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117',})
+     db = firestore.client()
+     return db
+
+ # Start it up.
+ db = get_db_firestore()
+ asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
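+ # @st.experimental_singleton caches a single Firestore client per server process, so the
+ # credential file ('test.json', presumably a Firebase service-account key shipped with the
+ # Space) is only read once. wav2vec2-base-960h is an English ASR checkpoint; the pipeline
+ # reads the audio file path Gradio passes in and returns the transcript text.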
+
+ def transcribe(audio):
+     text = asr(audio)["text"]
+     return text
+
+ classifier = pipeline("text-classification")
+
+ def speech_to_text(speech):
+     text = asr(speech)["text"]
+     return text
+
+ def text_to_sentiment(text):
+     sentiment = classifier(text)[0]["label"]
+     return sentiment
+
+ def upsert(text):
+     date_time = str(datetime.datetime.today())
+     doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
+     doc_ref.set({u'firefield': 'Recognize Speech', u'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave', u'last': text, u'born': date_time,})
+     saved = select('Text2SpeechSentimentSave', date_time)
+     # check it here: https://console.firebase.google.com/u/0/project/clinical-nlp-b9117/firestore/data/~2FStreamlitSpaces
+     return saved
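+ # Because each call uses a fresh timestamp as the document ID, every save creates a new
+ # document; despite the name, this never updates an existing one. Illustrative layout:
+ #   Text2SpeechSentimentSave/<timestamp> =>
+ #     {'firefield': 'Recognize Speech', 'first': <space URL>, 'last': <transcript>, 'born': <timestamp>}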
+
+ def select(collection, document):
+     doc_ref = db.collection(collection).document(document)
+     doc = doc_ref.get()
+     contents = f'The id is: {doc.id}. The contents are: {doc.to_dict()}'
+     return contents
+
+ def selectall(text):
+     docs = db.collection('Text2SpeechSentimentSave').stream()
+     doclist = ''
+     for doc in docs:
+         doclist += f'{doc.id} => {doc.to_dict()}\n'
+     return doclist
+
+ # 1. Story Generation Pipeline (a GPT-2 model fine-tuned for genre-conditioned stories)
+ story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
+
+ # 2. LatentDiffusion: Latent Diffusion Interface
+ image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
+
+ # 3. FILM: Frame Interpolation Model (code re-use from spaces/akhaliq/frame-interpolation/tree/main)
+ os.system("git clone https://github.com/google-research/frame-interpolation")
+ sys.path.append("frame-interpolation")
+ from eval import interpolator, util
+
+ ffmpeg_path = util.get_ffmpeg_path()
+ mediapy.set_ffmpeg(ffmpeg_path)
+
+ model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
+ interpolator = interpolator.Interpolator(model, None)
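+ # The line above rebinds the name `interpolator` from the imported module to an
+ # Interpolator instance; that is safe here only because the module itself is never
+ # referenced again. The git clone and weight download run at startup, so the first
+ # launch is slow.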
+
+ def generate_story(choice, input_text):
+     query = "<BOS> <{0}> {1}".format(choice, input_text)
+
+     print(query)
+     generated_text = story_gen(query)
+     generated_text = generated_text[0]['generated_text']
+     generated_text = generated_text.split('> ')[2]
+
+     return generated_text
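+ # The genre model is prompted as "<BOS> <genre> starting text", and its output echoes
+ # that prefix; split('> ')[2] therefore drops the "<BOS" and "<genre" pieces and keeps
+ # only the story body.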
+
+ def generate_images(generated_text):
+     steps = 50
+     width = 256
+     height = 256
+     num_images = 4
+     diversity = 6
+     image_bytes = image_gen(generated_text, steps, width, height, num_images, diversity)
+
+     # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py
+     generated_images = []
+     for image in image_bytes[1]:
+         image_str = image[0]
+         image_str = image_str.replace("data:image/png;base64,","")
+         decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
+         img = Image.open(io.BytesIO(decoded_bytes))
+         generated_images.append(img)
+
+     return generated_images
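+ # The remote Space returns its gallery as base64-encoded PNG data URIs; the loop above
+ # strips the "data:image/png;base64," prefix and decodes each entry into a PIL image
+ # that the local gr.Gallery can render.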
123
+
124
+ def generate_interpolation(gallery):
125
+ times_to_interpolate = 4
126
+
127
+ generated_images = []
128
+ for image_str in gallery:
129
+ image_str = image_str.replace("data:image/png;base64,","")
130
+ decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
131
+ img = Image.open(io.BytesIO(decoded_bytes))
132
+ generated_images.append(img)
133
+
134
+ generated_images[0].save('frame_0.png')
135
+ generated_images[1].save('frame_1.png')
136
+ generated_images[2].save('frame_2.png')
137
+ generated_images[3].save('frame_3.png')
138
+
139
+ input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"]
140
+
141
+ frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))
142
+
143
+ mediapy.write_video("out.mp4", frames, fps=15)
144
+
145
+ return "out.mp4"
+
+
+ demo = gr.Blocks()
+
+ with demo:
+     #audio_file = gr.Audio(type="filepath")
+     audio_file = gr.inputs.Audio(source="microphone", type="filepath")
+     text = gr.Textbox()
+     label = gr.Label()
+     saved = gr.Textbox()
+     savedAll = gr.Textbox()
+
+     b1 = gr.Button("Recognize Speech")
+     b2 = gr.Button("Classify Sentiment")
+     b3 = gr.Button("Save Speech to Text")
+     b4 = gr.Button("Retrieve All")
+
+     b1.click(speech_to_text, inputs=audio_file, outputs=text)
+     b2.click(text_to_sentiment, inputs=text, outputs=label)
+     b3.click(upsert, inputs=text, outputs=saved)
+     b4.click(selectall, inputs=text, outputs=savedAll)
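+     # Speech widgets form a pipeline: microphone audio -> transcript (b1) -> sentiment
+     # label (b2), with b3 persisting the transcript to Firestore and b4 listing all
+     # saved documents.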
+
+     with gr.Row():
+
+         # Left column (inputs)
+         with gr.Column():
+             input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
+             input_start_text = gr.Textbox(placeholder='A teddy bear in outer space', label="Starting Text")
+
+             gr.Markdown("Be sure to run each of the buttons one at a time, they depend on each other's outputs!")
+
+             # Rows of instructions & buttons
+             with gr.Row():
+                 gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!")
+                 button_gen_story = gr.Button("Generate Story")
+             with gr.Row():
+                 gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
+                 button_gen_images = gr.Button("Generate Images")
+             with gr.Row():
+                 gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
+                 button_gen_video = gr.Button("Generate Video")
+
+             # Rows of references
+             with gr.Row():
+                 gr.Markdown("--Models Used--")
+             with gr.Row():
+                 gr.Markdown("Story Generation: [GPT-2 Genre Story Generator](https://huggingface.co/pranavpsv/gpt2-genre-story-generator)")
+             with gr.Row():
+                 gr.Markdown("Image Generation Conditioned on Text: [Latent Diffusion](https://huggingface.co/spaces/multimodalart/latentdiffusion) | [Github Repo](https://github.com/CompVis/latent-diffusion)")
+             with gr.Row():
+                 gr.Markdown("Interpolations: [FILM](https://huggingface.co/spaces/akhaliq/frame-interpolation) | [Github Repo](https://github.com/google-research/frame-interpolation)")
+             with gr.Row():
+                 gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=gradio-blocks_story_and_video_generation)")
+
+         # Right column (outputs)
+         with gr.Column():
+             output_generated_story = gr.Textbox(label="Generated Story")
+             output_gallery = gr.Gallery(label="Generated Story Images")
+             output_interpolation = gr.Video(label="Generated Video")
+
+     # Bind functions to buttons
+     button_gen_story.click(fn=generate_story, inputs=[input_story_type, input_start_text], outputs=output_generated_story)
+     button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
+     button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)
+
+ demo.launch(debug=True, enable_queue=True)
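+ # enable_queue=True sends requests through Gradio's queue so the long-running story,
+ # image, and video generation calls are not cut off by the default request timeout.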