Priyabrata017 committed on
Commit
bae07b8
1 Parent(s): a09b5be

Create new file

Files changed (1)
  1. app.py +174 -0
app.py ADDED
@@ -0,0 +1,174 @@
+ import gradio as gr
+ from transformers import pipeline
+ import io, base64
+ from PIL import Image
+ import mediapy
+ import os
+ import sys
+ from huggingface_hub import snapshot_download
+
+ import streamlit as st
+ import firebase_admin
+ from firebase_admin import credentials
+ from firebase_admin import firestore
+ import datetime
+
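+ # assumed runtime dependencies for this Space (not pinned in this commit): gradio,
+ # transformers, torch, streamlit, firebase-admin, Pillow, mediapy, huggingface_hub,
+ # plus TensorFlow pulled in by the frame-interpolation repo cloned below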
+
+ # firestore singleton: a cached, multi-user client that persists shared crowdsourced data
+ @st.experimental_singleton
+ def get_db_firestore():
+     # assumes 'test.json' is the Firebase service-account key for this project
+     cred = credentials.Certificate('test.json')
+     firebase_admin.initialize_app(cred, {'projectId': 'clinical-nlp-b9117'})
+     db = firestore.client()
+     return db
+
+ # start the firestore singleton
+ db = get_db_firestore()
+
+ # create ASR ML pipeline
+ asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
+
+ # create Text Classification pipeline
+ classifier = pipeline("text-classification")
+
+ # create text generator pipeline
+ story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
+
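+ # illustrative output shapes, assumed from the transformers pipeline API:
+ #   asr("clip.wav")           -> {"text": "HELLO WORLD"}  (wav2vec2 emits uppercase text)
+ #   classifier("great movie") -> [{"label": "POSITIVE", "score": 0.99}]
+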
+ # transcribe microphone audio to text with the ASR pipeline
+ def speech_to_text(speech):
+     text = asr(speech)["text"]
+     return text
+
+ # classify the sentiment of a piece of text
+ def text_to_sentiment(text):
+     sentiment = classifier(text)[0]["label"]
+     return sentiment
+
+ # save a transcript to Firestore, keyed by timestamp, and read it back
+ def upsert(text):
+     date_time = str(datetime.datetime.today())
+     doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
+     doc_ref.set({'firefield': 'Recognize Speech', 'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave', 'last': text, 'born': date_time})
+     saved = select('Text2SpeechSentimentSave', date_time)
+     # check it here: https://console.firebase.google.com/u/0/project/clinical-nlp-b9117/firestore/data/~2FStreamlitSpaces
+     return saved
+
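+ # illustrative document written by upsert (shape follows the set() call above):
+ #   '2022-04-01 12:34:56.789012' => {'firefield': 'Recognize Speech', 'first': '<space URL>', 'last': '<transcript>', 'born': '<timestamp>'}
+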
+ # fetch a single document and report its id and contents
+ def select(collection, document):
+     doc_ref = db.collection(collection).document(document)
+     doc = doc_ref.get()
+     return f"The id is: {doc.id}. The contents are: {doc.to_dict()}"
+
+ # stream every saved document; the text input is unused but keeps the Gradio binding uniform
+ def selectall(text):
+     docs = db.collection('Text2SpeechSentimentSave').stream()
+     doclist = ''
+     for doc in docs:
+         doclist += f'{doc.id} => {doc.to_dict()}\n'
+     return doclist
+
+ # story gen: the model expects a "<BOS> <genre>" control prefix before the prompt
+ def generate_story(choice, input_text):
+     query = "<BOS> <{0}> {1}".format(choice, input_text)
+     generated_text = story_gen(query)
+     generated_text = generated_text[0]['generated_text']
+     # drop the echoed control tokens from the front of the output
+     generated_text = generated_text.split('> ')[2]
+     return generated_text
+
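+ # illustrative round trip (assumed): generate_story('sci_fi', 'A teddy bear') sends
+ # "<BOS> <sci_fi> A teddy bear"; split('> ')[2] then returns everything after the prefix
+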
+ # images gen: call the hosted latent-diffusion Space and decode its base64 results
+ def generate_images(text):
+     steps = 50
+     width = 256
+     height = 256
+     num_images = 4
+     diversity = 6
+     image_bytes = image_gen(text, steps, width, height, num_images, diversity)
+     generated_images = []
+     for image in image_bytes[1]:
+         image_str = image[0]
+         image_str = image_str.replace("data:image/png;base64,", "")
+         decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
+         img = Image.open(io.BytesIO(decoded_bytes))
+         generated_images.append(img)
+     return generated_images
+
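+ # note: this assumes the latentdiffusion Space returns a tuple whose second element
+ # is a list of [base64_png] entries; that matched the Space's output when this was written
+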
+ # interpolate the 4 gallery images into a short video - todo - unhardcode the 4-frame pattern
+ def generate_interpolation(gallery):
+     times_to_interpolate = 4
+     generated_images = []
+     for image_str in gallery:
+         image_str = image_str.replace("data:image/png;base64,", "")
+         decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
+         img = Image.open(io.BytesIO(decoded_bytes))
+         generated_images.append(img)
+     # write frames to disk so the FILM interpolator can read them as files
+     generated_images[0].save('frame_0.png')
+     generated_images[1].save('frame_1.png')
+     generated_images[2].save('frame_2.png')
+     generated_images[3].save('frame_3.png')
+     input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"]
+     frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))
+     mediapy.write_video("out.mp4", frames, fps=15)
+     return "out.mp4"
+
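+ # note: this assumes the Gallery hands back base64 data-URL strings and that exactly
+ # four images are present (see the todo above)
+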
+ # image generator: load the public latent-diffusion Space as a callable interface
+ image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
+
+ # video generator: clone Google's FILM frame-interpolation repo and import its eval helpers
+ os.system("git clone https://github.com/google-research/frame-interpolation")
+ sys.path.append("frame-interpolation")
+ from eval import interpolator, util
+
+ ffmpeg_path = util.get_ffmpeg_path()
+ mediapy.set_ffmpeg(ffmpeg_path)
+ # download the pretrained FILM-style model and build the interpolator
+ model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
+ interpolator = interpolator.Interpolator(model, None)
+
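+ # note: Interpolator(model, None) follows the FILM repo's eval API as used by similar
+ # Spaces; the second (alignment) argument is assumed safe to leave unset
+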
+ demo = gr.Blocks()
+ with demo:
+
+     audio_file = gr.inputs.Audio(source="microphone", type="filepath")
+     text = gr.Textbox()
+     label = gr.Label()
+     saved = gr.Textbox()
+     savedAll = gr.Textbox()
+     audio = gr.Audio(label="Output", interactive=False)
+
+     b1 = gr.Button("Recognize Speech")
+     b2 = gr.Button("Classify Sentiment")
+     b3 = gr.Button("Save Speech to Text")
+     b4 = gr.Button("Retrieve All")
+
+     input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
+     input_start_text = gr.Textbox(placeholder='A teddy bear in outer space', label="Starting Text")
+
+     gr.Markdown("1. Select a story genre and write some starting text, then hit 'Generate Story'. Feel free to edit the result afterwards.")
+     button_gen_story = gr.Button("Generate Story")
+     gr.Markdown("2. After generating a story, hit 'Generate Images' to create visuals for it. (You can re-run this multiple times.)")
+     button_gen_images = gr.Button("Generate Images")
+     gr.Markdown("3. After generating images, hit 'Generate Video' to interpolate them into a short video.")
+     button_gen_video = gr.Button("Generate Video")
+     output_generated_story = gr.Textbox(label="Generated Story")
+     output_gallery = gr.Gallery(label="Generated Story Images")
+     output_interpolation = gr.Video(label="Generated Video")
+
+     # bind the story/images/video pipeline to its buttons
+     button_gen_story.click(fn=generate_story, inputs=[input_story_type, input_start_text], outputs=output_generated_story)
+     button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
+     button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)
+
+     # bind the speech, sentiment, and Firestore helpers to their buttons
+     b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text)
+     b2.click(text_to_sentiment, inputs=text, outputs=label)
+     b3.click(upsert, inputs=text, outputs=saved)
+     b4.click(selectall, inputs=text, outputs=savedAll)
+
+ # queueing keeps the long-running generation steps from hitting request timeouts
+ demo.launch(debug=True, enable_queue=True)