import gradio as gr
from transformers import pipeline
import io, base64
from PIL import Image
import numpy as np
import tensorflow as tf
import mediapy
import os
import sys
from huggingface_hub import snapshot_download
import streamlit as st
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime
import tempfile
from typing import Optional
from TTS.utils.manage import ModelManager
from TTS.utils.synthesizer import Synthesizer
# firestore singleton: a cached, multi-user instance that persists shared crowdsourced memory
@st.experimental_singleton
def get_db_firestore():
    cred = credentials.Certificate('test.json')
    firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117'})
    db = firestore.client()
    return db
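
# Usage note: @st.experimental_singleton caches the client across Streamlit
# reruns, so all user sessions share one Firestore connection. This assumes
# 'test.json' contains a valid service-account key for clinical-nlp-b9117.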
#start firestore singleton
db = get_db_firestore()
# create ASR ML pipeline
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
# create Text Classification pipeline
classifier = pipeline("text-classification")
# create text generator pipeline
story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
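
# Illustrative pipeline outputs (hypothetical values; shapes match the pipeline APIs):
#   asr("clip.wav")              -> {"text": "HELLO WORLD"}
#   classifier("a great story")  -> [{"label": "POSITIVE", "score": 0.99}]
#   story_gen("<BOS> <sci_fi> A teddy bear")[0]["generated_text"] -> continuation string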
# transcribe function
def transcribe(audio):
    text = asr(audio)["text"]
    return text

def speech_to_text(speech):
    text = asr(speech)["text"]
    return text

def text_to_sentiment(text):
    sentiment = classifier(text)[0]["label"]
    return sentiment
def upsert(text):
    date_time = str(datetime.datetime.today())
    doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
    doc_ref.set({
        u'firefield': 'Recognize Speech',
        u'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave',
        u'last': text,
        u'born': date_time,
    })
    saved = select('Text2SpeechSentimentSave', date_time)
    # check it here: https://console.firebase.google.com/u/0/project/clinical-nlp-b9117/firestore/data/~2FStreamlitSpaces
    return saved
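
# Example (hypothetical): upsert("hello world") writes a timestamp-keyed document
# and returns the select() readback of that same document as confirmation.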
def select(collection, document):
    doc_ref = db.collection(collection).document(document)
    doc = doc_ref.get()
    # return the document id and contents as one readable string
    return f"The id is: {doc.id}. The contents are: {doc.to_dict()}"
def selectall(text):
    docs = db.collection('Text2SpeechSentimentSave').stream()
    doclist = ''
    for doc in docs:
        doclist += f'{doc.id} => {doc.to_dict()}\n'
    return doclist
# story gen
def generate_story(choice, input_text):
    # the model expects a "<BOS> <genre> prompt" format
    query = "<BOS> <{0}> {1}".format(choice, input_text)
    generated_text = story_gen(query)
    generated_text = generated_text[0]['generated_text']
    # strip the "<BOS> <genre>" prefix, keeping only the story text
    generated_text = generated_text.split('> ')[2]
    return generated_text
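
# Example (hypothetical): generate_story("sci_fi", "A teddy bear in outer space")
# prompts the model with "<BOS> <sci_fi> A teddy bear in outer space" and returns
# only the continuation after the genre tag.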
# images gen
def generate_images(text):
    steps = 50
    width = 256
    height = 256
    num_images = 4
    diversity = 6
    image_bytes = image_gen(text, steps, width, height, num_images, diversity)
    # decode the base64-encoded PNGs returned by the latent diffusion Space
    generated_images = []
    for image in image_bytes[1]:
        image_str = image[0]
        image_str = image_str.replace("data:image/png;base64,", "")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        generated_images.append(img)
    return generated_images
# interpolate the 4 gallery images into a video - TODO: unhardcode the 4-frame pattern
def generate_interpolation(gallery):
    times_to_interpolate = 4
    # decode the gallery's base64 strings back into PIL images
    generated_images = []
    for image_str in gallery:
        image_str = image_str.replace("data:image/png;base64,", "")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        generated_images.append(img)
    # write the four frames to disk for the FILM interpolator
    generated_images[0].save('frame_0.png')
    generated_images[1].save('frame_1.png')
    generated_images[2].save('frame_2.png')
    generated_images[3].save('frame_3.png')
    input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"]
    frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))
    mediapy.write_video("out.mp4", frames, fps=15)
    return "out.mp4"
# image generator
image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
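# Calling image_gen(...) invokes the remote Space; element [1] of the result is
# assumed to hold the list of base64-encoded PNGs decoded in generate_images() above.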
# video generator
os.system("git clone https://github.com/google-research/frame-interpolation")
sys.path.append("frame-interpolation")
from eval import interpolator, util
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
interpolator = interpolator.Interpolator(model, None)
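# The FILM interpolator recursively inserts midpoint frames, so (assuming the
# standard recursive scheme) times_to_interpolate=4 yields 2**4 - 1 = 15
# in-between frames for each pair of input frames.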
demo = gr.Blocks()
with demo:
    # speech and sentiment components
    audio_file = gr.inputs.Audio(source="microphone", type="filepath")
    text = gr.Textbox()
    label = gr.Label()
    saved = gr.Textbox()
    savedAll = gr.Textbox()
    audio = gr.Audio(label="Output", interactive=False)
    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")
    b3 = gr.Button("Save Speech to Text")
    # b4 = gr.Button("Retrieve All")
    # story components
    input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
    input_start_text = gr.Textbox(placeholder='A teddy bear in outer space', label="Starting Text")
    gr.Markdown("1. Select a story genre, then write some starting text! Hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!")
    button_gen_story = gr.Button("Generate Story")
    gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
    button_gen_images = gr.Button("Generate Images")
    gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
    button_gen_video = gr.Button("Generate Video")
    output_generated_story = gr.Textbox(label="Generated Story")
    output_gallery = gr.Gallery(label="Generated Story Images")
    output_interpolation = gr.Video(label="Generated Video")
    # Bind functions to buttons (every component above is defined before it is wired)
    b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)
    b3.click(upsert, inputs=text, outputs=saved)
    # b4.click(selectall, inputs=text, outputs=savedAll)
    button_gen_story.click(fn=generate_story, inputs=[input_story_type, input_start_text], outputs=output_generated_story)
    button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
    button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)
demo.launch(debug=True, enable_queue=True)