import io, base64
import os
import sys
import datetime
import tempfile
from typing import Optional

import gradio as gr
import streamlit as st
import numpy as np
import tensorflow as tf
import mediapy
from PIL import Image

from transformers import pipeline
from huggingface_hub import snapshot_download

import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore

from TTS.utils.manage import ModelManager
from TTS.utils.synthesizer import Synthesizer
|
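# Firestore setup. st.experimental_singleton caches the client across
# Streamlit reruns, so firebase_admin is only initialized once per process.
# 'test.json' is the service-account key file expected alongside this script.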
@st.experimental_singleton
def get_db_firestore():
    cred = credentials.Certificate('test.json')
    firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117'})
    db = firestore.client()
    return db

db = get_db_firestore()
|
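# Hugging Face pipelines: wav2vec2 for speech recognition, the library's
# default text-classification model (a DistilBERT sentiment classifier at the
# time of writing) for sentiment, and a genre-conditioned GPT-2 for stories.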
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
classifier = pipeline("text-classification")
story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
|
|
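# The ASR pipeline accepts a local file path (Gradio records the microphone
# to a temp file via type="filepath" below) and returns a dict with a "text"
# key. Hypothetical local check:
#   speech_to_text("sample.wav")  # -> e.g. "HELLO WORLD"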
def speech_to_text(speech):
    text = asr(speech)["text"]
    return text
|
def text_to_sentiment(text):
    sentiment = classifier(text)[0]["label"]
    return sentiment
|
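# Save a transcript to Firestore, keyed by timestamp so every save creates a
# new document, then read it back to confirm the write.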
def upsert(text):
    date_time = str(datetime.datetime.today())
    doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
    doc_ref.set({
        u'firefield': 'Recognize Speech',
        u'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave',
        u'last': text,
        u'born': date_time,
    })
    saved = select('Text2SpeechSentimentSave', date_time)
    return saved
|
def select(collection, document):
    doc_ref = db.collection(collection).document(document)
    doc = doc_ref.get()
    # Format the id and contents as a readable string for display in the UI.
    return f"The id is: {doc.id}. The contents are: {doc.to_dict()}"
|
def selectall(text):
    # `text` is unused but kept so the Gradio button can pass an input.
    docs = db.collection('Text2SpeechSentimentSave').stream()
    doclist = ''
    for doc in docs:
        doclist += f'{doc.id} => {doc.to_dict()}\n'
    return doclist
|
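# The story generator is prompted with "<BOS> <genre> starting text", the tag
# format described on the model's card (genre is one of the radio choices
# below, e.g. <sci_fi>).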
def generate_story(choice, input_text):
    query = "<BOS> <{0}> {1}".format(choice, input_text)
    generated_text = story_gen(query)
    generated_text = generated_text[0]['generated_text']
    # Drop the echoed "<BOS> <genre> " prefix, keeping only the story text.
    generated_text = generated_text.split('> ')[2]
    return generated_text
|
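# Generate images by calling the loaded latent-diffusion Space like a
# function. Its second return value is a list of base64-encoded PNG data
# URIs (one per image), which are decoded back into PIL images here.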
def generate_images(text):
    steps = 50
    width = 256
    height = 256
    num_images = 4
    diversity = 6
    image_bytes = image_gen(text, steps, width, height, num_images, diversity)
    generated_images = []
    for image in image_bytes[1]:
        image_str = image[0]
        image_str = image_str.replace("data:image/png;base64,", "")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        generated_images.append(img)
    return generated_images
|
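# Interpolate between the four gallery frames with FILM to produce a short
# video. Recursive interpolation with times_to_interpolate=4 inserts
# 2**4 - 1 = 15 frames between each consecutive pair of inputs.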
def generate_interpolation(gallery):
    times_to_interpolate = 4
    generated_images = []
    for image_str in gallery:
        image_str = image_str.replace("data:image/png;base64,", "")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        generated_images.append(img)
    # FILM reads frames from disk, so write the four images out first.
    for i, img in enumerate(generated_images[:4]):
        img.save(f'frame_{i}.png')
    input_frames = [f"frame_{i}.png" for i in range(4)]
    frames = list(util.interpolate_recursively_from_files(
        input_frames, times_to_interpolate, interpolator))
    mediapy.write_video("out.mp4", frames, fps=15)
    return "out.mp4"
|
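# Load the public latent-diffusion Space as a callable interface
# (prompt + sampling parameters in, generated images out).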
image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
|
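# Fetch Google's FILM frame-interpolation code and model weights, and point
# mediapy at the ffmpeg binary that the repo's util module locates, so the
# output video can be encoded.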
os.system("git clone https://github.com/google-research/frame-interpolation") |
|
sys.path.append("frame-interpolation") |
|
from eval import interpolator, util |
|
|
|
ffmpeg_path = util.get_ffmpeg_path() |
|
mediapy.set_ffmpeg(ffmpeg_path) |
|
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style") |
|
interpolator = interpolator.Interpolator(model, None) |
|
|
|
demo = gr.Blocks()
with demo:
    # Speech / sentiment / Firestore controls.
    audio_file = gr.inputs.Audio(source="microphone", type="filepath")
    text = gr.Textbox()
    label = gr.Label()
    saved = gr.Textbox()
    savedAll = gr.Textbox()
    audio = gr.Audio(label="Output", interactive=False)

    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")
    b3 = gr.Button("Save Speech to Text")
    b4 = gr.Button("Retrieve All")

    # Story-to-video controls.
    input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
    input_start_text = gr.Textbox(placeholder='A teddy bear in outer space', label="Starting Text")

    gr.Markdown("1. Select a story genre and write some starting text, then hit the 'Generate Story' button. Feel free to edit the generated story afterwards!")
    button_gen_story = gr.Button("Generate Story")
    gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for it. (You can re-run this multiple times!)")
    button_gen_images = gr.Button("Generate Images")
    gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating between them.")
    button_gen_video = gr.Button("Generate Video")
    output_generated_story = gr.Textbox(label="Generated Story")
    output_gallery = gr.Gallery(label="Generated Story Images")
    output_interpolation = gr.Video(label="Generated Video")

    # Wire the story-to-video buttons to their handlers.
    button_gen_story.click(fn=generate_story, inputs=[input_story_type, input_start_text], outputs=output_generated_story)
    button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
    button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)

    # Wire the speech buttons; recognized speech feeds the story starting text.
    b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)
    b3.click(upsert, inputs=text, outputs=saved)
    b4.click(selectall, inputs=text, outputs=savedAll)

demo.launch(debug=True, enable_queue=True)