# NOTE(review): the following Hugging Face web-page chrome was scraped into this
# file and has been converted to a comment so the module can parse:
# "rsatish1110's picture / Create new file / history blame contribute delete /
#  No virus / 6.72 kB"
import gradio as gr
from transformers import pipeline
import io, base64
from PIL import Image
import numpy as np
import tensorflow as tf
import mediapy
import os
import sys
from huggingface_hub import snapshot_download
import streamlit as st
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime
import tempfile
from typing import Optional
import numpy as np
from TTS.utils.manage import ModelManager
from TTS.utils.synthesizer import Synthesizer
# firestore singleton is a cached multiuser instance to persist shared crowdsource memory
def get_db_firestore():
    """Initialize the firebase_admin default app and return a Firestore client.

    Reads the service-account key from 'test.json' in the working directory and
    targets project 'clinical-nlp-b9117'.  Called once at import time (module
    singleton below); firebase_admin raises if the default app already exists.
    """
    cred = credentials.Certificate('test.json')
    firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117',})
    db = firestore.client()
    return db
#start firestore singleton (shared, created once at import time)
db = get_db_firestore()
# create ASR ML pipeline (wav2vec2 English speech-to-text)
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
# create Text Classification pipeline (default model — sentiment, presumably SST-2; verify)
classifier = pipeline("text-classification")
# create text generator pipeline (GPT-2 fine-tuned for genre-tagged story generation)
story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
# transcribe function
def transcribe(audio):
    """Return the ASR transcript for *audio* (a filepath, per the gradio input)."""
    text = asr(audio)["text"]
    return text
def speech_to_text(speech):
    """Return the ASR transcript for *speech*.

    NOTE(review): duplicates transcribe() above; kept because the UI binds this
    name.  Delegating preserves behavior while removing the copied logic.
    """
    return transcribe(speech)
def text_to_sentiment(text):
    """Return the top predicted label for *text* (e.g. POSITIVE/NEGATIVE — depends on the default model)."""
    sentiment = classifier(text)[0]["label"]
    return sentiment
def upsert(text):
    """Persist *text* to the 'Text2SpeechSentimentSave' collection, keyed by timestamp.

    Returns the document read back via select() so the UI can confirm the write.
    """
    # fix: the original line was truncated ("date_time =str("); the document id
    # is the current local timestamp, matching the 'born' field written below.
    date_time = str(datetime.datetime.today())
    doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
    doc_ref.set({u'firefield': 'Recognize Speech', u'first': '', u'last': text, u'born': date_time,})
    saved = select('Text2SpeechSentimentSave', date_time)
    # check it here: https://console.firebase.google.com — verify the write landed
    return saved
def select(collection, document):
    """Fetch one Firestore document; return a ('The contents are: ', dict) tuple."""
    doc_ref = db.collection(collection).document(document)
    doc = doc_ref.get()
    # fix: original line was truncated mid-tuple; doc.id is the natural payload.
    # docid is only useful for debugging — it is not returned.
    docid = ("The id is: ", doc.id)
    contents = ("The contents are: ", doc.to_dict())
    return contents
def selectall(text):
    """Concatenate every document in 'Text2SpeechSentimentSave' into one string.

    *text* is unused; the parameter exists so the function can be wired to a
    gradio input component.
    """
    docs = db.collection('Text2SpeechSentimentSave').stream()
    doclist = ''  # fix: accumulator was never initialized (NameError on first +=)
    for doc in docs:
        # fix: the original f-string had an empty {} placeholder; doc.id is the key
        r = f'{doc.id} => {doc.to_dict()}'
        doclist += r
    return doclist
# story gen
def generate_story(choice, input_text):
    """Generate a story of genre *choice* seeded with *input_text*.

    The model expects a "<BOS> <genre> seed" prompt; the genre tags are
    stripped from the output by splitting on the second '> '.
    """
    query = "<BOS> <{0}> {1}".format(choice, input_text)
    generated_text = story_gen(query)
    generated_text = generated_text[0]['generated_text']
    # drop the "<BOS> <genre>" prefix — index 2 is everything after the two tags
    generated_text = generated_text.split('> ')[2]
    return generated_text
# images gen
def generate_images(text):
    """Call the latent-diffusion Space on *text*; return a list of PIL Images."""
    # fix: these generation knobs were referenced but never defined in the
    # scraped copy — values reconstructed; confirm against the Space's API.
    steps = 50
    width = 256
    height = 256
    num_images = 4
    diversity = 6
    image_bytes = image_gen(text, steps, width, height, num_images, diversity)
    generated_images = []
    # image_bytes[1] presumably holds base64 PNG data URIs — TODO confirm
    for image in image_bytes[1]:
        image_str = image[0]
        image_str = image_str.replace("data:image/png;base64,","")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        # fix: the decode/append lines were truncated in the scraped copy
        img = Image.open(io.BytesIO(decoded_bytes))
        generated_images.append(img)
    return generated_images
# reductionism - interpolate 4 images - todo - unhardcode the pattern
def generate_interpolation(gallery):
    """Decode the gallery's base64 images, FILM-interpolate them, write out.mp4.

    Returns the output video path for the gradio Video component.
    """
    times_to_interpolate = 4
    generated_images = []
    for image_str in gallery:
        image_str = image_str.replace("data:image/png;base64,","")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        # fix: the decode/append lines were truncated in the scraped copy
        img = Image.open(io.BytesIO(decoded_bytes))
        generated_images.append(img)
    # fix: the frames must exist on disk before interpolation — the save calls
    # were lost in the scrape.  Hard-coded to 4 frames, matching input_frames.
    for i, img in enumerate(generated_images[:4]):
        img.save(f"frame_{i}.png")
    input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"]
    frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))
    mediapy.write_video("out.mp4", frames, fps=15)
    return "out.mp4"
# image generator — remote latent-diffusion Space used as a callable
image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
# video generator — FILM frame interpolation
# fix: the clone command was truncated ("git clone" with no URL) and the cloned
# repo was never added to sys.path, so `from eval import ...` could not resolve.
os.system("git clone https://github.com/google-research/frame-interpolation frame-interpolation")
sys.path.append("frame-interpolation")
from eval import interpolator, util
ffmpeg_path = util.get_ffmpeg_path()
# presumably mediapy should be pointed at the bundled ffmpeg — confirm
mediapy.set_ffmpeg(ffmpeg_path)
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
interpolator = interpolator.Interpolator(model, None)
demo = gr.Blocks()
with demo:
    # inputs / outputs shared by the speech + sentiment + Firestore features
    audio_file = gr.inputs.Audio(source="microphone", type="filepath")
    text = gr.Textbox()
    label = gr.Label()
    saved = gr.Textbox()
    savedAll = gr.Textbox()
    audio = gr.Audio(label="Output", interactive=False)
    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")
    b3 = gr.Button("Save Speech to Text")
    b4 = gr.Button("Retrieve All")
    # story -> images -> video pipeline controls
    input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
    input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text")
    gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!")
    button_gen_story = gr.Button("Generate Story")
    gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
    button_gen_images = gr.Button("Generate Images")
    gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
    button_gen_video = gr.Button("Generate Video")
    output_generated_story = gr.Textbox(label="Generated Story")
    output_gallery = gr.Gallery(label="Generated Story Images")
    output_interpolation = gr.Video(label="Generated Video")
    # Bind functions to buttons — reconstructed from the mangled scraped line,
    # matching each surviving inputs=/outputs= fragment to its handler.
    button_gen_story.click(generate_story, inputs=[input_story_type, input_start_text], outputs=output_generated_story)
    button_gen_images.click(generate_images, inputs=output_generated_story, outputs=output_gallery)
    button_gen_video.click(generate_interpolation, inputs=output_gallery, outputs=output_interpolation)
    b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)
    b3.click(upsert, inputs=text, outputs=saved)
    b4.click(selectall, inputs=text, outputs=savedAll)
demo.launch(debug=True, enable_queue=True)