#!/usr/bin/python3
# -*- coding: utf-8 -*-
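# Gradio app: transcribe Spanish speech to text and turn the transcription
# into an image generated by Stable Diffusion 2.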
import os

# Install the runtime dependencies before importing them below.
print("Installing...")
os.system("pip install gradio tf-keras diffusers accelerate transformers numpy torch")
#os.system("pip install --upgrade pip")
print("Installation finished!")

from transformers import pipeline
import gradio as gr
import torch
import accelerate  # not referenced directly; imported to confirm the package is available
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
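
# Text-to-image model: Stable Diffusion 2 with the Euler discrete scheduler,
# loaded in float32 and kept on the CPU.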
model_id = "stabilityai/stable-diffusion-2"
scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
image_model = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float32)
image_model = image_model.to("cpu")
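
# Speech-to-text model: Spanish wav2vec2 ASR pipeline.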
model = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")

def transcribe_text_audio(mic=None, file=None):
    # Prefer the microphone recording; otherwise fall back to the uploaded file.
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        # Return a message and no image so both Gradio outputs receive a value.
        return "No se ha detectado ninguna entrada de audio", None
    # Transcribe the audio, then use the transcription as the image prompt.
    transcription = model(audio)["text"]
    image = image_model(transcription).images[0]
    image = image.convert("RGB")
    return transcription, image
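
# Gradio interface: microphone or uploaded audio in; transcription text and
# generated image out.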
gr.Interface(
    fn=transcribe_text_audio,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.Audio(sources=["upload"], type="filepath"),
    ],
    outputs=[
        gr.Textbox(label="Transcripción del Audio"),
        gr.Image(label="Imagen Generada"),
    ],
    title="[Español] - Audio -> Texto -> Imagen",
    description="Esta aplicación transcribe el audio a texto para convertirlo en una imagen descriptiva.",
).launch()