#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os

# Reinstall Gradio so the pinned 3.50.0 release matches the API used in the app below.
print("Installing correct gradio version...")
os.system("pip uninstall -y gradio")
os.system("pip install torch")
os.system("pip install gradio==3.50.0")
print("Installation finished!")

#!/usr/bin/python3
# -*- coding: utf-8 -*-

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
from transformers import pipeline


# Stable Diffusion 2 text-to-image pipeline (fp16 on GPU).
model_id = "stabilityai/stable-diffusion-2"

scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
image_model = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)
image_model = image_model.to("cuda")

# Spanish automatic speech recognition via the transformers pipeline.
model = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")

def transcribe_text_audio(mic=None, file=None):
    # Prefer the microphone recording; fall back to an uploaded file.
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        # The interface has two outputs, so return a placeholder for each.
        return "No audio input detected", None

    # Transcribe the Spanish audio, then generate an image from the transcript.
    transcription = model(audio)["text"]
    image = image_model(transcription).images[0]
    image = image.convert("RGB")
    return transcription, image


# Gradio 3.x Audio components take a singular `source` argument.
gr.Interface(
    fn=transcribe_text_audio,
    inputs=[
        gr.Audio(source="microphone", type="filepath"),
        gr.Audio(source="upload", type="filepath"),
    ],
    outputs=["text", "image"],
).launch()
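
# Quick local check (a sketch; "example.wav" is a placeholder name for a real
# Spanish recording on disk, not a file shipped with this Space):
#
#     transcription, image = transcribe_text_audio(file="example.wav")
#     print(transcription)
#     image.save("output.png")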