Commit b3d0e64 · 1 Parent(s): e0274e2
Update app.py
app.py CHANGED
@@ -11,52 +11,63 @@ def get_thumbnail(image_path, width):
     image.thumbnail((width, width))
     return image
 
-
+generator = pipeline('text-generation', model="checkpoints")
+tokenizer = GPT2Tokenizer.from_pretrained('checkpoints')
 os.environ["TOKENIZERS_PARALLELISM"] = "true"
-generator = pipeline('text-generation', model="salomonsky/deepSP")
-tokenizer = GPT2Tokenizer.from_pretrained('salomonsky/deepSP')
 
 def generate_output(name, date_of_birth, image):
-    prompt = f"Tu
+    prompt = f"Tu carta astral de hoy {name} es:"
     input_tokens = tokenizer.encode(prompt, add_special_tokens=False)
     input_text = tokenizer.decode(input_tokens)
     gpt2_output = generator(input_text, max_length=120, do_sample=True, temperature=0.9)
     generated_text = gpt2_output[0]['generated_text']
     generated_text = generated_text.replace(input_text, "").strip()
+    if len(gpt2_output) == 0 or 'generated_text' not in gpt2_output[0]:
+        return None, "No se pudo generar el texto."
 
-
-
-
-
-
-
+    try:
+        tts = gTTS(generated_text, lang='es')
+        temp_audio_path = "temp_audio.mp3"
+        tts.save(temp_audio_path)
+        audio_path = "audio.wav"
+        audio = AudioSegment.from_mp3(temp_audio_path)
+        audio.export(audio_path, format="wav")
+        print("Archivo de audio generado:", audio_path)
+    except Exception as e:
+        return None, f"No se pudo generar el audio: {str(e)}"
 
     command = f"python3 inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face {image} --audio audio.wav --outfile video.mp4 --nosmooth"
     process = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     if process.returncode != 0:
         error_message = process.stderr.decode("utf-8")
-        return None, error_message
+        return None, f"No se pudo generar el video: {error_message}"
 
     output_video_path = "video.mp4"
     os.remove(temp_audio_path)
 
     if os.path.isfile(output_video_path):
         return output_video_path, None
-
     return None, "No se pudo generar el video"
 
 choices = ["1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg", "6.jpg"]
 thumbnail_width = "50px"
 
+
+def error_message_fn(error_message):
+    if error_message is not None:
+        return gr.outputs.Textbox(text=error_message, placeholder="Error")
+    else:
+        return None
+
 iface = gr.Interface(
     fn=generate_output,
     inputs=[
         gr.inputs.Textbox(lines=1, label="Nombre", placeholder="Ingresa tu nombre"),
         gr.inputs.Textbox(lines=1, label="Fecha de Nacimiento", placeholder="DD/MM/AAAA"),
-        gr.inputs.Radio(choices, label="Selecciona una
+        gr.inputs.Radio(choices, label="Selecciona una vidente:", thumbnails=[get_thumbnail(image, 50) for image in choices])
     ],
     outputs=[
-        gr.outputs.Video(label="Respuesta
+        gr.outputs.Video(label="Respuesta del Oraculo (un minuto aproximadamente)").style(width=256),
         gr.outputs.Textbox(label="Mensaje de error", type="text")
     ],
     title="Oraculo de Inteligencia Artifical v2.1",
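
Note: the added lines call gTTS and AudioSegment, so app.py is assumed to import them (from gtts import gTTS, from pydub import AudioSegment) above the hunk shown here. A minimal standalone sketch of the same audio step, assuming gtts and pydub are installed and ffmpeg is available for pydub's export; the helper name synthesize_wav is illustrative only and not part of app.py:

# Sketch of the text-to-speech step added in this commit (assumes gtts, pydub and ffmpeg).
from gtts import gTTS
from pydub import AudioSegment

def synthesize_wav(text, mp3_path="temp_audio.mp3", wav_path="audio.wav"):
    # Generate Spanish speech with Google TTS and save it as mp3.
    gTTS(text, lang='es').save(mp3_path)
    # Convert the mp3 to wav, the format passed to the Wav2Lip inference command.
    AudioSegment.from_mp3(mp3_path).export(wav_path, format="wav")
    return wav_path

if __name__ == "__main__":
    print("Archivo de audio generado:", synthesize_wav("Tu carta astral de hoy es favorable."))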