File size: 1,590 Bytes
63f899c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8c79f6a
63f899c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17dad23
 
63f899c
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import os
import shutil
from huggingface_hub import snapshot_download
import gradio as gr
from scripts.inference import inference_process
import argparse

# Download the Hallo pretrained weights from the Hugging Face Hub.
# snapshot_download returns the local cache directory holding the snapshot.
hallo_dir = snapshot_download(repo_id="fudan-generative-ai/hallo")

# Directory the inference config expects the weights under.
new_dir = 'pretrained_models'

# Ensure the destination directory exists (no-op if already present).
os.makedirs(new_dir, exist_ok=True)

# Move each downloaded entry into place. Skip entries that already exist:
# an unconditional shutil.move into an existing destination directory would
# nest the source *inside* it (duplicating trees on every re-run), and may
# raise for existing files on some platforms.
for filename in os.listdir(hallo_dir):
    dest = os.path.join(new_dir, filename)
    if not os.path.exists(dest):
        shutil.move(os.path.join(hallo_dir, filename), dest)

def run_inference(source_image, driving_audio, progress=gr.Progress(track_tqdm=True)):
    """Run Hallo talking-head inference on one image/audio pair.

    Args:
        source_image: Filepath (str) to the source portrait image, as
            delivered by ``gr.Image(type="filepath")``.
        driving_audio: Filepath (str) to the driving audio clip, as
            delivered by ``gr.Audio(type="filepath")``.
        progress: Gradio progress tracker; ``track_tqdm=True`` mirrors the
            tqdm bars emitted by the inference pipeline into the UI.

    Returns:
        Path to the rendered output video file.
    """
    # Build the Namespace that inference_process expects, mimicking the
    # CLI arguments of scripts/inference.py.
    args = argparse.Namespace(
        config='configs/inference/default.yaml',  # Adjust this path as necessary
        # With type="filepath", Gradio passes plain path strings; the
        # previous `.name` attribute access raised AttributeError on str.
        source_image=source_image,
        driving_audio=driving_audio,
        output='output.mp4',  # You might want to manage output paths dynamically
        pose_weight=1.0,
        face_weight=1.0,
        lip_weight=1.0,
        face_expand_ratio=1.2,
        checkpoint=None  # Adjust or set this according to your checkpointing strategy
    )

    # Run the (blocking) inference pipeline.
    inference_process(args)

    # Return the path so Gradio can serve the generated video.
    return 'output.mp4'

# Wire the inference callback into a minimal Gradio UI and serve it.
demo_inputs = [
    gr.Image(type="filepath"),
    gr.Audio(type="filepath"),
]
iface = gr.Interface(fn=run_inference, inputs=demo_inputs, outputs="video")

iface.launch()