import gradio as gr
import os
import yaml
import tempfile
import huggingface_hub
import subprocess

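# Download the DWPose detector (yolox_l) and pose-estimation (dw-ll_ucoco_384) ONNX weights,
# plus the MimicMotion checkpoint, into ./models at startup.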
huggingface_hub.hf_hub_download(
    repo_id='yzd-v/DWPose',
    filename='yolox_l.onnx',
    local_dir='./models/DWPose',
    local_dir_use_symlinks=False,
)

huggingface_hub.hf_hub_download(
    repo_id='yzd-v/DWPose',
    filename='dw-ll_ucoco_384.onnx',
    local_dir='./models/DWPose',
    local_dir_use_symlinks=False,
)

huggingface_hub.hf_hub_download(
    repo_id='ixaac/MimicMotion',
    filename='MimicMotion_1-1.pth',
    local_dir='./models',
    local_dir_use_symlinks=False,
)

def print_directory_contents(path):
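    """Recursively print the directory tree under `path` (sanity check for the downloaded models)."""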
    for root, dirs, files in os.walk(path):
        level = root.replace(path, '').count(os.sep)
        indent = ' ' * 4 * (level)
        print(f"{indent}{os.path.basename(root)}/")
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print(f"{subindent}{f}")

# Path to the directory you want to print
directory_path = './models'

# Print the directory contents
print_directory_contents(directory_path)

def infer(ref_video_in, ref_image_in):
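    """Write a MimicMotion inference config for the given video/image pair into a
    temporary directory, then run inference.py on it as a subprocess."""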
    # Create a temporary directory
    with tempfile.TemporaryDirectory() as temp_dir:
        print("Temporary directory created:", temp_dir)
    
        # Define the values for the variables
        ref_video_path = ref_video_in
        ref_image_path = ref_image_in
        num_frames = 72
        resolution = 576
        frames_overlap = 6
        num_inference_steps = 25
        noise_aug_strength = 0
        guidance_scale = 2.0
        sample_stride = 2
        fps = 15
        seed = 42
    
        # Create the data structure
        data = {
            'base_model_path': 'stabilityai/stable-video-diffusion-img2vid-xt-1-1',
            'ckpt_path': 'models/MimicMotion_1-1.pth',
            'test_case': [
                {
                    'ref_video_path': ref_video_path,
                    'ref_image_path': ref_image_path,
                    'num_frames': num_frames,
                    'resolution': resolution,
                    'frames_overlap': frames_overlap,
                    'num_inference_steps': num_inference_steps,
                    'noise_aug_strength': noise_aug_strength,
                    'guidance_scale': guidance_scale,
                    'sample_stride': sample_stride,
                    'fps': fps,
                    'seed': seed
                }
            ]
        }
    
        # Define the file path
        file_path = os.path.join(temp_dir, 'config.yaml')
    
        # Write the data to a YAML file
        with open(file_path, 'w') as file:
            yaml.dump(data, file, default_flow_style=False)
    
        print("YAML file 'config.yaml' created successfully in", file_path)

        # Execute the inference command
        command = ['python', 'inference.py', '--inference_config', file_path]
        result = subprocess.run(command, capture_output=True, text=True)
    
        # Print the command output
        print("Command output:", result.stdout)
        print("Command errors:", result.stderr)
    
    
    return "done"

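# Minimal Gradio front end: upload a reference video and a reference image,
# get back the status string returned by infer().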
demo = gr.Interface(
    fn=infer,
    # gr.Video takes no `type` argument; it already passes the callback a filepath
    inputs=[gr.Video(), gr.Image(type="filepath")],
    outputs=[gr.Textbox()],
)

demo.launch()