#!/usr/bin/env python

from __future__ import annotations

import os
import datetime

import gradio as gr
import spaces


@spaces.GPU(duration=60 * 3)
def run_on_gpu(input_shape, text, seed, guidance_scale):
    # Defer importing the model until we are inside the GPU-allocated worker so
    # that the app can start without loading any weights.
    print(f'Starting inference at {datetime.datetime.now()}')
    from model import Model
    model = Model()
    res_generator = model.run(shape_path=input_shape, text=text, seed=seed, guidance_scale=guidance_scale)
    # `model.run` is lazy, so consume the generator before logging completion.
    results = list(res_generator)
    print(f'Finished inference at {datetime.datetime.now()}')
    return results

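# A hedged sketch, not part of the original app: the demo description in
# `main()` states that only `.obj` meshes with fewer than 100,000 faces are
# accepted, and a pre-check like the one below could enforce that limit before
# a GPU worker is requested. `trimesh` is an assumed extra dependency here.
def validate_face_count(shape_path: str, max_faces: int = 100_000) -> None:
    import trimesh  # assumed dependency for this sketch only

    mesh = trimesh.load(shape_path, force='mesh')
    if len(mesh.faces) >= max_faces:
        raise gr.Error(
            f'The mesh has {len(mesh.faces):,} faces; '
            f'only meshes with fewer than {max_faces:,} faces are accepted.')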


def main():
    DESCRIPTION = '''# [TEXTure](https://github.com/TEXTurePaper/TEXTurePaper)

- This demo accepts only `.obj` files with fewer than 100,000 faces as input.
- Inference takes about 10 minutes on a T4 GPU.
'''
    if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
        DESCRIPTION += (f'\n<p>For faster inference without waiting in queue, '
                        f'you may duplicate the space and upgrade to GPU in settings. '
                        f'<a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true">'
                        f'<img style="display: inline; margin-top: 0em; margin-bottom: 0em" '
                        f'src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>')

    with gr.Blocks(css='style.css') as demo:
        gr.Markdown(DESCRIPTION)
        with gr.Row():
            with gr.Column():
                input_shape = gr.Model3D(label='Input 3D mesh')
                text = gr.Text(label='Text')
                seed = gr.Slider(label='Seed',
                                 minimum=0,
                                 maximum=100000,
                                 value=3,
                                 step=1)
                guidance_scale = gr.Slider(label='Guidance scale',
                                           minimum=0,
                                           maximum=50,
                                           value=7.5,
                                           step=0.1)
                run_button = gr.Button('Run')
            with gr.Column():
                progress_text = gr.Text(label='Progress')
                with gr.Tabs():
                    with gr.TabItem(label='Images from each viewpoint'):
                        viewpoint_images = gr.Gallery(show_label=False, columns=4)
                    with gr.TabItem(label='Result 3D model'):
                        result_3d_model = gr.Model3D(show_label=False)
                    with gr.TabItem(label='Output mesh file'):
                        output_file = gr.File(show_label=False)
        with gr.Row():
            examples = [
                ['shapes/dragon1.obj', 'a photo of a dragon', 0, 7.5],
                ['shapes/dragon2.obj', 'a photo of a dragon', 0, 7.5],
                ['shapes/eagle.obj', 'a photo of an eagle', 0, 7.5],
                ['shapes/napoleon.obj', 'a photo of Napoleon Bonaparte', 3, 7.5],
                ['shapes/nascar.obj', 'A next gen nascar', 2, 10],
            ]
            gr.Examples(examples=examples,
                        inputs=[
                            input_shape,
                            text,
                            seed,
                            guidance_scale,
                        ],
                        outputs=[
                            result_3d_model,
                            output_file,
                        ],
                        cache_examples=False)

        run_button.click(fn=run_on_gpu,
                         inputs=[
                             input_shape,
                             text,
                             seed,
                             guidance_scale,
                         ],
                         outputs=[
                             viewpoint_images,
                             result_3d_model,
                             output_file,
                             progress_text,
                         ])

    demo.queue(max_size=5)
    demo.launch(debug=True)


if __name__ == '__main__':
    main()