#!/usr/bin/env python

from __future__ import annotations

import os
import pathlib
import gradio as gr

from prismer_model import Model


def create_demo():
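    # Load the Prismer model and put it in image-captioning mode.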
    model = Model()
    model.mode = 'caption'
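    # Two-column layout: input image and model choice on the left,
    # the predicted caption and the expert-label visualisations on the right.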
    with gr.Row():
        with gr.Column():
            image = gr.Image(label='Input', type='filepath')
            model_name = gr.Dropdown(label='Model', choices=['Prismer-Base', 'Prismer-Large'], value='Prismer-Base')
            run_button = gr.Button('Run')
        with gr.Column(scale=1.5):
            caption = gr.Text(label='Model Prediction')
            with gr.Row():
                depth = gr.Image(label='Depth')
                edge = gr.Image(label='Edge')
                normals = gr.Image(label='Normals')
            with gr.Row():
                segmentation = gr.Image(label='Segmentation')
                object_detection = gr.Image(label='Object Detection')
                ocr = gr.Image(label='OCR Detection')

    inputs = [image, model_name]
    outputs = [caption, depth, edge, normals, segmentation, object_detection, ocr]

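    # Offer the bundled sample images as clickable examples; cache their outputs
    # only when the SYSTEM environment variable is 'spaces' (i.e. when hosted on Hugging Face Spaces).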
    paths = sorted(pathlib.Path('prismer/images').glob('*'))
    examples = [[path.as_posix(), 'Prismer-Base'] for path in paths]
    gr.Examples(examples=examples,
                inputs=inputs,
                outputs=outputs,
                fn=model.run_caption,
                cache_examples=os.getenv('SYSTEM') == 'spaces')

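    # Wire the Run button: one click returns the caption plus the six expert-label images.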
    run_button.click(fn=model.run_caption, inputs=inputs, outputs=outputs)


if __name__ == '__main__':
    # create_demo() builds its components inside the active Blocks context,
    # so open one here before launching the standalone demo.
    with gr.Blocks() as demo:
        create_demo()
    demo.queue().launch()