hysts committed on
Commit
a962fcd
β€’
1 Parent(s): 285c0b5
Files changed (2) hide show
  1. app_caption.py +31 -29
  2. app_vqa.py +37 -35
app_caption.py CHANGED
@@ -9,37 +9,39 @@ import gradio as gr
9
  from prismer_model import Model
10
 
11
 
12
- def create_demo():
13
  model = Model()
14
  model.mode = 'caption'
15
- with gr.Row():
16
- with gr.Column():
17
- image = gr.Image(label='Input', type='filepath')
18
- model_name = gr.Dropdown(label='Model', choices=['Prismer-Base', 'Prismer-Large'], value='Prismer-Base')
19
- run_button = gr.Button('Run')
20
- with gr.Column(scale=1.5):
21
- caption = gr.Text(label='Model Prediction')
22
- with gr.Row():
23
- depth = gr.Image(label='Depth')
24
- edge = gr.Image(label='Edge')
25
- normals = gr.Image(label='Normals')
26
- with gr.Row():
27
- segmentation = gr.Image(label='Segmentation')
28
- object_detection = gr.Image(label='Object Detection')
29
- ocr = gr.Image(label='OCR Detection')
30
-
31
- inputs = [image, model_name]
32
- outputs = [caption, depth, edge, normals, segmentation, object_detection, ocr]
33
-
34
- paths = sorted(pathlib.Path('prismer/images').glob('*'))
35
- examples = [[path.as_posix(), 'Prismer-Base'] for path in paths]
36
- gr.Examples(examples=examples,
37
- inputs=inputs,
38
- outputs=outputs,
39
- fn=model.run_caption,
40
- cache_examples=os.getenv('SYSTEM') == 'spaces')
41
-
42
- run_button.click(fn=model.run_caption, inputs=inputs, outputs=outputs)
 
 
43
 
44
 
45
  if __name__ == '__main__':
 
9
  from prismer_model import Model
10
 
11
 
12
def create_demo() -> gr.Blocks:
    """Build the image-captioning demo UI.

    Returns:
        gr.Blocks: a self-contained Gradio Blocks app with an image/model
        selector on the left and the predicted caption plus the six
        intermediate expert visualizations (depth, edge, normals,
        segmentation, object detection, OCR) on the right.
    """
    model = Model()
    # Model.run_caption presumably dispatches on this mode flag — confirm in prismer_model.
    model.mode = 'caption'
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                # type='filepath' so the handler receives a path string, not an array.
                image = gr.Image(label='Input', type='filepath')
                model_name = gr.Dropdown(label='Model', choices=['Prismer-Base', 'Prismer-Large'], value='Prismer-Base')
                run_button = gr.Button('Run')
            # Wider right column: one text output plus six expert images.
            with gr.Column(scale=1.5):
                caption = gr.Text(label='Model Prediction')
                with gr.Row():
                    depth = gr.Image(label='Depth')
                    edge = gr.Image(label='Edge')
                    normals = gr.Image(label='Normals')
                with gr.Row():
                    segmentation = gr.Image(label='Segmentation')
                    object_detection = gr.Image(label='Object Detection')
                    ocr = gr.Image(label='OCR Detection')

        inputs = [image, model_name]
        outputs = [caption, depth, edge, normals, segmentation, object_detection, ocr]

        # Every bundled sample image becomes an example row (sorted for a stable order).
        paths = sorted(pathlib.Path('prismer/images').glob('*'))
        examples = [[path.as_posix(), 'Prismer-Base'] for path in paths]
        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=outputs,
                    fn=model.run_caption,
                    # Pre-compute example outputs only when running on HF Spaces
                    # (SYSTEM env var set to 'spaces' there).
                    cache_examples=os.getenv('SYSTEM') == 'spaces')

        run_button.click(fn=model.run_caption, inputs=inputs, outputs=outputs)
    return demo
45
 
46
 
47
  if __name__ == '__main__':
app_vqa.py CHANGED
@@ -9,42 +9,44 @@ import gradio as gr
9
  from prismer_model import Model
10
 
11
 
12
- def create_demo():
13
  model = Model()
14
- with gr.Row():
15
- with gr.Column():
16
- image = gr.Image(label='Input', type='filepath')
17
- model_name = gr.Dropdown(label='Model', choices=['Prismer-Base', 'Prismer-Large'], value='Prismer-Base')
18
- question = gr.Text(label='Question')
19
- run_button = gr.Button('Run')
20
- with gr.Column(scale=1.5):
21
- answer = gr.Text(label='Model Prediction')
22
- with gr.Row():
23
- depth = gr.Image(label='Depth')
24
- edge = gr.Image(label='Edge')
25
- normals = gr.Image(label='Normals')
26
- with gr.Row():
27
- segmentation = gr.Image(label='Segmentation')
28
- object_detection = gr.Image(label='Object Detection')
29
- ocr = gr.Image(label='OCR Detection')
30
-
31
- inputs = [image, model_name, question]
32
- outputs = [answer, depth, edge, normals, segmentation, object_detection, ocr]
33
-
34
- paths = sorted(pathlib.Path('prismer/images').glob('*'))
35
- ex_questions = ['What is the man on the left doing?',
36
- 'What is this person doing?',
37
- 'How many cows in this image?',
38
- 'What is the type of animal in this image?',
39
- 'What toy is it?']
40
- examples = [[path.as_posix(), 'Prismer-Base', ex_questions[i]] for i, path in enumerate(paths)]
41
- gr.Examples(examples=examples,
42
- inputs=inputs,
43
- outputs=outputs,
44
- fn=model.run_vqa,
45
- cache_examples=os.getenv('SYSTEM') == 'spaces')
46
-
47
- run_button.click(fn=model.run_vqa, inputs=inputs, outputs=outputs)
 
 
48
 
49
 
50
  if __name__ == '__main__':
 
9
  from prismer_model import Model
10
 
11
 
12
def create_demo() -> gr.Blocks:
    """Build the visual-question-answering demo UI.

    Returns:
        gr.Blocks: a self-contained Gradio Blocks app with an image, model
        selector and free-text question on the left, and the predicted
        answer plus the six intermediate expert visualizations (depth,
        edge, normals, segmentation, object detection, OCR) on the right.
    """
    model = Model()
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                # type='filepath' so the handler receives a path string, not an array.
                image = gr.Image(label='Input', type='filepath')
                model_name = gr.Dropdown(label='Model', choices=['Prismer-Base', 'Prismer-Large'], value='Prismer-Base')
                question = gr.Text(label='Question')
                run_button = gr.Button('Run')
            # Wider right column: one text output plus six expert images.
            with gr.Column(scale=1.5):
                answer = gr.Text(label='Model Prediction')
                with gr.Row():
                    depth = gr.Image(label='Depth')
                    edge = gr.Image(label='Edge')
                    normals = gr.Image(label='Normals')
                with gr.Row():
                    segmentation = gr.Image(label='Segmentation')
                    object_detection = gr.Image(label='Object Detection')
                    ocr = gr.Image(label='OCR Detection')

        inputs = [image, model_name, question]
        outputs = [answer, depth, edge, normals, segmentation, object_detection, ocr]

        paths = sorted(pathlib.Path('prismer/images').glob('*'))
        ex_questions = ['What is the man on the left doing?',
                        'What is this person doing?',
                        'How many cows in this image?',
                        'What is the type of animal in this image?',
                        'What toy is it?']
        # Pair each sample image with its question via zip: the previous
        # ex_questions[i] indexing raised IndexError whenever the images
        # directory held more files than there are questions; zip simply
        # truncates to the shorter of the two sequences.
        examples = [[path.as_posix(), 'Prismer-Base', ex_question]
                    for path, ex_question in zip(paths, ex_questions)]
        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=outputs,
                    fn=model.run_vqa,
                    # Pre-compute example outputs only when running on HF Spaces
                    # (SYSTEM env var set to 'spaces' there).
                    cache_examples=os.getenv('SYSTEM') == 'spaces')

        run_button.click(fn=model.run_vqa, inputs=inputs, outputs=outputs)
    return demo
50
 
51
 
52
  if __name__ == '__main__':