File size: 2,316 Bytes
7df3707
 
 
6f78835
e528922
7df3707
 
 
 
 
 
 
 
d830582
7df3707
 
 
 
 
 
 
 
 
e528922
7df3707
 
 
 
 
 
 
 
 
 
e528922
7df3707
 
 
 
 
 
 
 
e528922
7df3707
 
 
 
 
 
 
 
 
 
 
 
e528922
7df3707
 
 
 
 
 
e528922
 
7df3707
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import string
import gradio as gr
import requests
def inference_chat(input_image, input_text):
    """Placeholder VQA handler: echo the question back as the answer.

    The image argument is accepted but ignored; a real model would
    condition its answer on it.
    """
    answer = input_text
    return answer
    
# Build the Gradio UI.
#
# Components are created first (creation order defines the visual layout in
# gr.Blocks) and event handlers are wired afterwards, so every component a
# callback references already exists.  The original attached handlers inline
# and referenced `caption_output` before it was created, which raised
# NameError the moment the script was imported.
with gr.Blocks(
    css="""
    .message.svelte-w6rprc.svelte-w6rprc.svelte-w6rprc {font-size: 20px; margin-top: 20px}
    #component-21 > div.wrap.svelte-w6rprc {height: 600px;}
    """
) as iface:
    # Shared chat state (unused by the stub handler, cleared by the buttons).
    state = gr.State([])

    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil")

            with gr.Row():
                with gr.Column(scale=1):
                    chat_input = gr.Textbox(lines=1, label="VQA Input")

                    with gr.Row():
                        clear_button = gr.Button(value="Clear", interactive=True)
                        submit_button = gr.Button(
                            value="Submit", interactive=True, variant="primary"
                        )
        with gr.Column():
            caption_output = gr.Textbox(lines=1, label="VQA Output")

    # ---- Event wiring (all components exist at this point) ----

    # Pressing Enter in the textbox or clicking Submit both run the model.
    chat_input.submit(
        inference_chat,
        [image_input, chat_input],
        [caption_output],
    )
    submit_button.click(
        inference_chat,
        [image_input, chat_input],
        [caption_output],
    )

    # NOTE: each lambda must return exactly one value per output component;
    # the originals returned three values for two outputs.
    clear_button.click(
        lambda: ("", []),
        [],
        [chat_input, state],
        queue=False,
    )
    image_input.change(
        lambda: ("", []),
        [],
        [caption_output, state],
        queue=False,
    )

   # examples = gr.Examples(
   #     examples=examples,
   #     inputs=[image_input, chat_input],
  #  )

iface.queue(concurrency_count=1, api_open=False, max_size=10)
iface.launch(enable_queue=True)