# Refer to https://github.com/gradio-app/gradio/blob/main/demo/chatbot_multimodal/run.ipynb for possible enhancements

import base64
import os

import PIL.Image
import google.generativeai as genai
import gradio as gr

txt_model = genai.GenerativeModel('gemini-pro')
vis_model = genai.GenerativeModel('gemini-pro-vision')

txt_prompt_1 = """The image contains the contents of a letter. I'd like to follow the request mentioned in the letter. Please provide 3 actionable items to assist me. When responding, use the following format:

# Sender and Subject #
1- Action 1 (no more than 20 words)
2- Action 2 (no more than 20 words)
3- Action 3 (no more than 20 words)

For example:
# From Richard regarding 'Shipping to Customer ABC' #
1- Pack Product A
2- Ship before 3:00 PM today
3- Notify Richard after shipment
"""

txt_display_1 = 'content of email'

GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')

genai.configure(api_key=GOOGLE_API_KEY)
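
# A small sanity check (a sketch, assuming the key is supplied via the
# GOOGLE_API_KEY environment variable as above): failing fast here gives a
# clearer error than a confusing failure from generate_content() later.
if not GOOGLE_API_KEY:
    raise RuntimeError("GOOGLE_API_KEY environment variable is not set")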

# Image to Base 64 Converter
def image_to_base64(image_path):
    with open(image_path, 'rb') as img:
        encoded_string = base64.b64encode(img.read())
    return encoded_string.decode('utf-8')
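# Illustrative usage (hypothetical file name): image_to_base64("letter.jpg")
# returns a plain str such as "/9j/4AAQSkZJRg...", which the query functions
# below wrap in a "data:image/jpeg;base64,..." URL so the chat UI can render it.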

# Display the user's text (and optional image) in the chat UI
def app2_query(history, txt, img):
    if not img:
        history += [(txt, None)]
        return history
    encoded_image = image_to_base64(img)  # avoid shadowing the base64 module
    data_url = f"data:image/jpeg;base64,{encoded_image}"
    history += [(f"{txt} ![]({data_url})", None)]
    return history

# Generate the model response and append it to the chat history
def app2_response(history, text, img):
    if not img:
        response = txt_model.generate_content(text)
    else:
        img = PIL.Image.open(img)
        response = vis_model.generate_content([text, img])
    history += [(None, response.text)]
    return history

# Show the uploaded letter in the output box while the model responds
def app1_query(img):
    if not img:
        return txt_prompt_1
    encoded_image = image_to_base64(img)  # avoid shadowing the base64 module
    data_url = f"data:image/jpeg;base64,{encoded_image}"
    # outputbox is a Textbox, so return a plain string rather than chat-style tuples
    output_text = f"{txt_display_1} ![]({data_url})"
    return output_text

# Generate the plan from the letter image (or from the bare prompt if no image)
def app1_response(img):
    if not img:
        response = txt_model.generate_content(txt_prompt_1)
        return response.text

    img = PIL.Image.open(img)
    response = vis_model.generate_content([txt_prompt_1, img])
    return response.text
        
# Interface code - selector helper (currently unused)

def sentence_builder(animal, place):
    return f"how many {animal}s from the {place} are shown in the picture?"
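# Usage example: sentence_builder("cat", "garden")
# -> "how many cats from the garden are shown in the picture?"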

# gradio block
    
with gr.Blocks(theme='snehilsanyal/scikit-learn') as app1:
    with gr.Column():    
        outputbox = gr.Textbox(label="here are the plans...")
        image_box = gr.Image(type="filepath")
        
    btn = gr.Button("Make a Plan")
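    # Wire the button: .click() first echoes the upload via app1_query, then
    # .then() chains app1_response so the Gemini answer replaces the placeholder.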
    clicked = btn.click(app1_query,
                        [image_box],
                        outputbox
                        ).then(app1_response,
                                [image_box],
                                outputbox
                                )
    gr.Markdown("""
    # Make a Plan #

    - take a screen capture of the letter (Win + Shift + S)
    - upload it to the image box, then click **Make a Plan**
    - wait for the LLM bot (Gemini, in this case) to respond
    - receive THREE actionable items

    [demo](https://youtu.be/lJ4jIAEVRNY)
    """)

with gr.Blocks(theme='snehilsanyal/scikit-learn') as app2:
    gr.Markdown("check the image...")
    with gr.Row():
        image_box = gr.Image(type="filepath")
    
        chatbot = gr.Chatbot(
            scale=2,
            height=750
        )
    text_box = gr.Dropdown(
        ["what is in the image",
         "provide alternative title for the image",
         "how many parts can be seen in the picture?",
         "check ID and expiration date"],
        label="Select a prompt",
        info="ask the bot"
    )

    btn = gr.Button("Submit")
    clicked = btn.click(app2_query,
                        [chatbot, text_box, image_box],
                        chatbot
                        ).then(app2_response,
                               [chatbot, text_box, image_box],
                               chatbot
                               )
with gr.Blocks(theme='snehilsanyal/scikit-learn') as demo:
    gr.Markdown("## Workflow Bot ##")
    gr.TabbedInterface([app1, app2], ["Make a Plan!", "Check This!"])

demo.queue()
demo.launch()