File size: 13,842 Bytes
800ade2
f9e72d0
30f2bef
2773f58
 
f9e72d0
2773f58
b0cee6d
800ade2
8507790
 
 
 
 
 
 
 
 
 
 
 
f9e72d0
ae2c89d
f67adcd
e9ecd38
 
 
 
 
 
 
 
 
 
 
 
 
f67adcd
 
 
 
 
 
 
 
 
 
 
 
ac0b0ba
 
 
f67adcd
 
 
 
 
 
 
 
 
 
 
 
 
8fc7477
f9e72d0
8fc7477
f2216d5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8fc7477
 
4cfb376
d38645c
4cfb376
d38645c
4cfb376
 
 
70fa6be
 
d38645c
70fa6be
 
 
 
ce3f2b4
70fa6be
 
 
 
 
 
 
 
 
 
 
 
 
 
50668be
8507790
b0cee6d
70fa6be
 
8fc7477
 
 
4cfb376
8fc7477
 
 
 
 
75e7686
8fc7477
 
08fb2c9
000d979
4cfb376
70fa6be
58d8b0b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4cfb376
970f64b
a82c90c
970f64b
 
70fa6be
d38645c
 
 
bb45c2d
4cfb376
 
 
ac0b0ba
f67adcd
 
ba3183a
ae2c89d
9308288
8507790
9308288
4a35755
6dc09e2
f67adcd
 
ba3183a
4cfb376
58d8b0b
4cfb376
 
70fa6be
581ffca
 
 
 
 
 
 
70fa6be
581ffca
70fa6be
f2216d5
 
 
 
 
 
 
70fa6be
7bbc69f
b4c3dc1
 
 
 
 
70fa6be
96ea2c7
70fa6be
 
 
 
 
 
 
 
 
f2216d5
dc9674b
96f8aa7
 
 
70fa6be
 
 
 
22c721b
 
70fa6be
 
75e7686
7b4f43b
 
 
1d3b03f
85d5108
 
bd891a1
15c5816
f49f244
d38645c
 
 
75e7686
ba3183a
 
 
 
70fa6be
 
 
 
 
bb45c2d
70fa6be
 
 
 
 
 
 
 
 
 
 
56f3b9b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
import os 
import torch
import openai
import requests
import gradio as gr
import transformers
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor
#from peft import PeftModel


# Pick the best available accelerator: CUDA > MPS (Apple Silicon) > CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

# Older torch builds lack `torch.backends.mps` entirely, and some builds
# raise at probe time, so guard the check instead of a bare `except:`
# (which would also hide KeyboardInterrupt/SystemExit).
try:
    if torch.backends.mps.is_available():
        device = "mps"
except (AttributeError, RuntimeError):
    pass

## CoT prompts

def _add_markup(table):
    try:
        parts = [p.strip() for p in table.splitlines(keepends=False)]
        if parts[0].startswith('TITLE'):
            result = f"Title: {parts[0].split(' | ')[1].strip()}\n"
            rows = parts[1:]
        else:
            result = ''
            rows = parts
        prefixes = ['Header: '] + [f'Row {i+1}: ' for i in range(len(rows) - 1)]
        return result + '\n'.join(prefix + row for prefix, row in zip(prefixes, rows))
    except:
        # just use the raw table if parsing fails
        return table

# Exemplar table (party favour rates by year) used only to build the
# few-shot chain-of-thought prompt below. Runtime prompt text — do not edit.
_TABLE = """Year | Democrats | Republicans | Independents
2004 | 68.1% | 45.0% | 53.0%
2006 | 58.0% | 42.0% | 53.0%
2007 | 59.0% | 38.0% | 45.0%
2009 | 72.0% | 49.0% | 60.0%
2011 | 71.0% | 51.2% | 58.0%
2012 | 70.0% | 48.0% | 53.0%
2013 | 72.0% | 41.0% | 60.0%"""

# Instruction line shared by the exemplar and the real query (see `evaluate`).
_INSTRUCTION = 'Read the table below to answer the following questions.'

# Few-shot CoT prompt: worked Q/A pairs over _TABLE. It ends with
# _INSTRUCTION so `evaluate` can append the new table and question directly.
_TEMPLATE = f"""First read an example then the complete question for the second table.
------------
{_INSTRUCTION}
{_add_markup(_TABLE)}
Q: In which year republicans have the lowest favor rate?
A: Let's find the column of republicans. Then let's extract the favor rates, they [45.0, 42.0, 38.0, 49.0, 51.2, 48.0, 41.0]. The smallest number is 38.0, that's Row 3.  Row 3 is year 2007. The answer is 2007.
Q: What is the sum of Democrats' favor rates of 2004, 2012, and 2013?
A: Let's find the rows of years 2004, 2012, and 2013. We find Row 1, 6, 7. The favor dates of Demoncrats on that 3 rows are 68.1, 70.0, and 72.0. 68.1+70.0+72=210.1. The answer is 210.1.
Q: By how many points do Independents surpass Republicans in the year of 2011?
A: Let's find the row with year = 2011. We find Row 5. We extract Independents and Republicans' numbers. They are 58.0 and 51.2. 58.0-51.2=6.8. The answer is 6.8.
Q: Which group has the overall worst performance?
A: Let's sample a couple of years. In Row 1, year 2004, we find Republicans having the lowest favor rate 45.0 (since 45.0<68.1, 45.0<53.0). In year 2006, Row 2, we find Republicans having the lowest favor rate 42.0 (42.0<58.0, 42.0<53.0). The trend continues to other years. The answer is Republicans.
Q: Which party has the second highest favor rates in 2007?
A: Let's find the row of year 2007, that's Row 3. Let's extract the numbers on Row 3: [59.0, 38.0, 45.0]. 45.0 is the second highest. 45.0 is the number of Independents. The answer is Independents.
{_INSTRUCTION}"""


## alpaca-lora

# assert (
#     "LlamaTokenizer" in transformers._import_structure["models.llama"]
# ), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
# from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

# tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")

# BASE_MODEL = "decapoda-research/llama-7b-hf"
# LORA_WEIGHTS = "tloen/alpaca-lora-7b"

# if device == "cuda":
#     model = LlamaForCausalLM.from_pretrained(
#         BASE_MODEL,
#         load_in_8bit=False,
#         torch_dtype=torch.float16,
#         device_map="auto",
#     )
#     model = PeftModel.from_pretrained(
#         model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True
#     )
# elif device == "mps":
#     model = LlamaForCausalLM.from_pretrained(
#         BASE_MODEL,
#         device_map={"": device},
#         torch_dtype=torch.float16,
#     )
#     model = PeftModel.from_pretrained(
#         model,
#         LORA_WEIGHTS,
#         device_map={"": device},
#         torch_dtype=torch.float16,
#     )
# else:
#     model = LlamaForCausalLM.from_pretrained(
#         BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
#     )
#     model = PeftModel.from_pretrained(
#         model,
#         LORA_WEIGHTS,
#         device_map={"": device},
#     )


# if device != "cpu":
#     model.half()
# model.eval()
# if torch.__version__ >= "2":
#     model = torch.compile(model)


## FLAN-UL2
# Hugging Face Inference API token. If unset, the Authorization header is
# literally "Bearer None" — NOTE(review): presumably relying on the API's
# anonymous tier; confirm whether the header should be omitted instead.
HF_TOKEN = os.environ.get("API_TOKEN", None)
API_URL = "https://api-inference.huggingface.co/models/google/flan-ul2"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

def query(payload):
    """POST `payload` to the flan-ul2 Inference API and return the parsed JSON.

    Args:
        payload: JSON-serialisable dict, e.g. ``{"inputs": prompt}``.

    Returns:
        The decoded JSON response (list of generations or an error dict).
    """
    # A timeout keeps a stuck inference API from hanging the Gradio worker
    # forever; `evaluate` already catches failures from this call.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    return response.json()

## OpenAI models
# Seed the key from the environment; users may override it via the UI textbox.
openai.api_key = os.environ.get("OPENAI_TOKEN", None)
def set_openai_api_key(api_key):
    """Install `api_key` as the active OpenAI credential if it looks plausible."""
    # Accept only non-empty keys with the expected "sk-" prefix and length.
    looks_valid = bool(api_key) and api_key.startswith("sk-") and len(api_key) > 50
    if looks_valid:
        openai.api_key = api_key

def get_response_from_openai(prompt, model="gpt-3.5-turbo", max_output_tokens=256):
    """Send `prompt` as a single chat message and return the reply text.

    Args:
        prompt: Full prompt text (instruction + table + question).
        model: OpenAI chat model name.
        max_output_tokens: Cap on generated tokens.

    Returns:
        The content string of the first completion choice.
    """
    # The prompt is the caller's request, so it belongs in a "user" message;
    # the original code mislabelled it as "assistant".
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0.7,
        max_tokens=max_output_tokens,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    return response.choices[0].message['content']

## deplot models
# DePlot (a Pix2Struct variant) translates a chart/plot image into a textual
# data table; bfloat16 halves memory versus float32.
model_deplot = Pix2StructForConditionalGeneration.from_pretrained("google/deplot", torch_dtype=torch.bfloat16)
if device == "cuda":
    # Move to GPU 0; MPS/CPU keep the model where from_pretrained left it.
    model_deplot = model_deplot.to(0)
processor_deplot = Pix2StructProcessor.from_pretrained("google/deplot")

def evaluate(
    table,
    question,
    llm="alpaca-lora",
    input=None,  # unused; kept (and shadowing the builtin) for interface compatibility
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    """Answer `question` about `table` with the selected LLM backend.

    Args:
        table: Pipe-delimited table text (typically DePlot output).
        question: Natural-language question or instruction.
        llm: One of "alpaca-lora", "flan-ul2", "gpt-3.5-turbo".
        temperature/top_p/top_k/num_beams/max_new_tokens/kwargs: Sampling
            options; only the alpaca-lora branch uses them.

    Returns:
        The LLM's raw output string (or a human-readable error message for
        the remote backends).

    Raises:
        RuntimeError: If `llm` names an unknown backend.
    """
    # Zero-shot prompt for the API backends; few-shot CoT prompt for alpaca.
    prompt_0shot = _INSTRUCTION + "\n" + _add_markup(table) + "\n" + "Q: " + question + "\n" + "A:"
    prompt = _TEMPLATE + "\n" + _add_markup(table) + "\n" + "Q: " + question + "\n" + "A:"
    if llm == "alpaca-lora":
        # NOTE(review): `tokenizer`, `model` and `GenerationConfig` come from
        # the commented-out alpaca-lora section above; this branch raises
        # NameError unless that section is re-enabled.
        inputs = tokenizer(prompt, return_tensors="pt")
        input_ids = inputs["input_ids"].to(device)
        generation_config = GenerationConfig(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            **kwargs,
        )
        with torch.no_grad():
            generation_output = model.generate(
                input_ids=input_ids,
                generation_config=generation_config,
                return_dict_in_generate=True,
                output_scores=True,
                max_new_tokens=max_new_tokens,
            )
        s = generation_output.sequences[0]
        output = tokenizer.decode(s)
    elif llm == "flan-ul2":
        try:
            output = query({"inputs": prompt_0shot})[0]["generated_text"]
        except Exception:
            output = "<flan-ul2 inference API error - try later>"
    elif llm == "gpt-3.5-turbo":
        try:
            output = get_response_from_openai(prompt_0shot)
        except Exception:
            output = "<Remember to input your OpenAI API key ☺>"
    else:
        # The original built this exception without `raise`, so control fell
        # through to `return output` and died with UnboundLocalError.
        raise RuntimeError(f"No such LLM: {llm}")

    return output


def process_document(image, question, llm):
    """Convert a chart `image` to a table with DePlot, then query `llm`.

    Returns a two-element list: [intermediate table text, LLM answer].
    """
    # Step 1: image -> textual data table via DePlot.
    deplot_inputs = processor_deplot(
        images=image,
        text="Generate the underlying data table for the figure below:",
        return_tensors="pt",
    ).to(torch.bfloat16)
    if device == "cuda":
        deplot_inputs = deplot_inputs.to(0)
    predictions = model_deplot.generate(**deplot_inputs, max_new_tokens=512)
    decoded = processor_deplot.decode(predictions[0], skip_special_tokens=True)
    # DePlot emits "<0x0A>" tokens for newlines; restore real line breaks.
    table = decoded.replace("<0x0A>", "\n")

    # Step 2: table + question -> answer via the selected LLM.
    res = evaluate(table, question, llm=llm)
    if llm == "alpaca-lora":
        # alpaca-lora echoes the prompt; keep only the text after the last "A:".
        return [table, res.split("A:")[-1]]
    return [table, res]

# theme = gr.themes.Monochrome(
#     primary_hue="indigo",
#     secondary_hue="blue",
#     neutral_hue="slate",
#     radius_size=gr.themes.sizes.radius_sm,
#     font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
# )

# Gradio UI: image + question in, intermediate table + answer out.
with gr.Blocks(theme="gradio/soft") as demo:
    # Header / description banner.
    with gr.Column():
      # gr.Markdown(
      #       """<h1><center>DePlot+LLM: Multimodal chain-of-thought reasoning on plots</center></h1>
      #       <p>
      #       This is a demo of DePlot+LLM for QA and summarisation. <a href='https://arxiv.org/abs/2212.10505' target='_blank'>DePlot</a> is an image-to-text model that converts plots and charts into a textual sequence. The sequence then is used to prompt LLM for chain-of-thought reasoning. The current underlying LLMs are <a href='https://huggingface.co/spaces/tloen/alpaca-lora' target='_blank'>alpaca-lora</a>, <a href='https://huggingface.co/google/flan-ul2' target='_blank'>flan-ul2</a>, and <a href='https://openai.com/blog/chatgpt' target='_blank'>gpt-3.5-turbo</a>. To use it, simply upload your image and type a question or instruction and click 'submit', or click one of the examples to load them. Read more at the links below.
      #       </p>
      #       """
      #       )
      gr.Markdown(
            """<h1><center>DePlot+LLM: Multimodal chain-of-thought reasoning on plot📊</center></h1>
            <h3>
            <center>
            <a href='https://arxiv.org/abs/2212.09662' target='_blank'>[paper]</a> <a href='https://ai.googleblog.com/2023/05/foundation-models-for-reasoning-on.html' target='_blank'>[google-ai blog]</a> <a href='https://github.com/google-research/google-research/tree/master/deplot' target='_blank'>[code]</a>
            </center>
            </h3>
            <p>
            This is a demo of DePlot+LLM for QA and summarisation. <a href='https://arxiv.org/abs/2212.10505' target='_blank'>DePlot</a> is an image-to-text model that converts plots and charts into a textual sequence. The sequence then is used to prompt LLM for chain-of-thought reasoning. The current underlying LLMs are <a href='https://huggingface.co/google/flan-ul2' target='_blank'>flan-ul2</a> and <a href='https://openai.com/blog/chatgpt' target='_blank'>gpt-3.5-turbo</a>. To use it, simply upload your image and type a question or instruction and click 'submit', or click one of the examples to load them.   
            </p>
            """
            )

    # Left column: inputs; right column: outputs.
    with gr.Row():
      with gr.Column(scale=2):
        input_image = gr.Image(label="Input Image", type="pil", interactive=True)
        #input_image.style(height=512, width=512)
        instruction = gr.Textbox(placeholder="Enter your instruction/question...", label="Question/Instruction")
        #llm = gr.Dropdown(["alpaca-lora", "flan-ul2", "gpt-3.5-turbo"], label="LLM")
        llm = gr.Dropdown(["flan-ul2", "gpt-3.5-turbo"], label="LLM")
        openai_api_key_textbox = gr.Textbox(value='', 
                                            placeholder="Paste your OpenAI API key (sk-...) and hit Enter (if using OpenAI models, otherwise leave empty)",
                                            show_label=False, lines=1, type='password')          
        submit = gr.Button("Submit", variant="primary")
  
      with gr.Column(scale=2):  
        # The DePlot table is collapsed by default; the answer is always shown.
        with gr.Accordion("Show intermediate table", open=False):
          output_table = gr.Textbox(lines=8, label="Intermediate Table")
        output_text = gr.Textbox(lines=8, label="Output")

    # Clickable examples; cache_examples=True precomputes their outputs at
    # startup (requires the example image files to exist alongside this app).
    gr.Examples(
        examples=[
            ["deplot_case_study_6.png", "Rank the four methods according to average model performances. By how much does deplot outperform the second strongest approach on average across the two sets?  Show the computation.", "gpt-3.5-turbo"], # ex 1
            ["deplot_case_study_4.png", "What are the acceptance rates? And how does the acceptance change over the years?", "gpt-3.5-turbo"],  # ex 2
            ["deplot_case_study_m1.png", "Summarise the chart for me please.", "gpt-3.5-turbo"],  # ex 3
            #["deplot_case_study_m1.png", "What is the sum of numbers of Indonesia and Ireland? Remember to think step by step.", "alpaca-lora"],
            #["deplot_case_study_3.png", "By how much did China's growth rate drop? Think step by step.", "alpaca-lora"],
            #["deplot_case_study_4.png", "How many papers are submitted in 2020?", "flan-ul2"],
            ["deplot_case_study_5.png", "Which sales channel has the second highest portion?", "flan-ul2"],  # ex 4
            #["deplot_case_study_x2.png", "Summarise the chart for me please.", "alpaca-lora"],
            #["deplot_case_study_4.png", "How many papers are submitted in 2020?", "alpaca-lora"],
            #["deplot_case_study_m1.png", "Summarise the chart for me please.", "alpaca-lora"],
            #["deplot_case_study_4.png", "acceptance rate = # accepted / #submitted . What is the acceptance rate of 2010?", "flan-ul2"],
            #["deplot_case_study_m1.png", "Summarise the chart for me please.", "flan-ul2"],
        ],
        cache_examples=True,
        inputs=[input_image, instruction, llm],
        outputs=[output_table, output_text],
        fn=process_document
    )

    gr.Markdown(
            """<p style='text-align: center'><a href='https://arxiv.org/abs/2212.10505' target='_blank'>DePlot: One-shot visual language reasoning by plot-to-table translation</a></p>"""
    )
    # Clear any env-seeded key once the UI is built — NOTE(review): presumably
    # so public visitors must provide their own key; confirm this is intended,
    # as it also disables the OPENAI_TOKEN env var for all sessions.
    openai.api_key = ""
    # Update the key both on textbox change and on Enter.
    openai_api_key_textbox.change(set_openai_api_key,
                                      inputs=[openai_api_key_textbox],
                                      outputs=[])
    openai_api_key_textbox.submit(set_openai_api_key,
                                      inputs=[openai_api_key_textbox],
                                      outputs=[])
    # Run the pipeline on button click or Enter in the question box.
    submit.click(process_document, inputs=[input_image, instruction, llm], outputs=[output_table, output_text])
    instruction.submit(
        process_document, inputs=[input_image, instruction, llm], outputs=[output_table, output_text]
    )

# Single-worker queue: the DePlot model is loaded once and is not thread-safe
# to share across concurrent generations.
demo.queue(concurrency_count=1).launch()