# deplot_plus_llm / app.py
import os
import torch
import requests
import openai  # was missing: used below for the gpt-3.5-turbo backend
import gradio as gr
import transformers
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor
from peft import PeftModel
## CoT prompts
def _add_markup(table):
    """Annotate a '|'-separated table string with Title/Header/Row labels."""
    parts = [p.strip() for p in table.splitlines(keepends=False)]
    if parts[0].startswith('TITLE'):
        result = f"Title: {parts[0].split(' | ')[1].strip()}\n"
        rows = parts[1:]
    else:
        result = ''
        rows = parts
    prefixes = ['Header: '] + [f'Row {i+1}: ' for i in range(len(rows) - 1)]
    return result + '\n'.join(prefix + row for prefix, row in zip(prefixes, rows))
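# For illustration, a hypothetical call (not part of the original app):
# _add_markup("TITLE | Favor\nYear | Rate\n2004 | 68.1%") returns
#   Title: Favor
#   Header: Year | Rate
#   Row 1: 2004 | 68.1%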
_TABLE = """Year | Democrats | Republicans | Independents
2004 | 68.1% | 45.0% | 53.0%
2006 | 58.0% | 42.0% | 53.0%
2007 | 59.0% | 38.0% | 45.0%
2009 | 72.0% | 49.0% | 60.0%
2011 | 71.0% | 51.2% | 58.0%
2012 | 70.0% | 48.0% | 53.0%
2013 | 72.0% | 41.0% | 60.0%"""
_INSTRUCTION = 'Read the table below to answer the following questions.'
_TEMPLATE = f"""First read an example, then answer the question for the second table.
------------
{_INSTRUCTION}
{_add_markup(_TABLE)}
Q: In which year do Republicans have the lowest favor rate?
A: Let's find the column of Republicans. Then let's extract the favor rates; they are [45.0, 42.0, 38.0, 49.0, 51.2, 48.0, 41.0]. The smallest number is 38.0, that's Row 3. Row 3 is year 2007. The answer is 2007.
Q: What is the sum of Democrats' favor rates in 2004, 2012, and 2013?
A: Let's find the rows for years 2004, 2012, and 2013. We find Rows 1, 6, and 7. The favor rates of Democrats in those 3 rows are 68.1, 70.0, and 72.0. 68.1+70.0+72.0=210.1. The answer is 210.1.
Q: By how many points do Independents surpass Republicans in the year 2011?
A: Let's find the row with year = 2011. We find Row 5. We extract Independents and Republicans' numbers. They are 58.0 and 51.2. 58.0-51.2=6.8. The answer is 6.8.
Q: Which group has the overall worst performance?
A: Let's sample a couple of years. In Row 1, year 2004, we find Republicans having the lowest favor rate 45.0 (since 45.0<68.1, 45.0<53.0). In year 2006, Row 2, we find Republicans having the lowest favor rate 42.0 (42.0<58.0, 42.0<53.0). The trend continues to other years. The answer is Republicans.
Q: Which party has the second highest favor rate in 2007?
A: Let's find the row for year 2007, that's Row 3. Let's extract the numbers on Row 3: [59.0, 38.0, 45.0]. 45.0 is the second highest. 45.0 is the number for Independents. The answer is Independents.
{_INSTRUCTION}"""
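# _TEMPLATE is a one-shot chain-of-thought prompt: evaluate() appends the
# DePlot-generated table and the user's question after this final instruction.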
## alpaca-lora
# Sanity check: fail early if the installed transformers predates LLaMA support.
assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is not available in your transformers install.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

BASE_MODEL = "decapoda-research/llama-7b-hf"
LORA_WEIGHTS = "tloen/alpaca-lora-7b"
tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
try:
    if torch.backends.mps.is_available():
        device = "mps"
except Exception:
    pass
# Load the base LLaMA weights, then apply the Alpaca-LoRA adapter on top.
if device == "cuda":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        load_in_8bit=False,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    model = PeftModel.from_pretrained(
        model, LORA_WEIGHTS, torch_dtype=torch.float16, force_download=True
    )
elif device == "mps":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
else:
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
    )

# fp16 inference everywhere except CPU; eval() disables dropout, and
# torch.compile (PyTorch >= 2) speeds up generation.
if device != "cpu":
    model.half()
model.eval()
if torch.__version__ >= "2":
    model = torch.compile(model)
## FLAN-UL2
TOKEN = os.environ.get("API_TOKEN", None)
API_URL = "https://api-inference.huggingface.co/models/google/flan-ul2"
headers = {"Authorization": f"Bearer {TOKEN}"}

def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
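# The hosted Inference API returns a list of generations; evaluate() below
# relies on the response shape [{"generated_text": "..."}].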
## OpenAI models
def set_openai_api_key(api_key):
    # Accept only plausible OpenAI keys ("sk-..." and sufficiently long).
    if api_key and api_key.startswith("sk-") and len(api_key) > 50:
        openai.api_key = api_key
def get_response_from_openai(prompt, model="gpt-3.5-turbo", max_output_tokens=128):
    # Send the prompt as a user message (the original used the "assistant" role,
    # which the chat API accepts but is not the intended way to pose a question).
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0.7,
        max_tokens=max_output_tokens,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    return response.choices[0].message["content"]
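# Hypothetical usage: get_response_from_openai("Q: ...\nA:") returns the model's
# reply text; requires openai.api_key to be set via set_openai_api_key().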
## deplot models
model_deplot = Pix2StructForConditionalGeneration.from_pretrained("google/deplot", torch_dtype=torch.bfloat16).to(0)
processor_deplot = Pix2StructProcessor.from_pretrained("google/deplot")
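# DePlot performs the plot-to-table step: given a chart image it generates a
# linearised data table (rows separated by "<0x0A>" tokens, decoded below).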
def evaluate(
    table,
    question,
    llm="alpaca-lora",
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    prompt_0shot = _INSTRUCTION + "\n" + _add_markup(table) + "\n" + "Q: " + question + "\n" + "A:"
    prompt = _TEMPLATE + "\n" + _add_markup(table) + "\n" + "Q: " + question + "\n" + "A:"
    if llm == "alpaca-lora":
        inputs = tokenizer(prompt, return_tensors="pt")
        input_ids = inputs["input_ids"].to(device)
        generation_config = GenerationConfig(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            **kwargs,
        )
        with torch.no_grad():
            generation_output = model.generate(
                input_ids=input_ids,
                generation_config=generation_config,
                return_dict_in_generate=True,
                output_scores=True,
                max_new_tokens=max_new_tokens,
            )
        s = generation_output.sequences[0]
        output = tokenizer.decode(s)
    elif llm == "flan-ul2":
        output = query({"inputs": prompt_0shot})[0]["generated_text"]
    elif llm == "gpt-3.5-turbo":
        output = get_response_from_openai(prompt_0shot)
    else:
        raise RuntimeError(f"No such LLM: {llm}")
    return output
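# Illustrative call, assuming `table_str` holds a DePlot-style table:
# evaluate(table_str, "Which year has the highest value?", llm="flan-ul2")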
def process_document(image, question, llm):
    # Step 1: DePlot converts the chart image into a textual table.
    inputs = processor_deplot(images=image, text="Generate the underlying data table for the figure below:", return_tensors="pt").to(0, torch.bfloat16)
    predictions = model_deplot.generate(**inputs, max_new_tokens=512)
    table = processor_deplot.decode(predictions[0], skip_special_tokens=True).replace("<0x0A>", "\n")

    # Step 2: send the prompt+table to the selected LLM.
    res = evaluate(table, question, llm=llm)
    if llm == "alpaca-lora":
        # alpaca-lora echoes the prompt, so keep only the text after the last "A:".
        return [table, res.split("A:")[-1]]
    else:
        return [table, res]
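# End-to-end sketch (hypothetical inputs): given a PIL image of a chart,
# process_document(pil_image, "Summarise the chart.", "alpaca-lora")
# returns [intermediate_table, answer].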
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
)
with gr.Blocks(theme=theme) as demo:
    with gr.Column():
        gr.Markdown(
            """<h1><center>DePlot+LLM: Multimodal chain-of-thought reasoning on plots</center></h1>
<p>
This is a demo of DePlot+LLM for QA and summarisation. <a href='https://arxiv.org/abs/2212.10505' target='_blank'>DePlot</a> is an image-to-text model that converts plots and charts into a textual sequence. The sequence is then used to prompt an LLM for chain-of-thought reasoning. The current underlying LLMs are <a href='https://huggingface.co/spaces/tloen/alpaca-lora' target='_blank'>alpaca-lora</a>, <a href='https://huggingface.co/google/flan-ul2' target='_blank'>flan-ul2</a>, and gpt-3.5-turbo. To use it, simply upload your image, type a question or instruction, and click 'submit'; or click one of the examples to load them. Read more at the links below.
</p>
"""
        )
    # with gr.Row():
    #     llm = gr.Dropdown(
    #         ["alpaca-lora", "flan-ul2"], label="LLM", info="We will add more LLMs.")
    #     num_shot = gr.Dropdown(
    #         ["0-shot", "1-shot"], label="shots", info="How many example tables in the prompt?")
    #     openai_api = gr.Textbox(label="openai api (if using OpenAI models, otherwise leave empty)")
    with gr.Row():
        with gr.Column(scale=2):
            input_image = gr.Image(label="Input Image", type="pil", interactive=True)
            # input_image.style(height=512, width=512)
            instruction = gr.Textbox(placeholder="Enter your instruction/question...", label="Question/Instruction")
            llm = gr.Dropdown(["alpaca-lora", "flan-ul2", "gpt-3.5-turbo"], label="LLM")
            openai_api_key_textbox = gr.Textbox(
                placeholder="Paste your OpenAI API key (sk-...) and hit Enter (if using OpenAI models, otherwise leave empty)",
                show_label=False, lines=1, type='password')
            submit = gr.Button("Submit", variant="primary")
        with gr.Column(scale=2):
            with gr.Accordion("Show intermediate table", open=False):
                output_table = gr.Textbox(lines=8, label="Intermediate Table")
            output_text = gr.Textbox(lines=8, label="Output")
    gr.Examples(
        examples=[
            ["deplot_case_study_m1.png", "What is the sum of numbers of Indonesia and Ireland? Remember to think step by step.", "alpaca-lora"],
            ["deplot_case_study_m1.png", "Summarise the chart for me please.", "alpaca-lora"],
            ["deplot_case_study_3.png", "By how much did China's growth rate drop? Think step by step.", "alpaca-lora"],
            ["deplot_case_study_4.png", "How many papers are submitted in 2020?", "alpaca-lora"],
            ["deplot_case_study_x2.png", "Summarise the chart for me please.", "alpaca-lora"],
            ["deplot_case_study_4.png", "How many papers are submitted in 2020?", "flan-ul2"],
            ["deplot_case_study_4.png", "acceptance rate = # accepted / # submitted. What is the acceptance rate of 2010?", "flan-ul2"],
            ["deplot_case_study_m1.png", "Summarise the chart for me please.", "flan-ul2"],
        ],
        cache_examples=True,
        inputs=[input_image, instruction, llm],
        outputs=[output_table, output_text],
        fn=process_document,
    )
    gr.Markdown(
        """<p style='text-align: center'><a href='https://arxiv.org/abs/2212.10505' target='_blank'>DePlot: One-shot visual language reasoning by plot-to-table translation</a></p>"""
    )

    openai_api_key_textbox.change(set_openai_api_key,
                                  inputs=[openai_api_key_textbox],
                                  outputs=[])
    openai_api_key_textbox.submit(set_openai_api_key,
                                  inputs=[openai_api_key_textbox],
                                  outputs=[])
    submit.click(process_document, inputs=[input_image, instruction, llm], outputs=[output_table, output_text])
    instruction.submit(
        process_document, inputs=[input_image, instruction, llm], outputs=[output_table, output_text]
    )
demo.launch(debug=True)