# OpenFlamingo / app.py
import gradio as gr
import torch
from PIL import Image
from huggingface_hub import hf_hub_download, login
import os
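# Authenticate with the Hugging Face Hub using the Space's access token so that
# hf_hub_download below can fetch the model checkpoint.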
login(token=os.environ["HUGGINGFACE_TOKEN"])
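# In-context demonstration images and matching texts for each task tab; generate()
# indexes into these lists with the task id passed from the UI.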
demo_imgs = [
["images/chinchilla_web-1024x683.jpg", "images/shiba-inu-dog-in-the-snow.jpg", "images/900.jpeg", "images/dogs.jpeg"],
["images/hummus.jpg", "images/london-underground-sign.jpg", "images/4645808729_2dfc59b6a5_z.jpg", "images/5944609705_4664531909_z.jpg"],
["images/latte.jpg", "images/COCO_train2014_000000194806.jpg", "images/istockphoto-622434332-1024x1024.jpg", "images/11887_pesto-pasta_Rita-1x1-1-501c953b29074ab193e2b5ad36e64648.jpg"],
[
"images/bcee7a-20190225-a-london-underground-sign.jpg",
"images/istockphoto-622434332-1024x1024.jpg",
],
["images/dogs.jpeg", "images/pandas.jpg", "images/900.jpeg", "images/mhJ2yWNwMtNcmijZqVEDDW-320-80.jpg"],
["images/11887_pesto-pasta_Rita-1x1-1-501c953b29074ab193e2b5ad36e64648.jpg", "images/hummus.jpg"],
]
demo_texts = [
[
"Output: This is a chinchilla. They are mainly found in Chile.",
"Output: This is a shiba. They are very popular in Japan.",
"Output: This is a flamingo. They are found in South America.",
"Output: These are labrador retrievers. They are found in the UK.",
],
[
"Output: a bowl filled with creamy hummus placed on a white countertop.",
"Output: a red and blue 'Underground' sign found in London.",
"Output: a man and a woman on a train looking at their cell phones.",
"Output: a lavish reception room with black and white tiled floor."
],
[
"Question: What latte art is presented in the image? Answer: A swan latte art is presented in the image.",
"Question: What is the man trying to catch? Answer: The man is catching a white kite that his friend is flying.",
"Question: What does the sign say? Answer: Congress Ave",
"Question: What is this dish? Answer: This is pesto pasta topped with cheese and basil.",
],
['Output: "Underground"', 'Output: "Congress Ave"'],
["Output: 2 dogs", "Output: 3 pandas", "Output: 1 flamingo", "Output: 5 fingers"],
]
# Install the bundled open_flamingo package from the local source tree.
os.system("cd open_flamingo && pip install .")
from open_flamingo import create_model_and_transforms
# read bad_words.txt
with open("bad_words.txt", "r") as f:
bad_words = f.read().splitlines()
bad_words = set([word.strip().lower() for word in bad_words])
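# Build OpenFlamingo-9B: a CLIP ViT-L/14 vision encoder and an MPT-7B language model,
# bridged by gated cross-attention layers inserted every 4 decoder blocks.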
model, image_processor, tokenizer = create_model_and_transforms(
clip_vision_encoder_pretrained="openai",
clip_vision_encoder_path="ViT-L-14",
lang_encoder_path="anas-awadalla/mpt-7b",
tokenizer_path="anas-awadalla/mpt-7b",
cross_attn_every_n_layers=4,
)
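# Download the OpenFlamingo-9B checkpoint from the Hub and load it; strict=False because
# the checkpoint stores only the parameters trained on top of the pretrained encoders.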
checkpoint_path = hf_hub_download("openflamingo/OpenFlamingo-9B-vitl-mpt7b", "checkpoint.pt")
model.load_state_dict(torch.load(checkpoint_path), strict=False)
model.eval().to(0, dtype=torch.bfloat16)
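# Few-shot generation: build an interleaved <image>/text prompt from the demonstration
# samples plus the query image, run beam-search decoding, and post-process the output.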
def generate(
idx,
image,
text,
example_one_image=None,
example_one_text=None,
example_two_image=None,
example_two_text=None,
tc=False
):
if not tc:
raise gr.Error("Please read the terms and conditions.")
if image is None:
raise gr.Error("Please upload an image.")
    # The custom tab (idx == -1) requires user-supplied demonstrations; the preset tabs
    # fall back to the built-in demo samples below.
    if idx == -1 and (
        example_one_image is None
        or not example_one_text
        or example_two_image is None
        or not example_two_text
    ):
        raise gr.Error("Please fill in all the fields (image and text).")
    example_one_image = (
        Image.open(demo_imgs[idx][0])
        if example_one_image is None
        else example_one_image
    )
    example_one_text = (
        demo_texts[idx][0]
        if example_one_text is None
        else f"Output: {example_one_text}"
    )
    example_two_image = (
        Image.open(demo_imgs[idx][1])
        if example_two_image is None
        else example_two_image
    )
    example_two_text = (
        demo_texts[idx][1]
        if example_two_text is None
        else f"Output: {example_two_text}"
    )
    if idx != -1:
        example_three_image = Image.open(demo_imgs[idx][2])
        example_three_text = demo_texts[idx][2]
        example_four_image = Image.open(demo_imgs[idx][3])
        example_four_text = demo_texts[idx][3]
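    # Assemble the interleaved prompt: each demonstration is an <image> token followed by its
    # text and an <|endofchunk|> separator; the query image comes last with an open-ended
    # "Output:" (or "Question: ... Answer:" for the VQA tab).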
demo_plus_text = f"<image>{example_one_text}<|endofchunk|><image>{example_two_text}<|endofchunk|>"
if idx != -1:
demo_plus_text += f"<image>{example_three_text}<|endofchunk|><image>{example_four_text}<|endofchunk|>"
demo_plus_text += (
"<image>Output:" if idx != 2 else f"<image>Question: {text.strip()} Answer:"
)
print(demo_plus_text)
lang_x = tokenizer(demo_plus_text, return_tensors="pt")
input_ids = lang_x["input_ids"]
attention_mask = lang_x["attention_mask"]
vision_x = [image_processor(example_one_image).unsqueeze(0), image_processor(example_two_image).unsqueeze(0)]
if idx != -1:
vision_x.append(image_processor(example_three_image).unsqueeze(0))
vision_x.append(image_processor(example_four_image).unsqueeze(0))
vision_x.append(image_processor(image).unsqueeze(0))
vision_x = torch.cat(vision_x, dim=0)
vision_x = vision_x.unsqueeze(1).unsqueeze(0)
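    # vision_x now has shape (batch=1, num_images, frames=1, C, H, W), the layout OpenFlamingo expects.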
print(vision_x.shape)
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
output = model.generate(
vision_x=vision_x.to(0, dtype=torch.bfloat16),
lang_x=input_ids.to(0),
attention_mask=attention_mask.to(0),
max_new_tokens=30,
num_beams=3,
# do_sample=True,
# temperature=0.3,
# top_k=0,
)
gen_text = tokenizer.decode(
output[0][len(input_ids[0]):], skip_special_tokens=True
)
print(gen_text)
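    # Keep only the completion for the query image: cut the text off where the model starts
    # generating a follow-up "Output" or "Question" demonstration.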
gen_text = gen_text.split("Output")[0]
gen_text = gen_text.split("Question")[0]
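    # Simple safety filter: reject the generation if any word matches the bad-words list.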
for word in gen_text.split(" "):
word = (
word.strip()
.lower()
.replace(".", "")
.replace(",", "")
.replace("?", "")
.replace("!", "")
)
if word in bad_words:
print("Found bad word: ", word)
raise gr.Error(
"We found harmful language in the generated text. Please try again."
)
return (
f"Output:{gen_text}"
if idx != 2
else f"Question: {text.strip()} Answer: {gen_text}"
)
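# Gradio UI: one tab per task (captioning, animal recognition, counting, VQA, custom),
# each showing two demonstration samples alongside the query image.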
with gr.Blocks() as demo:
gr.Markdown(
"""
# 🦩 OpenFlamingo Demo
Paper: [OpenFlamingo: An Open-Source Framework for Training Large Autoregressive Vision-Language Models](https://arxiv.org/abs/2308.01390)
Blog posts: #1 [Announcing OpenFlamingo: An open-source framework for training vision-language models with in-context learning](https://laion.ai/blog/open-flamingo/) // #2 [OpenFlamingo v2: New Models and Enhanced Training Setup](https://laion.ai/blog/open-flamingo-v2/)
GitHub: [open_flamingo](https://github.com/mlfoundations/open_flamingo)
In this demo we showcase the in-context learning capabilities of the OpenFlamingo-9B model, a large multimodal model trained on top of MPT-7B. Note that, in addition to the two demonstrations shown in each tab, we add two more behind the scenes to improve the demo experience.
The model is trained on an interleaved mixture of text and images and is able to generate text conditioned on sequences of images/text. To safeguard against harmful generations, we detect toxic text in the model output and reject it. However, we understand that this is not a perfect solution and we encourage you to use this demo responsibly. If you find that the model is generating harmful text, please report it using this [form](https://forms.gle/StbcPvyyW2p3Pc7z6).
"""
)
with gr.Accordion("See terms and conditions"):
gr.Markdown("""**Please read the following information carefully before proceeding.**
[OpenFlamingo-9B](https://huggingface.co/openflamingo/OpenFlamingo-9B-vitl-mpt7b) is a **research prototype** that aims to enable users to interact with AI through both language and images. AI agents equipped with both language and visual understanding can be useful on a larger variety of tasks compared to models that communicate solely via language. By releasing an open-source research prototype, we hope to help the research community better understand the risks and limitations of modern visual-language AI models and accelerate the development of safer and more reliable methods.
**Limitations.** OpenFlamingo-9B is built on top of the [MPT-7B](https://huggingface.co/mosaicml/mpt-7b) large language model developed by MosaicML. Large language models are trained on mostly unfiltered internet data, and have been shown to produce toxic, unethical, inaccurate, and harmful content. On top of this, OpenFlamingo’s ability to support visual inputs creates additional risks, since it can be used in a wider variety of applications; image+text models may carry additional risks specific to multimodality. Please use discretion when assessing the accuracy or appropriateness of the model’s outputs, and be mindful before sharing its results.
**Privacy and data collection.** This demo does NOT store any personal information on its users, and it does NOT store user queries.""")
read_tc = gr.Checkbox(
label="I have read and agree to the terms and conditions", value=False)
with gr.Tab("📷 Image Captioning"):
with gr.Row():
with gr.Column(scale=1):
                demo_image_one = gr.Image(
                    value=Image.open(demo_imgs[1][0])
                )
demo_text_one = gr.Textbox(
value=demo_texts[1][0], label="Demonstration sample 1", lines=2
)
with gr.Column(scale=1):
                demo_image_two = gr.Image(
                    value=Image.open(demo_imgs[1][1])
                )
demo_text_two = gr.Textbox(
value=demo_texts[1][1], label="Demonstration sample 2", lines=2
)
with gr.Column(scale=1):
query_image = gr.Image(type="pil")
text_output = gr.Textbox(value="Output:", label="Model output")
run_btn = gr.Button("Run model")
def on_click_fn(img, read_tc): return generate(1, img, "", tc=read_tc)
run_btn.click(on_click_fn, inputs=[query_image, read_tc], outputs=[text_output])
with gr.Tab("🦓 Animal recognition"):
with gr.Row():
with gr.Column(scale=1):
demo_image_one = gr.Image(
value=Image.open(demo_imgs[0][0])
)
demo_text_one = gr.Textbox(
value=demo_texts[0][0], label="Demonstration sample 1", lines=2
)
with gr.Column(scale=1):
demo_image_two = gr.Image(
value=Image.open(demo_imgs[0][1])
)
demo_text_two = gr.Textbox(
value=demo_texts[0][1], label="Demonstration sample 2", lines=2
)
with gr.Column(scale=1):
query_image = gr.Image(type="pil")
text_output = gr.Textbox(value="Output:", label="Model output")
run_btn = gr.Button("Run model")
def on_click_fn(img, read_tc): return generate(0, img, "", tc=read_tc)
run_btn.click(on_click_fn, inputs=[query_image, read_tc], outputs=[text_output])
with gr.Tab("🔢 Counting objects"):
with gr.Row():
with gr.Column(scale=1):
demo_image_one = gr.Image(
value=Image.open(demo_imgs[4][0])
)
demo_text_one = gr.Textbox(
value=demo_texts[4][0], label="Demonstration sample 1", lines=2
)
with gr.Column(scale=1):
demo_image_two = gr.Image(
value=Image.open(demo_imgs[4][1])
)
demo_text_two = gr.Textbox(
value=demo_texts[4][1], label="Demonstration sample 2", lines=2
)
with gr.Column(scale=1):
query_image = gr.Image(type="pil")
text_output = gr.Textbox(value="Output:", label="Model output")
run_btn = gr.Button("Run model")
def on_click_fn(img, read_tc): return generate(4, img, "", tc=read_tc)
run_btn.click(on_click_fn, inputs=[query_image, read_tc], outputs=[text_output])
with gr.Tab("🕵️ Visual Question Answering"):
with gr.Row():
with gr.Column(scale=1):
demo_image_one = gr.Image(
value=Image.open(demo_imgs[2][0])
)
demo_text_one = gr.Textbox(
value=demo_texts[2][0], label="Demonstration sample 1", lines=2
)
with gr.Column(scale=1):
demo_image_two = gr.Image(
value=Image.open(demo_imgs[2][1])
)
demo_text_two = gr.Textbox(
value=demo_texts[2][1], label="Demonstration sample 2", lines=2
)
with gr.Column(scale=1):
query_image = gr.Image(type="pil")
                question = gr.Textbox(
                    label="Question (e.g. 'What is the color of the object?'; do not include the 'Question:' prefix)"
                )
text_output = gr.Textbox(value="", label="Model output")
run_btn = gr.Button("Run model")
def on_click_fn(img, txt, read_tc): return generate(2, img, txt, tc=read_tc)
run_btn.click(
on_click_fn, inputs=[query_image, question, read_tc], outputs=[text_output]
)
with gr.Tab("🌎 Custom"):
gr.Markdown(
"""### Customize the demonstration by uploading your own images and text samples.
    ### **Note: Any text prompt you enter will be prefixed with 'Output:', so you don't need to include it yourself.**"""
)
with gr.Row():
with gr.Column(scale=1):
demo_image_one = gr.Image(type="pil")
demo_text_one = gr.Textbox(
label="Demonstration sample 1", lines=2)
with gr.Column(scale=1):
demo_image_two = gr.Image(type="pil")
demo_text_two = gr.Textbox(
label="Demonstration sample 2", lines=2)
with gr.Column(scale=1):
query_image = gr.Image(type="pil")
text_output = gr.Textbox(value="Output:", label="Model output")
run_btn = gr.Button("Run model")
on_click_fn = lambda img, read_tc, example_img_1, example_txt_1, example_img_2, example_txt_2: generate(
-1, img, "", example_img_1, example_txt_1, example_img_2, example_txt_2, tc=read_tc
)
run_btn.click(
on_click_fn,
inputs=[
query_image,
read_tc,
demo_image_one,
demo_text_one,
demo_image_two,
demo_text_two,
],
outputs=[text_output],
)
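# Process requests one at a time and launch the app.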
demo.queue(concurrency_count=1)
demo.launch()