# GPT-K / app.py
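"""Gradio demo for GPT-K, a knowledge-augmented vision-and-language assistant.

An uploaded image is embedded with CLIP, matched against FAISS-indexed
object / action / attribute text databases, and the retrieved knowledge is
passed to the GPT-K model together with the user prompt for streamed generation.
"""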
from pathlib import Path
import os
import time
import gradio as gr
import requests
import numpy as np
import torch
import torch.nn.functional as F
import open_clip
import faiss
from transformers import TextIteratorStreamer
from threading import Thread
from conversation import default_conversation, conv_templates, Conversation
from knowledge import TextDB
from knowledge.transforms import five_crop, nine_crop
from knowledge.utils import refine_cosine
from model import get_gptk_model, get_gptk_image_transform
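
# Reusable Gradio updates and blank placeholders for the 30 knowledge widgets
# (15 image slots + 15 text slots: 1 whole-image, 5 five-crop, 9 nine-crop views).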
no_change_btn = gr.Button.update()
enable_btn = gr.Button.update(interactive=True)
disable_btn = gr.Button.update(interactive=False)
knwl_none = (None, ) * 30
knwl_unchange = (gr.Image.update(), ) * 15 + (gr.Textbox.update(), ) * 15
moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."
def violates_moderation(text):
    """
    Check whether the text violates the OpenAI moderation API.
    """
    if "OPENAI_API_KEY" not in os.environ:
        print("OPENAI_API_KEY not found, skipping content moderation check...")
        return False
    url = "https://api.openai.com/v1/moderations"
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]
    }
    # Let requests serialize the payload: building the JSON body by string
    # concatenation breaks on quotes or backslashes in the user text.
    data = {"input": text.replace("\n", "")}
    try:
        ret = requests.post(url, headers=headers, json=data, timeout=5)
        flagged = ret.json()["results"][0]["flagged"]
    except (requests.exceptions.RequestException, KeyError):
        # Fail open: treat network errors or malformed responses as not flagged.
        flagged = False
    return flagged
def load_demo():
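    """Create a fresh conversation state when a client loads the demo."""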
state = default_conversation.copy()
return state
def regenerate(state: Conversation):
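    """Drop the last assistant reply so the previous user turn is answered again."""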
state.messages[-1][-1] = None
prev_human_msg = state.messages[-2]
    if isinstance(prev_human_msg[1], (tuple, list)):
        # Keep only (text, image) from the previous user message.
        prev_human_msg[1] = prev_human_msg[1][:2]
state.skip_next = False
return (state, state.to_gradio_chatbot(), "", None, disable_btn, disable_btn, disable_btn)
def clear_history():
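    """Reset the conversation and blank out every knowledge widget."""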
state = default_conversation.copy()
return (state, state.to_gradio_chatbot(), "", None) + (enable_btn, disable_btn, disable_btn) + knwl_none
def add_text(state: Conversation, text, image):
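    """Validate the user input and append it to the conversation.

    Empty input is skipped, flagged input is rejected, and a new conversation
    is started whenever an image was already submitted earlier.
    """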
if len(text) <= 0 and image is None:
state.skip_next = True
return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 3
if violates_moderation(text):
state.skip_next = True
return (state, state.to_gradio_chatbot(), moderation_msg, None) + (no_change_btn,) * 3
if image is not None:
text = (text, image)
if len(state.get_images(return_pil=True)) > 0:
state = default_conversation.copy()
state.append_message(state.roles[0], text)
state.append_message(state.roles[1], None)
state.skip_next = False
return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 3
def search(image, pos, topk, knwl_db, knwl_idx):
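    """Retrieve the top-k knowledge entries for a single image crop.

    FAISS returns 4 * topk approximate neighbors, which refine_cosine then
    re-ranks by exact cosine similarity before keeping the final topk.
    """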
with torch.cuda.amp.autocast():
image = query_trans(image).unsqueeze(0).to(device)
query = F.normalize(query_enc(image), dim=-1)
        query = query.cpu().numpy().astype(np.float32)  # faiss expects float32 queries
_, I = knwl_idx.search(query, 4*topk)
score, I = refine_cosine(knwl_db.feature, query, I, device, topk)
score, I = score.flatten(), I.flatten()
embd, text = knwl_db[I]
pos = np.full((topk, ), fill_value=pos)
query = torch.FloatTensor(query).unsqueeze(0).to(device)
embd = torch.FloatTensor(embd).unsqueeze(0).to(device)
pos = torch.LongTensor(pos).unsqueeze(0).to(device)
score = torch.FloatTensor(score).unsqueeze(0).to(device)
return query, embd, pos, score, text
def retrieve_knowledge(image):
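    """Query every knowledge DB at whole-image, five-crop, and nine-crop level.

    Returns the knowledge embeddings (for the model) and the raw text
    (for visualization), grouped by query type and crop position.
    """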
knwl_embd = {}
knwl_text = {}
for query_type, topk_q in topk.items():
if topk_q == 0: continue
if query_type == "whole":
images = [image, ]
knwl_text[query_type] = {i: {} for i in range(1)}
elif query_type == "five":
images = five_crop(image)
knwl_text[query_type] = {i: {} for i in range(5)}
elif query_type == "nine":
images = nine_crop(image)
knwl_text[query_type] = {i: {} for i in range(9)}
else:
            raise ValueError(f"unknown query type: {query_type}")
knwl_embd[query_type] = {}
for knwl_type, (knwl_db_t, knwl_idx_t) in knwl_db.items():
query, embed, pos, score = [], [], [], []
for i, img in enumerate(images):
query_i, embed_i, pos_i, score_i, text_i = search(
img, i, topk_q, knwl_db_t, knwl_idx_t
)
query.append(query_i)
embed.append(embed_i)
pos.append(pos_i)
score.append(score_i)
knwl_text[query_type][i][knwl_type] = text_i
query = torch.cat(query, dim=1)
embed = torch.cat(embed, dim=1)
pos = torch.cat(pos, dim=1)
score = torch.cat(score, dim=1)
knwl_embd[query_type][knwl_type] = {
"embed": embed, "query": query, "pos": pos, "score": score
}
return knwl_embd, knwl_text
@torch.inference_mode()
def generate(state: Conversation, temperature, top_p, max_new_tokens):
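    """Retrieve knowledge for the conversation image, then stream the reply.

    Yields (state, chatbot, buttons, knowledge visualization) updates as
    tokens arrive from the model.
    """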
if state.skip_next: # This generate call is skipped due to invalid inputs
yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 3 + knwl_unchange
return
if len(state.messages) == state.offset + 2: # First round of conversation
new_state = conv_templates["gptk"].copy()
new_state.append_message(new_state.roles[0], state.messages[-2][1])
new_state.append_message(new_state.roles[1], None)
state = new_state
# retrieve and visualize knowledge
image = state.get_images(return_pil=True)[0]
knwl_embd, knwl = retrieve_knowledge(image)
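    # Lay the retrieved text and crops out into the 15 visualization slots
    # (1 whole image + 5 five-crop + 9 nine-crop positions).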
knwl_img, knwl_txt, idx = [None, ] * 15, ["", ] * 15, 0
for query_type, knwl_pos in (("whole", 1), ("five", 5), ("nine", 9)):
if query_type == "whole":
images = [image, ]
elif query_type == "five":
images = five_crop(image)
elif query_type == "nine":
images = nine_crop(image)
for pos in range(knwl_pos):
try:
txt = ""
for k, v in knwl[query_type][pos].items():
v = ", ".join([vi.replace("_", " ") for vi in v])
txt += f"**[{k.upper()}]:** {v}\n\n"
knwl_txt[idx] += txt
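                # Apply only the first three (PIL-level) transform steps so the
                # crop stays a displayable image rather than a normalized tensor.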
img = images[pos]
img = query_trans.transforms[0](img)
img = query_trans.transforms[1](img)
img = query_trans.transforms[2](img)
knwl_img[idx] = img
except KeyError:
pass
idx += 1
knwl_vis = tuple(knwl_img + knwl_txt)
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 3 + knwl_vis
# generate output
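    # GPT-K consumes only the latest user utterance, so strip the <image>
    # placeholder and the role tags from the serialized prompt.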
prompt = state.get_prompt().replace("USER: <image>\n", "")
prompt = prompt.split("USER:")[-1].replace("ASSISTANT:", "")
image_pt = gptk_trans(image).to(device).unsqueeze(0)
samples = {"image": image_pt, "knowledge": knwl_embd, "prompt": prompt}
streamer = TextIteratorStreamer(
gptk_model.llm_tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15
)
thread = Thread(
target=gptk_model.generate,
kwargs=dict(
samples=samples,
use_nucleus_sampling=(temperature > 0.001),
max_length=min(int(max_new_tokens), 1024),
top_p=float(top_p),
temperature=float(temperature),
streamer=streamer,
num_beams=1,
length_penalty=0.0,
auto_cast=True
)
)
thread.start()
generated_text = ""
for new_text in streamer:
generated_text += new_text
state.messages[-1][-1] = generated_text + "β–Œ"
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 3 + knwl_unchange
time.sleep(0.03)
    if state.messages[-1][-1] is not None:
        state.messages[-1][-1] = state.messages[-1][-1][:-1]  # drop the "β–Œ" cursor
yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 3 + knwl_unchange
title_markdown = ("""
# GPT-K: Knowledge Augmented Vision-and-Language Assistant
""")
tos_markdown = ("""
### Terms of use
By using this service, users are required to agree to the following terms:
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
""")
learn_more_markdown = ("""
### License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
""")
def build_demo():
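    """Assemble the Gradio UI and register its event handlers."""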
textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
imagebox = gr.Image(type="pil")
with gr.Blocks(title="GPT-K", theme=gr.themes.Base()) as demo:
state = gr.State()
gr.Markdown(title_markdown)
with gr.Row():
with gr.Column(scale=3):
gr.Examples(examples=[
["examples/mona_lisa.jpg", "Discuss the historical impact and the significance of this painting in the art world."],
["examples/mona_lisa_dog.jpg", "Describe this photo in detail."],
["examples/horseshoe_bend.jpg", "What are the possible reasons of the formation of this sight?"],
], inputs=[imagebox, textbox])
imagebox.render()
with gr.Row():
with gr.Column(scale=8):
textbox.render()
with gr.Column(scale=1, min_width=60):
submit_btn = gr.Button(value="Submit")
with gr.Row():
regenerate_btn = gr.Button(value="πŸ”„ Regenerate", interactive=False, scale=1)
clear_btn = gr.Button(value="πŸ—‘οΈ Clear", interactive=False, scale=1)
with gr.Accordion("Parameters", open=True):
temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.1, interactive=True, label="Temperature",)
top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P",)
max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
with gr.Column(scale=6):
chatbot = gr.Chatbot(elem_id="chatbot", label="GPT-K Chatbot", height=550)
gr.Markdown("## Retrieved Knowledge")
knwl_img, knwl_txt = [], []
for query_type, knwl_pos in (("whole", 1), ("five", 5), ("nine", 9)):
with gr.Tab(query_type):
for p in range(knwl_pos):
with gr.Tab(str(p)):
with gr.Row():
with gr.Column(scale=1):
knwl_img.append(gr.Image(type="pil", show_label=False, interactive=False))
with gr.Column(scale=7):
knwl_txt.append(gr.Markdown())
knwl_vis = knwl_img + knwl_txt
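                # Must match generate()'s output order: all 15 images, then all 15 texts.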
gr.Markdown(tos_markdown)
gr.Markdown(learn_more_markdown)
# Register listeners
btn_list = [submit_btn, regenerate_btn, clear_btn]
regenerate_btn.click(
regenerate, [state], [state, chatbot, textbox, imagebox] + btn_list
).then(
generate,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list + knwl_vis
)
clear_btn.click(
clear_history, None, [state, chatbot, textbox, imagebox] + btn_list + knwl_vis
)
textbox.submit(
add_text, [state, textbox, imagebox], [state, chatbot, textbox, imagebox] + btn_list
).then(
generate,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list + knwl_vis
)
submit_btn.click(
add_text, [state, textbox, imagebox], [state, chatbot, textbox, imagebox] + btn_list
).then(
generate,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list + knwl_vis
)
demo.load(load_demo, None, [state])
return demo
def build_knowledge():
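    """Load the object, action, and attribute knowledge DBs with their FAISS indices."""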
def get_knwl(knowledge_db):
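        """Read one TextDB and add its features to the stored FAISS index."""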
knwl_db = TextDB(Path(knowledge_db)/"knowledge_db.hdf5")
        # Re-assigning the feature matrix forces any lazily loaded HDF5 data
        # into memory (a no-op if it is already a plain in-memory array).
        knwl_db.feature = knwl_db.feature
knwl_idx = faiss.read_index(str(Path(knowledge_db)/"faiss.index"))
knwl_idx.add(knwl_db.feature.astype(np.float32))
return knwl_db, knwl_idx
knwl_db = {
"obj": get_knwl('knowledge/(dataset-object)(clip-model-ViT-g-14)(dbscan)(eps-0.15)(ms-1)'),
"act": get_knwl('knowledge/(dataset-action)(clip-model-ViT-g-14)(dbscan)(eps-0.15)(ms-1)'),
"attr": get_knwl('knowledge/(dataset-attribute)(clip-model-ViT-g-14)(dbscan)(eps-0.15)(ms-1)'),
}
d_knwl = knwl_db["obj"][0].feature.shape[-1]
return knwl_db, d_knwl
def build_query_model():
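    """Create the fp16 CLIP ViT-g-14 visual encoder used to embed query images."""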
query_enc, _, query_trans = open_clip.create_model_and_transforms(
"ViT-g-14", pretrained="laion2b_s34b_b88k", precision='fp16'
)
query_enc = query_enc.visual.to(device).eval()
return query_enc, query_trans
def build_gptk_model():
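    """Load the GPT-K (Vicuna-7B) checkpoint and its image transform."""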
_, gptk_trans = get_gptk_image_transform()
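    # Number of knowledge entries retrieved per crop at each query granularity.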
topk = {"whole": 60, "five": 24, "nine": 16}
gptk_model = get_gptk_model(d_knwl=d_knwl, topk=topk)
gptk_ckpt = "model/ckpt/gptk-vicuna7b.pt"
gptk_ckpt = torch.load(gptk_ckpt, map_location="cpu")
gptk_model.load_state_dict(gptk_ckpt, strict=False)
gptk_model = gptk_model.to(device).eval()
return gptk_model, gptk_trans, topk
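
# Global resources used by the callbacks above, built once at startup.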
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
knwl_db, d_knwl = build_knowledge()
gptk_model, gptk_trans, topk = build_gptk_model()
query_enc, query_trans = build_query_model()
demo = build_demo()
demo.queue().launch()