import gradio as gr
import peft
from peft import LoraConfig
from transformers import AutoTokenizer, BitsAndBytesConfig, AutoModelForCausalLM, CLIPVisionModel, AutoProcessor
import torch
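# Multimodal pipeline: a CLIP vision encoder produces image patch embeddings, a linear
# projection maps them into phi-2's embedding space, and a LoRA-adapted phi-2 generates
# the answer conditioned on [image embeddings | separator token | question embeddings].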
clip_model_name = "openai/clip-vit-base-patch32"
phi_model_name = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(phi_model_name, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(clip_model_name)
tokenizer.pad_token = tokenizer.eos_token
IMAGE_TOKEN_ID = 23893  # token id for the word 'comment', used as the image/question separator
device = "cuda" if torch.cuda.is_available() else "cpu"
clip_embed = 768
phi_embed = 2560
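# hidden sizes: CLIP ViT-B/32 patch embeddings are 768-dim, phi-2 token embeddings are 2560-dim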
# models
clip_model = CLIPVisionModel.from_pretrained(clip_model_name).to(device)
projection = torch.nn.Linear(clip_embed, phi_embed).to(device)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
phi_model = AutoModelForCausalLM.from_pretrained(
    phi_model_name,
    torch_dtype=torch.float32,
    quantization_config=bnb_config,
    trust_remote_code=True,
)
lora_alpha = 16
lora_dropout = 0.1
lora_r = 64
peft_config = LoraConfig(
    lora_alpha=lora_alpha,
    lora_dropout=lora_dropout,
    r=lora_r,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj",
        "k_proj",
        "v_proj",
        "fc1",
        "fc2",
    ],
)
peft_model = peft.get_peft_model(phi_model, peft_config).to(device)
# load trained weights: LoRA adapter for phi-2 and the CLIP -> phi-2 projection layer
model_to_merge = peft.PeftModel.from_pretrained(phi_model, './model_chkpt/lora_adaptor')
merged_model = model_to_merge.merge_and_unload()
projection.load_state_dict(torch.load('./model_chkpt/step2_projection.pth', map_location=device))
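# Inference: build the sequence [image patch embeds | separator token embed | question embeds]
# and generate the answer token by token with greedy decoding.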
def model_generate_ans(img, val_q):
    max_generate_length = 100
    # image: CLIP patch embeddings (CLS token dropped), projected into phi-2 embedding space
    image_processed = processor(images=img, return_tensors="pt").to(device)
    clip_val_outputs = clip_model(**image_processed).last_hidden_state[:, 1:, :]
    val_image_embeds = projection(clip_val_outputs).to(torch.float16)
    # separator token placed between the image embeddings and the question
    img_token_tensor = torch.tensor(IMAGE_TOKEN_ID).to(device)
    img_token_embeds = peft_model.model.model.embed_tokens(img_token_tensor).unsqueeze(0).unsqueeze(0)
    # question tokens
    val_q_tokenised = tokenizer(val_q, return_tensors="pt", return_attention_mask=False)['input_ids'].squeeze(0).to(device)
    val_q_embeds = peft_model.model.model.embed_tokens(val_q_tokenised).unsqueeze(0)
    val_combined_embeds = torch.cat([val_image_embeds, img_token_embeds, val_q_embeds], dim=1)  # (1, seq_len, 2560)
    # greedy decoding
    predicted_caption = torch.full((1, max_generate_length), 50256)  # pre-filled with the eos token id
    for g in range(max_generate_length):
        phi_output_logits = peft_model(inputs_embeds=val_combined_embeds)['logits']  # (1, seq_len, 51200)
        predicted_word_token_logits = phi_output_logits[:, -1, :].unsqueeze(1)  # (1, 1, 51200)
        predicted_word_token = torch.argmax(predicted_word_token_logits, dim=-1)  # (1, 1)
        predicted_caption[:, g] = predicted_word_token.view(1, -1).to('cpu')
        if predicted_word_token.item() == tokenizer.eos_token_id:
            break
        # feed the new token back in: append its embedding before the next step
        next_token_embeds = peft_model.model.model.embed_tokens(predicted_word_token)
        val_combined_embeds = torch.cat([val_combined_embeds, next_token_embeds], dim=1)
    predicted_captions_decoded = tokenizer.batch_decode(predicted_caption, skip_special_tokens=True)[0]
    return predicted_captions_decoded
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Chat with MultiModal GPT!
        Built by combining the CLIP vision model with the phi-2 language model.
        """
    )
    # app GUI
    with gr.Row():
        with gr.Column():
            img_input = gr.Image(label='Image')
            img_question = gr.Text(label='Question')
        with gr.Column():
            img_answer = gr.Text(label='Answer')
    section_btn = gr.Button("Submit")
    section_btn.click(model_generate_ans, inputs=[img_input, img_question], outputs=[img_answer])

if __name__ == "__main__":
    demo.launch()