# GPT-Vision — modeling_gpt2vision.py
# (Hugging Face file-viewer header removed; original upload by damerajee,
#  commit 39f38b0, "Update modeling_gpt2vision.py", 3.66 kB.)
import re

import torch
from torch import nn
from transformers import AutoTokenizer, GPT2Config, PreTrainedModel

from .configuration_gpt2vision import GPT2VisionConfig
from .modeling_gpt2 import GPT2LMHeadModel
from .vision_encoder import VisionEncoder
IMAGE_TOKEN = "<image>"
ANSWER_EOS = "<|endoftext|>"
def resize_token_embeds(model_name="openai-community/gpt2", extra_special_tokens=None):
    """Load the base GPT-2 tokenizer and register the ``<image>`` special token.

    NOTE: despite the name, this only extends the tokenizer vocabulary; the
    model's embedding matrix is resized separately inside ``GPT2Vision``.

    Args:
        model_name: Hugging Face model id of the tokenizer to load.
        extra_special_tokens: optional iterable of additional special tokens
            to register alongside ``IMAGE_TOKEN`` (backward-compatible
            generalization; default adds only ``IMAGE_TOKEN``).

    Returns:
        The tokenizer with the special token(s) added.
    """
    tok = AutoTokenizer.from_pretrained(model_name)
    special_tokens = [IMAGE_TOKEN] + list(extra_special_tokens or [])
    tok.add_special_tokens({"additional_special_tokens": special_tokens})
    return tok


# Module-level tokenizer shared by all GPT2Vision instances.
tokenizer = resize_token_embeds()
class GPT2Vision(PreTrainedModel):
    """GPT-2 language model conditioned on precomputed image embeddings.

    Prompts may contain a single ``<image>`` placeholder; at that position
    the image embeddings are spliced into the token-embedding sequence
    before generation.
    """

    config_class = GPT2VisionConfig

    def __init__(self, config):
        super().__init__(config)
        self.vision_encoder = VisionEncoder()
        # Build the language model BEFORE resizing its embeddings.  The
        # original called self.language_model.resize_token_embeddings(...)
        # first, but no `language_model` attribute ever existed and
        # `text_model` was only created afterwards, so construction raised
        # AttributeError.
        if isinstance(config.gpt2_config, dict):
            gpt2_config = GPT2Config(**config.gpt2_config)
        else:
            gpt2_config = config.gpt2_config
        self.text_model = GPT2LMHeadModel(gpt2_config)
        self.tokenizer = tokenizer
        # GPT-2 has no dedicated pad token; reuse EOS for padding.
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.image_token_id = self.tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
        # The tokenizer gained the <image> special token at import time;
        # grow the embedding matrix to match the new vocabulary size.
        self.text_model.resize_token_embeddings(len(self.tokenizer))

    @property
    def device(self):
        """Device of the underlying language model."""
        return self.text_model.device

    def encode_image(self, image, device):
        """Run the vision encoder on ``image`` and return its embeddings."""
        return self.vision_encoder(image, device=device)

    def input_embeds(self, prompt, image_embeds, tokenizer):
        """Build the inputs_embeds tensor for ``prompt``.

        A BOS embedding is always prepended.  If the prompt contains exactly
        one ``<image>`` marker, the image embeddings are inserted at that
        position, wrapped by ``<image>`` / ``</image>`` text spans; otherwise
        the prompt is embedded as plain text.

        Returns:
            Tensor of shape ``(1, seq_len, hidden_dim)`` on ``self.device``.
        """
        def _tokenize(txt):
            return tokenizer(
                txt, return_tensors="pt", add_special_tokens=False
            ).input_ids.to(self.device)

        text_emb = self.text_model.get_input_embeddings()
        # Always start with the BOS token.
        embeds = [
            text_emb(torch.tensor([[tokenizer.bos_token_id]], device=self.device))
        ]
        if IMAGE_TOKEN not in prompt:
            embeds.append(text_emb(_tokenize(prompt)))
        else:
            assert prompt.count(IMAGE_TOKEN) == 1
            before, after = prompt.split(IMAGE_TOKEN)
            embeds.append(text_emb(_tokenize(f"{before}{IMAGE_TOKEN}")))
            embeds.append(image_embeds.to(self.device))
            embeds.append(text_emb(_tokenize(f"</image>{after}")))
        return torch.cat(embeds, dim=1)

    def generate(
        self,
        image_embeds,
        prompt,
        tokenizer,
        eos_text="<|endoftext|>",
        max_new_tokens=128,
        **kwargs,
    ):
        """Generate text conditioned on ``image_embeds`` and ``prompt``.

        Args:
            image_embeds: image embedding tensor from :meth:`encode_image`.
            prompt: text prompt, optionally containing one ``<image>`` marker.
            tokenizer: tokenizer used to encode the prompt and decode output.
            eos_text: text whose token ids terminate generation.
            max_new_tokens: generation length cap.
            **kwargs: forwarded to ``text_model.generate``.

        Returns:
            List of decoded output strings (special tokens stripped).
        """
        eos_tokens = tokenizer(eos_text, add_special_tokens=False)["input_ids"]
        generate_config = {
            "eos_token_id": eos_tokens,
            "bos_token_id": tokenizer.bos_token_id,
            "pad_token_id": tokenizer.eos_token_id,
            "max_new_tokens": max_new_tokens,
            **kwargs,
        }
        with torch.no_grad():
            inputs_embeds = self.input_embeds(prompt, image_embeds, tokenizer)
            # (debug print of inputs_embeds.size() removed)
            output_ids = self.text_model.generate(
                inputs_embeds=inputs_embeds, **generate_config
            )
        return tokenizer.batch_decode(output_ids, skip_special_tokens=True)

    def answer_question(
        self,
        image_embeds,
        question,
        tokenizer,
        chat_history="",
        result_queue=None,
        **kwargs,
    ):
        """Answer ``question`` about an image, returning the first decoded string.

        Args:
            image_embeds: image embedding tensor from :meth:`encode_image`.
            question: the user's question.
            tokenizer: tokenizer for encoding/decoding.
            chat_history: prior dialogue prepended to the prompt.
            result_queue: accepted for API compatibility; unused here.
            **kwargs: forwarded to :meth:`generate`.
        """
        prompt = f"{IMAGE_TOKEN}\n\n{chat_history}Question: {question}\n\nAnswer: "
        answer = self.generate(
            image_embeds,
            prompt,
            tokenizer,
            eos_text=ANSWER_EOS,
            max_new_tokens=256,
            **kwargs,
        )[0]
        return answer