|
from typing import Dict, List, Any |
|
from transformers import pipeline |
|
from PIL import Image |
|
import requests |
|
from transformers import AutoModelForCausalLM, LlamaTokenizer |
|
import torch |
|
|
|
|
|
|
|
|
|
|
|
|
|
import os |
|
|
|
import logging |
|
|
|
|
|
|
|
|
|
# Module-level side effect: configure the root logger once at import time so
# the serving runtime surfaces INFO-level messages from transformers/torch.
logging.basicConfig(level=logging.INFO)
|
|
|
|
|
class EndpointHandler:
    """Inference handler for the CogVLM grounding-generalist model.

    Loads the Vicuna-7B tokenizer and the CogVLM checkpoint onto the GPU at
    construction time, then answers visual-grounding queries via ``__call__``.
    Intended for a Hugging Face custom inference endpoint (hence the fixed
    ``__init__(path)`` / ``__call__(data)`` shape).
    """

    def __init__(self, path: str = ""):
        """Load tokenizer and model onto the GPU.

        Args:
            path: Unused; kept to satisfy the endpoint-handler interface.
        """
        # CogVLM is released without its own tokenizer; the model card pairs
        # it with the Vicuna-7B tokenizer.
        self.tokenizer = LlamaTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5")

        self.model = (
            AutoModelForCausalLM.from_pretrained(
                "THUDM/cogvlm-grounding-generalist-hf",
                torch_dtype=torch.bfloat16,
                low_cpu_mem_usage=True,
                trust_remote_code=True,
            )
            .to("cuda")
            .eval()
        )

    def __call__(self, data: Dict[str, Any]) -> str:
        """Run one grounding query against a remote image.

        Args:
            data: Request payload with keys:
                ``inputs`` (str): the text query.
                ``img_uri`` (str): URL of the image to ground against.

        Returns:
            The decoded model response as a string.

        Raises:
            KeyError: if ``inputs`` or ``img_uri`` is missing from ``data``.
            requests.HTTPError: if the image download fails.
        """
        query = data["inputs"]
        img_uri = data["img_uri"]

        # Stream the image body; fail fast on HTTP errors or a hung server
        # instead of handing garbage/partial bytes to PIL.
        response = requests.get(img_uri, stream=True, timeout=30)
        response.raise_for_status()
        image = Image.open(response.raw).convert("RGB")

        # CogVLM's custom preprocessing builds the multimodal prompt tensors.
        inputs = self.model.build_conversation_input_ids(
            self.tokenizer, query=query, images=[image]
        )
        # Add the batch dimension and move everything to the GPU; the vision
        # tensors must match the model dtype (bfloat16).
        inputs = {
            "input_ids": inputs["input_ids"].unsqueeze(0).to("cuda"),
            "token_type_ids": inputs["token_type_ids"].unsqueeze(0).to("cuda"),
            "attention_mask": inputs["attention_mask"].unsqueeze(0).to("cuda"),
            "images": [[inputs["images"][0].to("cuda").to(torch.bfloat16)]],
        }
        gen_kwargs = {"max_length": 2048, "do_sample": False}

        with torch.no_grad():
            outputs = self.model.generate(**inputs, **gen_kwargs)
            # Strip the prompt tokens; keep only the newly generated tail.
            outputs = outputs[:, inputs["input_ids"].shape[1] :]
            result = self.tokenizer.decode(outputs[0])
        return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|