sooh-j committed on
Commit 0a6c0fe
1 Parent(s): 254bc93

Update handler.py

Files changed (1): handler.py +2 -28
handler.py CHANGED
@@ -12,36 +12,13 @@ class EndpointHandler():
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
         self.model_base = "Salesforce/blip2-opt-2.7b"
         self.model_name = "sooh-j/blip2-vizwizqa"
-        # self.base_model = Blip2ForConditionalGeneration.from_pretrained(self.model_base, load_in_8bit=True)
         # self.pipe = Blip2ForConditionalGeneration.from_pretrained(self.model_base, load_in_8bit=True, torch_dtype=torch.float16)

-        quantization_config = BitsAndBytesConfig(load_in_8bit=True)
-
-        # self.processor = Blip2Processor.from_pretrained(self.model_name)
         self.processor = AutoProcessor.from_pretrained(self.model_name)
-        # self.model = BlipForQuestionAnswering.from_pretrained(self.model_name,
-        # self.model = AutoModelForCausalLM.from_pretrained(self.model_name,
         self.model = Blip2ForConditionalGeneration.from_pretrained(self.model_name,
                                                                    device_map="auto",
-                                                                   # load_in_8bit=True,
-                                                                   # quantization_config=quantization_config,
                                                                    ).to(self.device)
-        # self.model = PeftModel.from_pretrained(self.model_name, self.base_model_name).to(self.device)

-        # inputs = data.get("inputs")
-        # imageBase64 = inputs.get("image")
-        # # imageURL = inputs.get("image")
-        # text = inputs.get("text")
-        # # print(imageURL)
-        # # print(text)
-        # # image = Image.open(requests.get(imageBase64, stream=True).raw)
-
-        # image = Image.open(BytesIO(base64.b64decode(imageBase64.split(",")[1].encode())))
-        # inputs = self.processor(text=text, images=image, return_tensors="pt", padding=True)
-        # outputs = self.model(**inputs)
-        # embeddings = outputs.image_embeds.detach().numpy().flatten().tolist()
-        # return { "embeddings": embeddings }
-
     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
         """
         data args:
@@ -89,15 +66,12 @@ class EndpointHandler():
         # processed_images = self.processor(images=raw_images, return_tensors="pt")
         # processed_images["pixel_values"] = processed_images["pixel_values"].to(device)
         # processed_images = {**processed_images, **parameters}
-
-        # with torch.no_grad():
-        #     out = self.model.generate(**processed_images)
-        #     captions = self.processor.batch_decode(out, skip_special_tokens=True)
+
         ####


         prompt = f"Question: {question}, Answer:"
-        processed = self.processor(images=image, text=prompt, return_tensors="pt").to(self.device)#, torch.float16)
+        processed = self.processor(images=image, text=prompt, return_tensors="pt").to(self.device)

         # answer = self._generate_answer(
         #     model_path, prompt, image,
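
A note on the quantization path this commit deletes: `BitsAndBytesConfig(load_in_8bit=True)` was created but never passed to `from_pretrained` (the `load_in_8bit=True` / `quantization_config=...` kwargs were already commented out), so removing it changes nothing at runtime. It also could not simply be re-enabled as written: transformers refuses `.to(self.device)` on an 8-bit-quantized model, because `device_map="auto"` has already placed the weights. A minimal sketch of how the 8-bit load would look if restored (assumes `bitsandbytes` is installed; the standalone names are illustrative, not part of this commit):

    # Sketch only: re-enabling the 8-bit load that this commit removed.
    # Requires `bitsandbytes`. Note there is no trailing `.to(device)` here:
    # calling `.to()` on an 8-bit model is rejected by transformers, since
    # `device_map="auto"` has already dispatched the weights.
    from transformers import AutoProcessor, BitsAndBytesConfig, Blip2ForConditionalGeneration

    model_name = "sooh-j/blip2-vizwizqa"
    quantization_config = BitsAndBytesConfig(load_in_8bit=True)

    processor = AutoProcessor.from_pretrained(model_name)
    model = Blip2ForConditionalGeneration.from_pretrained(
        model_name,
        device_map="auto",
        quantization_config=quantization_config,
    )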
 
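
In the second hunk, the kept line only loses a stale trailing comment (`#, torch.float16)`). That remnant hints at the usual companion step when the model runs in 8-bit or float16: casting the floating-point inputs to float16 alongside the device move. A hedged end-to-end sketch of the VQA step, continuing from the load sketch above; the `generate`/`batch_decode` pair mirrors the commented-out caption code deleted in this hunk, and the image path, question, and `max_new_tokens` are placeholders:

    # Sketch only: the inference step around the lines this commit keeps.
    # `processor` and `model` come from the 8-bit load sketch above.
    import torch
    from PIL import Image

    image = Image.open("example.jpg").convert("RGB")   # placeholder image
    question = "What is in the picture?"               # placeholder question
    prompt = f"Question: {question}, Answer:"          # prompt format from handler.py

    # BatchFeature.to moves the tensors and casts only the floating-point
    # ones, so input_ids stay integer while pixel_values become float16.
    processed = processor(images=image, text=prompt, return_tensors="pt")
    processed = processed.to(model.device, torch.float16)

    with torch.no_grad():
        out = model.generate(**processed, max_new_tokens=20)

    answer = processor.batch_decode(out, skip_special_tokens=True)[0].strip()
    print(answer)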