ttengwang committed
Commit
af88c78
1 Parent(s): 35b6cee

replace 'white part' with 'white background' in the BLIP prompt

Files changed (2)
  1. app.py +2 -2
  2. captioner/blip2.py +2 -2
app.py CHANGED
@@ -365,5 +365,5 @@ with gr.Blocks(
         outputs=[chatbot, state, click_state, chat_input, image_input, wiki_output],
         show_progress=False, queue=True)
 
-    iface.queue(concurrency_count=5, api_open=False, max_size=10)
-    iface.launch(server_name="0.0.0.0", enable_queue=True)
+    iface.queue(concurrency_count=1, api_open=False, max_size=10)
+    iface.launch(server_name="0.0.0.0", enable_queue=True, server_port=args.port, share=args.gradio_share)
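Note: the updated launch() call reads args.port and args.gradio_share, which are not defined in this diff. A minimal sketch of how such an args namespace could be supplied elsewhere in app.py (the flag names, defaults, and placeholder UI below are assumptions, not taken from the file):

# Sketch only: supplies the `args` namespace referenced by the updated launch() call.
import argparse
import gradio as gr

parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=7860, help='port for the Gradio server (assumed flag)')
parser.add_argument('--gradio_share', action='store_true', help='create a public share link (assumed flag)')
args = parser.parse_args()

with gr.Blocks() as iface:
    gr.Markdown('placeholder UI')  # stands in for the real interface built in app.py

# Mirrors the changed lines: a single-worker queue plus an explicit port and
# optional share link (enable_queue is a Gradio 3.x launch() argument).
iface.queue(concurrency_count=1, api_open=False, max_size=10)
iface.launch(server_name="0.0.0.0", enable_queue=True, server_port=args.port, share=args.gradio_share)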
captioner/blip2.py CHANGED
@@ -22,7 +22,7 @@ class BLIP2Captioner(BaseCaptioner):
         image = Image.open(image)
 
         if not self.dialogue:
-            text_prompt = 'Context: ignore the white part in this image. Question: describe this image. Answer:'
+            text_prompt = 'Context: ignore the white background in this image. Question: describe this image. Answer:'
             inputs = self.processor(image, text = text_prompt, return_tensors="pt").to(self.device, self.torch_dtype)
             out = self.model.generate(**inputs, max_new_tokens=50)
             captions = self.processor.decode(out[0], skip_special_tokens=True).strip()
@@ -53,4 +53,4 @@ if __name__ == '__main__':
     seg_mask = np.zeros((224,224))
     seg_mask[50:200, 50:200] = 1
     print(f'process image {image_path}')
-    print(model.inference_seg(image_path, seg_mask))
+    print(model.inference_seg(image_path, seg_mask))
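For context, the changed prompt is the text passed to the BLIP-2 processor a few lines below it. A minimal, self-contained sketch of that path, assuming the standard Hugging Face transformers classes behind self.processor and self.model (the checkpoint name, device handling, and image path are assumptions, not taken from captioner/blip2.py):

# Sketch of how the new prompt flows through BLIP-2.
import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

# Assumed checkpoint; the repo may load a different one.
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", torch_dtype=torch_dtype).to(device)

image = Image.open("example.jpg")  # hypothetical input image
text_prompt = 'Context: ignore the white background in this image. Question: describe this image. Answer:'

# Same call pattern as the diff: encode image + prompt, generate, decode.
inputs = processor(image, text=text_prompt, return_tensors="pt").to(device, torch_dtype)
out = model.generate(**inputs, max_new_tokens=50)
caption = processor.decode(out[0], skip_special_tokens=True).strip()
print(caption)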