supersolar committed on
Commit
c2469bd
·
verified ·
1 Parent(s): 0cb853f

Update utils/utils.py

Browse files
Files changed (1) hide show
  1. utils/utils.py +58 -0
utils/utils.py CHANGED
@@ -2,6 +2,64 @@
2
  import torch
3
  import supervision as sv
4
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  def detect_objects_in_image(image_input_path, texts, device):
7
  # 加载图像
 
2
  import torch
3
  import supervision as sv
4
  from PIL import Image
5
+ import os
6
+ from typing import Union, Any, Tuple, Dict
7
+ from unittest.mock import patch
8
+
9
+ import torch
10
+ from PIL import Image
11
+ from transformers import AutoModelForCausalLM, AutoProcessor
12
+ from transformers.dynamic_module_utils import get_imports
13
+
14
# Hugging Face model id of the Florence-2 checkpoint loaded by default.
FLORENCE_CHECKPOINT = "microsoft/Florence-2-base"
# Florence-2 task prompt tokens; one of these is prepended to the text
# prompt to select the model's behavior for a single inference call.
FLORENCE_OBJECT_DETECTION_TASK = '<OD>'
FLORENCE_DETAILED_CAPTION_TASK = '<MORE_DETAILED_CAPTION>'
FLORENCE_CAPTION_TO_PHRASE_GROUNDING_TASK = '<CAPTION_TO_PHRASE_GROUNDING>'
FLORENCE_OPEN_VOCABULARY_DETECTION_TASK = '<OPEN_VOCABULARY_DETECTION>'
FLORENCE_DENSE_REGION_CAPTION_TASK = '<DENSE_REGION_CAPTION>'
20
+
21
+
22
def fixed_get_imports(filename: Union[str, os.PathLike]) -> list[str]:
    """Work around for https://huggingface.co/microsoft/phi-1_5/discussions/72.

    Florence-2's remote code declares ``flash_attn`` as an import even when
    it is not actually required; this wrapper strips it from the import list
    so ``from_pretrained(..., trust_remote_code=True)`` does not try to
    install/resolve it.

    :param filename: path of the dynamically loaded module being inspected.
    :return: the module's import list, minus ``flash_attn`` for
        ``modeling_florence2.py``.
    """
    if not str(filename).endswith("/modeling_florence2.py"):
        return get_imports(filename)
    imports = get_imports(filename)
    # Fix: list.remove raises ValueError when the item is absent — only
    # remove "flash_attn" if the remote module actually declares it.
    if "flash_attn" in imports:
        imports.remove("flash_attn")
    return imports
29
+
30
+
31
def load_florence_model(
    device: torch.device, checkpoint: str = FLORENCE_CHECKPOINT
) -> Tuple[Any, Any]:
    """Load a Florence-2 model and its processor.

    Fixes: the original ignored both parameters — it re-derived the device
    from ``torch.cuda.is_available()`` and hard-coded the checkpoint id in
    both ``from_pretrained`` calls. Both arguments are now honored, so
    callers can select an alternative device or checkpoint.

    :param device: device the model is moved to.
    :param checkpoint: Hugging Face model id (defaults to FLORENCE_CHECKPOINT).
    :return: ``(model, processor)`` pair.
    """
    # fp16 only makes sense on CUDA; CPU inference stays in fp32.
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    # trust_remote_code is required: Florence-2 ships custom modeling code.
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint, torch_dtype=torch_dtype, trust_remote_code=True
    ).to(device)
    processor = AutoProcessor.from_pretrained(checkpoint, trust_remote_code=True)
    return model, processor
39
+
40
+
41
def run_florence_inference(
    model: Any,
    processor: Any,
    device: torch.device,
    image: Image,
    task: str,
    text: str = ""
) -> Tuple[str, Dict]:
    """Run one Florence-2 generation pass for the given task prompt.

    :param model: Florence-2 model (as returned by ``load_florence_model``).
    :param processor: matching Florence-2 processor.
    :param device: device the encoded inputs are moved to.
    :param image: input PIL image.
    :param task: Florence-2 task token (e.g. ``'<OD>'``).
    :param text: optional extra text appended to the task prompt.
    :return: ``(generated_text, response)`` — the raw decoded string and the
        task-specific post-processed result.
    """
    # The task token is simply prefixed to any free-form text prompt.
    encoded = processor(text=task + text, images=image, return_tensors="pt").to(device)
    # Beam search with a generous token budget, matching Florence-2 usage.
    output_ids = model.generate(
        input_ids=encoded["input_ids"],
        pixel_values=encoded["pixel_values"],
        max_new_tokens=1024,
        num_beams=3
    )
    # Special tokens are kept: the post-processor parses them for regions.
    raw_text = processor.batch_decode(output_ids, skip_special_tokens=False)[0]
    parsed = processor.post_process_generation(
        raw_text, task=task, image_size=image.size)
    return raw_text, parsed
62
+
63
 
64
  def detect_objects_in_image(image_input_path, texts, device):
65
  # 加载图像