J-LAB committed (verified)
Commit 9c53151 · 1 Parent(s): 2ff3a1c

Update app.py

Files changed (1)
  1. app.py +24 -0
app.py CHANGED
@@ -20,6 +20,30 @@ processors = {
  DESCRIPTION = "# [Florence-2 Product Describe by Fluxi IA](https://huggingface.co/microsoft/Florence-2-large)"

  @spaces.GPU
+ def run_example(task_prompt, image, text_input=None, model_id='J-LAB/Florence-Idesire'):
+     model = models[model_id]
+     processor = processors[model_id]
+     if text_input is None:
+         prompt = task_prompt
+     else:
+         prompt = task_prompt + text_input
+     inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
+     generated_ids = model.generate(
+         input_ids=inputs["input_ids"],
+         pixel_values=inputs["pixel_values"],
+         max_new_tokens=1024,
+         early_stopping=False,
+         do_sample=False,
+         num_beams=3,
+     )
+     generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
+     parsed_answer = processor.post_process_generation(
+         generated_text,
+         task=task_prompt,
+         image_size=(image.width, image.height)
+     )
+     return parsed_answer
+
  def process_image(image, task_prompt, text_input=None, model_id='J-LAB/Florence_2_B_FluxiAI_Product_Caption'):
      image = Image.fromarray(image)  # Convert NumPy array to PIL Image
      if task_prompt == 'Product Caption':
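
Note (not part of the commit): below is a minimal, self-contained sketch of the inference flow that the new run_example helper wraps. It is illustrative only; the public microsoft/Florence-2-large checkpoint and the <MORE_DETAILED_CAPTION> task token are assumptions, whereas the Space itself looks up its model and processor in the models/processors dicts keyed by the J-LAB fine-tune IDs.

    # Sketch of a standalone Florence-2 captioning call, mirroring run_example above.
    # Model id, image URL, and task token are assumptions for illustration.
    import requests
    import torch
    from PIL import Image
    from transformers import AutoModelForCausalLM, AutoProcessor

    model_id = "microsoft/Florence-2-large"  # assumed public checkpoint
    device = "cuda" if torch.cuda.is_available() else "cpu"

    model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).to(device)
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

    # Any RGB PIL image works; here a sample image is fetched over HTTP.
    url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    task_prompt = "<MORE_DETAILED_CAPTION>"  # standard Florence-2 task token
    inputs = processor(text=task_prompt, images=image, return_tensors="pt").to(device)

    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        do_sample=False,
        num_beams=3,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = processor.post_process_generation(
        generated_text,
        task=task_prompt,
        image_size=(image.width, image.height),
    )
    print(parsed_answer)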