Mit1208 committed on
Commit
b57f5ff
1 Parent(s): 72781d4

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +54 -1
README.md CHANGED
@@ -46,6 +46,59 @@ Use the code below to get started with the model.
46
 
47
  [More Information Needed]
48
 
49
- ### Training Procedure
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
  <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
 
46
 
47
  [More Information Needed]
48
 
49
+ ### Inference Procedure
50
+
51
+ ```python
52
+
53
+ !pip install -qU transformers
54
+ !pip install -qU accelerate bitsandbytes einops flash_attn timm
55
+ !pip install -q datasets
56
+
57
+ from PIL import Image
58
+ import requests
59
+ import torch
60
+ from transformers import AutoProcessor, AutoModelForVision2Seq, BitsAndBytesConfig, TrainingArguments, AutoModelForCausalLM
61
+ import requests
62
+ import re
63
+ from transformers import AutoConfig, AutoProcessor, AutoModelForCausalLM
64
+
65
+ base_model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True,)
66
+ processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True,)
67
+ model = AutoModelForCausalLM.from_pretrained("Mit1208/Florence-2-DocLayNet", trust_remote_code=True, config = base_model.config)
68
+
69
+ def run_example(task_prompt, image, text_input=None):
70
+ if text_input is None:
71
+ prompt = task_prompt
72
+ else:
73
+ prompt = task_prompt + text_input
74
+ print(prompt)
75
+ inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
76
+ generated_ids = model.generate(
77
+ input_ids=inputs["input_ids"],
78
+ pixel_values=inputs["pixel_values"],
79
+ max_new_tokens=1024,
80
+ early_stopping=False,
81
+ do_sample=False,
82
+ num_beams=3,
83
+ )
84
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
85
+ print(generated_text)
86
+ parsed_answer = processor.post_process_generation(
87
+ generated_text,
88
+ task=task_prompt,
89
+ image_size=(image.width, image.height)
90
+ )
91
+
92
+ return parsed_answer
93
+
94
+ from PIL import Image
95
+ import requests
96
+
97
+ image = Image.open('form-1.png').convert('RGB')
98
+ task_prompt = '<OD>'
99
+ results = run_example(task_prompt, example['image'].resize(size=(1000, 1000)))
100
+ print(results)
101
+
102
+ ```
103
 
104
  <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->