Norm committed
Commit df9d779
1 Parent(s): 2484535

Update README.md

Files changed (1):
  1. README.md +26 -20
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-license: afl-3.0
+license: mit
 ---
 
 # ERNIE-Layout_Pytorch
@@ -10,45 +10,45 @@ The model is translated from [PaddlePaddle/ernie-layoutx-base-uncased](https://h
 **A Quick Example**
 ```python
 import torch
-from networks.modeling_erine_layout import ErnieLayoutConfig, ErnieLayoutForQuestionAnswering
-from networks.feature_extractor import ErnieFeatureExtractor
-from networks.tokenizer import ErnieLayoutTokenizer
-from networks.model_util import ernie_qa_tokenize, prepare_context_info
 from PIL import Image
+import numpy as np
+import torch.nn.functional as F
+from networks.model_util import ernie_qa_processing
+from networks import ErnieLayoutConfig, ErnieLayoutForQuestionAnswering, ErnieLayoutImageProcessor, \
+    ERNIELayoutProcessor, ErnieLayoutTokenizerFast
 
-
-pretrain_torch_model_or_path = "path/to/pretrained/mode"
-doc_imag_path = "path/to/doc/image"
+pretrain_torch_model_or_path = "Norm/ERNIE-Layout-Pytorch"
+doc_imag_path = "/path/to/dummy_input.jpeg"
 
 device = torch.device("cuda:0")
 
-# initialize tokenizer
-tokenizer = ErnieLayoutTokenizer.from_pretrained(pretrained_model_name_or_path=pretrain_torch_model_or_path)
+# Dummy Input
 context = ['This is an example document', 'All ocr boxes are inserted into this list']
 layout = [[381, 91, 505, 115], [738, 96, 804, 122]] # all boxes are resized between 0 - 1000
+pil_image = Image.open(doc_imag_path).convert("RGB")
+
+# initialize tokenizer
+tokenizer = ErnieLayoutTokenizerFast.from_pretrained(pretrained_model_name_or_path=pretrain_torch_model_or_path)
 
 # initialize feature extractor
-feature_extractor = ErnieFeatureExtractor()
+feature_extractor = ErnieLayoutImageProcessor(apply_ocr=False)
+processor = ERNIELayoutProcessor(image_processor=feature_extractor, tokenizer=tokenizer)
 
 # Tokenize context & questions
-context_encodings = prepare_context_info(tokenizer, context, layout)
+context_encodings = processor(pil_image, context)
 question = "what is it?"
-tokenized_res = ernie_qa_tokenize(tokenizer, question, context_encodings)
+tokenized_res = ernie_qa_processing(tokenizer, question, layout, context_encodings)
 tokenized_res['input_ids'] = torch.tensor([tokenized_res['input_ids']]).to(device)
 tokenized_res['bbox'] = torch.tensor([tokenized_res['bbox']]).to(device)
+tokenized_res['pixel_values'] = torch.tensor(np.array(context_encodings.data['pixel_values'])).to(device)
 
-# answer start && end index
+# dummy answer start && end index
 tokenized_res['start_positions'] = torch.tensor([6]).to(device)
 tokenized_res['end_positions'] = torch.tensor([12]).to(device)
 
-
-# open the image of the document and process image
-tokenized_res['pixel_values'] = feature_extractor(Image.open(doc_imag_path).convert("RGB")).unsqueeze(0).to(device)
-
-
 # initialize config
 config = ErnieLayoutConfig.from_pretrained(pretrained_model_name_or_path=pretrain_torch_model_or_path)
-config.num_classes = 2 # start and end
+config.num_classes = 2 # start and end
 
 # initialize ERNIE for VQA
 model = ErnieLayoutForQuestionAnswering.from_pretrained(
@@ -59,5 +59,11 @@ model.to(device)
 
 output = model(**tokenized_res)
 
+# decode output
+start_max = torch.argmax(F.softmax(output.start_logits, dim=-1))
+end_max = torch.argmax(F.softmax(output.end_logits, dim=-1)) + 1 # add one because of python list indexing
+answer = tokenizer.decode(tokenized_res["input_ids"][0][start_max: end_max])
+print(answer)
+
 
 ```
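
The decoding step added at the end of the example selects the most likely answer span from the model's start/end logits. For reference, here is a minimal, self-contained sketch of the same idea with made-up logits standing in for `output.start_logits` and `output.end_logits` (the tensor values and token ids below are illustrative only, not real model output):

```python
import torch
import torch.nn.functional as F

# made-up logits for a 10-token sequence (stand-ins for output.start_logits / output.end_logits)
start_logits = torch.tensor([[0.1, 0.2, 3.5, 0.3, 0.1, 0.0, 0.2, 0.1, 0.0, 0.1]])
end_logits = torch.tensor([[0.0, 0.1, 0.2, 0.3, 4.1, 0.1, 0.2, 0.1, 0.0, 0.1]])

# softmax is monotonic, so the argmax over probabilities equals the argmax over raw logits
start_idx = torch.argmax(F.softmax(start_logits, dim=-1)).item()  # -> 2
end_idx = torch.argmax(F.softmax(end_logits, dim=-1)).item() + 1  # -> 5; +1 because Python slices exclude the end index

input_ids = list(range(100, 110))          # stand-in for tokenized_res["input_ids"][0]
answer_ids = input_ids[start_idx:end_idx]  # these ids would be passed to tokenizer.decode(...)
print(answer_ids)                          # [102, 103, 104]
```

Note that `torch.argmax` without a `dim` argument flattens its input, which works here because the example asks a single question at a time (batch size 1); with a larger batch you would take the argmax per row instead.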