Donato8408 committed on
Commit
7b9032e
1 Parent(s): 56de39c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -1
app.py CHANGED
@@ -1,3 +1,25 @@
1
  import gradio as gr
 
 
2
 
3
- gr.Interface.load("models/impira/layoutlm-document-qa").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForQuestionAnswering
3
+ import torch
4
 
5
+ # Load model and tokenizer
6
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
7
+ model = AutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
8
+
def predict_answer(context, question):
    """Extractive question answering over `context`.

    Encodes (question, context) as a pair, runs the QA head, and decodes
    the highest-scoring answer span back to text.

    Parameters
    ----------
    context : str
        The document text to search for the answer.
    question : str
        The question to answer.

    Returns
    -------
    str
        The decoded answer span, or "" when the model predicts an
        invalid span (end index before start index).
    """
    # truncation guards against contexts longer than the model's
    # maximum sequence length, which would otherwise raise at forward().
    encoding = tokenizer.encode_plus(
        question,
        context,
        return_tensors="pt",
        truncation=True,
        max_length=512,
    )
    input_ids = encoding["input_ids"]
    attention_mask = encoding["attention_mask"]
    # Inference only: no_grad avoids building the autograd graph and
    # saves memory on every request.
    with torch.no_grad():
        start_scores, end_scores = model(
            input_ids, attention_mask=attention_mask, return_dict=False
        )
    start_index = int(torch.argmax(start_scores))
    end_index = int(torch.argmax(end_scores))
    if end_index < start_index:
        # The two argmaxes are independent, so an inverted span is
        # possible; return an empty answer instead of a garbage slice.
        return ""
    answer_tokens = input_ids[0][start_index : end_index + 1]
    # skip_special_tokens drops [CLS]/[SEP]/[PAD] from the decoded text,
    # which the original decode() call leaked into the answer.
    return tokenizer.decode(answer_tokens, skip_special_tokens=True)
# Define the Gradio interface.
# gr.inputs.Textbox / gr.outputs.Textbox were deprecated in Gradio 3.x
# and removed in 4.x; the unified gr.Textbox component replaces both.
context_input = gr.Textbox(label="Context")
question_input = gr.Textbox(label="Question")
output_text = gr.Textbox(label="Answer")
gr.Interface(
    fn=predict_answer,
    inputs=[context_input, question_input],
    outputs=output_text,
    title="LayoutLM Document QA",
).launch()