import os

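# Silence Hugging Face tokenizers parallelism warnings before any model code loads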
os.environ["TOKENIZERS_PARALLELISM"] = "false"

print("Importing")

import streamlit as st

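# torch is only used to detect a GPU; docquery provides the question-answering pipeline and document loaders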
import torch
from docquery.pipeline import get_pipeline
from docquery.document import load_bytes

def ensure_list(x):
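    # The pipeline returns a single dict for one answer and a list for several; normalize to a list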
    if isinstance(x, list):
        return x
    else:
        return [x]

@st.experimental_singleton
def construct_pipeline():
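    # Loading the model is expensive, so st.experimental_singleton caches one pipeline per server process.
    # Use the GPU when torch reports that one is available.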
    device = "cuda" if torch.cuda.is_available() else "cpu"
    ret = get_pipeline(device=device)
    return ret

@st.cache
def run_pipeline(question, document):
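    # st.cache memoizes results, so reruns with the same question and document skip the model call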
    return construct_pipeline()(question=question, **document.context)

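# UI: a document uploader and question box, with the preview and answers shown side by side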
st.title("DocQuery: Query Documents Using NLP")
file = st.file_uploader("Upload a PDF or Image document")
question = st.text_input("QUESTION", "")

if file is not None:
    col1, col2 = st.columns(2)

    document = load_bytes(file, file.name)
    col1.image(document.preview, use_column_width=True)

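# Only query the model once a document has been uploaded and a non-empty question has been entered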
if file is not None and question is not None and len(question) > 0:
    predictions = run_pipeline(question=question, document=document)

    col2.header("Answers")
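    # Display each candidate answer with its confidence score as a percentage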
    for p in ensure_list(predictions):
        col2.subheader(f"{p['answer']}: ({round(p['score'] * 100, 1)}%)")


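# Bare strings below are rendered as Markdown by Streamlit's "magic" feature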
"DocQuery uses LayoutLMv1 fine-tuned on DocVQA, a document visual question answering dataset, as well as SQuAD, which boosts its English-language comprehension. To use it, simply upload an image or PDF, type a question, and click 'submit', or click one of the examples to load them."

"[Github Repo](https://github.com/impira/docquery)"