import streamlit as st
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

model_name = 'deepset/xlm-roberta-large-squad2'


# Cache the model so it is loaded once, not on every Streamlit rerun.
@st.cache_resource
def load_qa_pipeline():
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return pipeline('question-answering', model=model, tokenizer=tokenizer)


# a) Get predictions
ctx = st.text_area('Context')
if ctx:
    q = st.text_area('Ask your question :)')
    if q:
        nlp = load_qa_pipeline()
        # The pipeline also accepts a dict input, e.g.
        # {'question': 'Why is model conversion important?',
        #  'context': 'The option to convert models between FARM and transformers '
        #             'gives freedom to the user and lets people easily switch '
        #             'between frameworks.'}
        res = nlp(context=ctx, question=q)
        st.json(res)  # {'score': ..., 'start': ..., 'end': ..., 'answer': ...}
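
# Usage: save this script (filename assumed here, e.g. app.py) and launch it with
#   streamlit run app.py
# then paste a passage into 'Context' and type a question.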

# b) Batch evaluation over a test set, e.g. a dataframe with 'context' and
# 'question' columns. Kept commented out: it assumes a `test_df` exists and
# a GPU is available (device=0). Batches might be faster than per-row calls;
# see the sketch below.

# qa_pl = pipeline('question-answering', model=model_name, tokenizer=model_name, device=0)

# predictions = []
# for ctx, q in test_df[["context", "question"]].to_numpy():
#     result = qa_pl(context=ctx, question=q)
#     predictions.append(result["answer"])
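
# The loop above scores one example per call; the pipeline can also take a
# list of {'question': ..., 'context': ...} dicts plus a batch_size argument,
# so the model processes several examples per forward pass. A minimal sketch
# (the two example pairs below are made up for illustration):

# inputs = [
#     {'question': 'What is Streamlit?',
#      'context': 'Streamlit is an open-source Python framework for building data apps.'},
#     {'question': 'What does the pipeline return?',
#      'context': 'The question-answering pipeline returns a dict with score, start, end and answer.'},
# ]
# results = qa_pl(inputs, batch_size=2)
# predictions = [r['answer'] for r in results]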