import streamlit as st
from transformers import pipeline

# Load the default sentiment-analysis pipeline and wire it to a text box;
# the prediction is shown as JSON whenever the box is non-empty.
pipe = pipeline('sentiment-analysis')
text = st.text_area('enter some text!')

if text:
    out = pipe(text)
    st.json(out)

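# Optional sketch, not part of the original app: Streamlit reruns the whole
# script on every interaction, so the pipeline above is rebuilt each time.
# Assuming a Streamlit version that provides st.cache_resource, the load
# could be cached like this instead:
#
# @st.cache_resource
# def load_pipe():
#     return pipeline('sentiment-analysis')
#
# pipe = load_pipe()
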
# Disabled alternative: extractive question answering with an XLM-R model.
#from transformers import pipeline

#model_name = "deepset/xlm-roberta-large-squad2"

#qa_pl = pipeline('question-answering', model=model_name, tokenizer=model_name, device=0)

#predictions = []

# batching might be faster than single calls (see the sketch after the loop below)
#ctx = st.text_area('Give context')
#q = st.text_area('Give question')

#if ctx:
#    result = qa_pl(context=ctx, question=q)
#    st.json(result["answer"])

# test_df (with "context" and "question" columns) is assumed to be loaded elsewhere.
#for ctx, q in test_df[["context", "question"]].to_numpy():
#    result = qa_pl(context=ctx, question=q)
#    predictions.append(result["answer"])

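# A sketch of the batched variant hinted at above (test_df is assumed to exist
# as in the loop; recent transformers pipelines accept a list of
# question/context dicts plus a batch_size argument):
#
# examples = [
#     {"context": ctx, "question": q}
#     for ctx, q in test_df[["context", "question"]].to_numpy()
# ]
# results = qa_pl(examples, batch_size=8)
# predictions = [r["answer"] for r in results]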

# Loading the model and tokenizer directly would also need this import:
#from transformers import AutoModelForQuestionAnswering, AutoTokenizer
#model = AutoModelForQuestionAnswering.from_pretrained(model_name)
#tokenizer = AutoTokenizer.from_pretrained(model_name)