victorknox committed on
Commit c1700a4
1 Parent(s): 75f0536

Update app.py

Files changed (1)
app.py +14 -14
app.py CHANGED
@@ -1,28 +1,28 @@
  import streamlit as st
- #from transformers import pipeline
+ from transformers import pipeline

- #pipe = pipeline('sentiment-analysis')
- #text = st.text_area('enter some text!')
+ pipe = pipeline('sentiment-analysis')
+ text = st.text_area('enter some text!')

- #if text:
- # out = pipe(text)
- #st.json(out)
+ if text:
+     out = pipe(text)
+     st.json(out)

- from transformers import pipeline
+ #from transformers import pipeline

- model_name = "deepset/xlm-roberta-large-squad2"
+ #model_name = "deepset/xlm-roberta-large-squad2"

- qa_pl = pipeline('question-answering', model=model_name, tokenizer=model_name, device=0)
+ #qa_pl = pipeline('question-answering', model=model_name, tokenizer=model_name, device=0)

  #predictions = []

  # batches might be faster
- ctx = st.text_area('Gib context')
- q = st.text_area('Gib question')
+ #ctx = st.text_area('Gib context')
+ #q = st.text_area('Gib question')

- if context:
-     result = qa_pl(context=ctx, question=q)
-     st.json(result["answer"])
+ #if context:
+ # result = qa_pl(context=ctx, question=q)
+ # st.json(result["answer"])

  #for ctx, q in test_df[["context", "question"]].to_numpy():
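After this commit, the active part of app.py is a minimal sentiment-analysis demo. As a sketch (assuming the body of the if block is indented, which the rendered diff does not preserve, and using the pipeline's default English sentiment model), it reads:

import streamlit as st
from transformers import pipeline

# load the default sentiment-analysis pipeline (the model is downloaded on first run)
pipe = pipeline('sentiment-analysis')

# free-form text box for user input
text = st.text_area('enter some text!')

# run inference only once some text has been entered, then show the raw prediction
if text:
    out = pipe(text)
    st.json(out)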
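The commented-out question-answering block carries over a bug from the removed code: the guard checks `context` while the text areas are bound to `ctx` and `q`, so re-enabling it verbatim would raise a NameError. A sketch of a corrected variant follows; the `if ctx and q:` guard, the CPU device, and passing the full result dict to st.json are my assumptions, while the model name, prompts, and pipeline arguments are taken from the diff:

import streamlit as st
from transformers import pipeline

model_name = "deepset/xlm-roberta-large-squad2"

# device=0 in the original assumes a GPU; device=-1 falls back to CPU
qa_pl = pipeline('question-answering', model=model_name, tokenizer=model_name, device=-1)

ctx = st.text_area('Gib context')
q = st.text_area('Gib question')

# guard on the variables that actually exist (the original checked an undefined `context`)
if ctx and q:
    result = qa_pl(context=ctx, question=q)
    # show the full prediction (score, start, end, answer) rather than just the answer string
    st.json(result)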