File size: 1,715 Bytes
a465bd7
 
 
88855f0
a465bd7
 
 
 
 
88855f0
 
 
663f8fb
c5fee4d
88855f0
 
c06db71
82417fb
88855f0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import streamlit as st
from transformers import pipeline

# Sentiment analysis: labels the input text POSITIVE/NEGATIVE with a score.
pipe = pipeline('sentiment-analysis')
test = st.text_area('enter the text:')

if test:
    out = pipe(test)
    st.json(out)

# Text classification. Fix: the original built a 'question-answering'
# pipeline here (despite the comment saying "text-classification"); QA
# requires a question/context pair and raises when called with a bare
# string, so the task is corrected to match the intended single-text call.
pipe_one = pipeline('text-classification')
test_one = st.text_area('enter more text:')

# Fix: was `if test:` (copy-paste bug) — the second result must be gated
# on the second text area, not the first.
if test_one:
    out_one = pipe_one(test_one)
    st.json(out_one)







# text generation from youtube vid
# st.write("And now for something completely different...")
#
# default_value = "See how a modern neural network auto-completes your text using HuggingFace"
# st.write("\n\nThe King of Text Generation, GPT-2 comes in four available sizes, only three of which have been made publicly available.")
#
# sent = st.text_area("Text", default_value, height=275)
# max_length = st.sidebar.slider("Max Length", min_value = 10, max_value=30)
# temperature = st.sidebar.slider("Temperature", value = 1.0, min_value=0.0, max_value=1.0, step=0.05)
# top_k = st.sidebar.slider("Top-k", min_value=0, max_value=5, value=0)
# top_p = st.sidebar.slider("top-p", min_value=0.0, max_value=1.0, step=0.05, value=0.9)
# num_return_sequences = st.sidebar.number_input('Number of Return Sequences', min_value=1, max_value=5, value=1, step=1)
#
# encoded_prompt = tokenizer.encode(sent, add_special_tokens=False, return_tensors="pt")
# if encoded_prompt.size()[-1] == 0:
#     input_ids = None
# else:
#     input_ids = encoded_prompt
#
# output_sequences = infer(input_ids, max_length, temperature, top_k, top_p, num_return_sequences)
#
# for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
#     print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")