# hellohfworld / app.py
# Author: mattpat — commit "new update" (663f8fb)
import streamlit as st
from transformers import pipeline
# Sentiment analysis: score whatever text the user types into the box.
pipe = pipeline('sentiment-analysis')
test = st.text_area('enter the text:')
if test:
    # Show the raw pipeline output (label + confidence) as JSON.
    st.json(pipe(test))
# The one with "text-classification"
pipe_one = pipeline('question-answering')
test_one = st.text_area('enter more text:')
if test:
out_one = pipe_one(test_one)
st.json(out_one)
# text generation from youtube vid
# st.write("And now for something completely different...")
#
# default_value = "See how a modern neural network auto-completes your text using HuggingFace"
# st.write("\n\nThe King of Text Generation, GPT-2 comes in four available sizes, only three of which have been made publicly available.")
#
# sent = st.text_area("Text", default_value, height=275)
# max_length = st.sidebar.slider("Max Length", min_value = 10, max_value=30)
# temperature = st.sidebar.slider("Temperature", value = 1.0, min_value=0.0, max_value=1.0, step=0.05)
# top_k = st.sidebar.slider("Top-k", min_value=0, max_value=5, value=0)
# top_p = st.sidebar.slider("top-p", min_value=0.0, max_value=1.0, step=0.05, value=0.9)
# num_return_sequences = st.sidebar.number_input('Number of Return Sequences', min_value=1, max_value=5, value=1, step=1)
#
# encoded_prompt = tokenizer.encode(sent, add_special_tokens=False, return_tensors="pt")
# if encoded_prompt.size()[-1] == 0:
# input_ids = None
# else:
# input_ids = encoded_prompt
#
# output_sequences = infer(input_ids, max_length, temperature, top_k, top_p, num_return_sequences)
#
# for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
# print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")