import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline


# Models should be cached as a shared resource rather than copied on every
# rerun, so `st.cache_resource` is used here instead of `st.cache_data`.
@st.cache_resource
def prepare_model():
    """
    Prepare the tokenizer and the model for classification.
    """
    tokenizer = AutoTokenizer.from_pretrained("oracat/bert-paper-classifier-arxiv")
    model = AutoModelForSequenceClassification.from_pretrained(
        "oracat/bert-paper-classifier-arxiv"
    )
    return tokenizer, model


def process(text):
    """
    Classify the incoming text and return the predicted label.
    """
    # Uses the module-level `tokenizer` and `model` prepared below;
    # the pipeline handles tokenization internally.
    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
    result = pipe(text)[0]
    return result["label"]
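

# For reference, a `text-classification` pipeline call returns a list of
# dicts, one per input, e.g. [{"label": "cs.CL", "score": 0.97}] (the label
# shown is illustrative, assuming this model predicts arXiv categories),
# so `result["label"]` above extracts the top predicted category.
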
tokenizer, model = prepare_model()

# State management
#
# The app's state consists of the paper title and abstract.
# Session state is used here so that the demo buttons can
# pre-fill the input fields with example values.
if "title" not in st.session_state:
st.session_state["title"] = ""
if "abstract" not in st.session_state:
st.session_state["abstract"] = ""
if "output" not in st.session_state:
st.session_state["output"] = ""

# Simple Streamlit interface
st.markdown("### Hello, paper classifier!")

## Demo buttons and their callbacks


def demo_cl_callback():
    """
    Use https://ai.facebook.com/blog/large-language-model-llama-meta-ai/ for the demo.
    """
    paper_title = (
        "Introducing LLaMA: A foundational, 65-billion-parameter large language model"
    )
    paper_abstract = "Over the last year, large language models — natural language processing (NLP) systems with billions of parameters — have shown new capabilities to generate creative text, solve mathematical theorems, predict protein structures, answer reading comprehension questions, and more. They are one of the clearest cases of the substantial potential benefits AI can offer at scale to billions of people. Smaller models trained on more tokens — which are pieces of words — are easier to retrain and fine-tune for specific potential product use cases. We trained LLaMA 65B and LLaMA 33B on 1.4 trillion tokens. Our smallest model, LLaMA 7B, is trained on one trillion tokens. Like other large language models, LLaMA works by taking a sequence of words as an input and predicts a next word to recursively generate text. To train our model, we chose text from the 20 languages with the most speakers, focusing on those with Latin and Cyrillic alphabets."
    st.session_state["title"] = paper_title
    st.session_state["abstract"] = paper_abstract


def demo_cv_callback():
    """
    Use https://arxiv.org/abs/2010.11929 for the demo.
    """
    paper_title = (
        "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale"
    )
    paper_abstract = "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train."
    st.session_state["title"] = paper_title
    st.session_state["abstract"] = paper_abstract


def clear_callback():
    """
    Clear the input and output fields.
    """
    st.session_state["title"] = ""
    st.session_state["abstract"] = ""
    st.session_state["output"] = ""


col1, col2, col3 = st.columns([1, 1, 1])
with col1:
    st.button("Demo: LLaMA paper", on_click=demo_cl_callback)
with col2:
    st.button("Demo: ViT paper", on_click=demo_cv_callback)
with col3:
    st.button("Clear fields", on_click=clear_callback)

## Input fields

placeholder = st.empty()  # reserved layout slot (currently unused)
# The `key` arguments bind the widgets to the session-state entries above,
# which is what lets the demo callbacks pre-fill them.
title = st.text_input("Enter the title:", key="title")
abstract = st.text_area(
    "... and maybe the abstract of the paper you want to classify:", key="abstract"
)
text = "\n".join([title, abstract])

## Output

if len(text.strip()) > 0:
    st.markdown(f"<h4>Predicted class: {process(text)}</h4>", unsafe_allow_html=True)
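
# To try the app locally (assuming this file is saved as app.py and that
# streamlit and transformers are installed):
#   streamlit run app.py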