Update app.py & requirements (2a11a7e)
import streamlit as st
import torch
from datasets import load_dataset
from transformers import DistilBertForSequenceClassification, DistilBertTokenizer
decision_to_str = {'REJECTED': 0, 'ACCEPTED': 1, 'PENDING': 2, 'CONT-REJECTED': 3, 'CONT-ACCEPTED': 4, 'CONT-PENDING': 5}
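# Only REJECTED (0) and ACCEPTED (1) are used below; the other statuses are filtered out.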
dataset_dict = load_dataset('HUPD/hupd',
    name='all',
    data_files="https://huggingface.co/datasets/HUPD/hupd/blob/main/hupd_metadata_2022-02-22.feather",
    icpr_label=None,
    force_extract=True,
    train_filing_start_date='2016-01-01',
    train_filing_end_date='2016-01-01',
    val_filing_start_date='2017-01-01',
    val_filing_end_date='2017-05-31',
)
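# Keep only validation-split patents whose final decision is REJECTED or ACCEPTED.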
dataset = dataset_dict['validation'].filter(lambda e: e['decision'] in ['REJECTED', 'ACCEPTED'])
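# Two DistilBERT classifiers, fine-tuned separately on patent abstracts and on patent claims.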
model_abstract = DistilBertForSequenceClassification.from_pretrained('theresatvan/hupd-distilbert-abstract')
tokenizer_abstract = DistilBertTokenizer.from_pretrained('theresatvan/hupd-distilbert-abstract')
model_claims = DistilBertForSequenceClassification.from_pretrained('theresatvan/hupd-distilbert-claims')
tokenizer_claims = DistilBertTokenizer.from_pretrained('theresatvan/hupd-distilbert-claims')
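# Ensemble prediction: score the abstract and the claims with their respective models,
# then average the two softmax distributions.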
def predict(model_abstract, model_claims, tokenizer_abstract, tokenizer_claims, input):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model_abstract.to(device)
    model_claims.to(device)
    model_abstract.eval()
    model_claims.eval()

    abstract, claims = input['abstract'], input['claims']
    # Truncate to the model's maximum length and keep the tensors on the same device as the models.
    input_abstract = tokenizer_abstract(abstract, truncation=True, padding=True, return_tensors='pt').to(device)
    input_claims = tokenizer_claims(claims, truncation=True, padding=True, return_tensors='pt').to(device)

    with torch.no_grad():
        outputs_abstract = model_abstract(**input_abstract)
        outputs_claims = model_claims(**input_claims)

    combined_prob = (outputs_abstract.logits.softmax(dim=1) + outputs_claims.logits.softmax(dim=1)) / 2
    label = torch.argmax(combined_prob, dim=1)

    return label, combined_prob
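# The predicted index can be mapped back to a decision string, e.g.
# {v: k for k, v in decision_to_str.items()}[int(label)].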
if __name__ == '__main__':
    st.title("Can I Patent This?")

    form = st.form('patent-prediction-form')
    # Populate the dropdown with the application numbers of the filtered validation patents.
    patents_dropdown = dataset['application_number']
    input_application = form.selectbox('Select a patent\'s application number', patents_dropdown)
    submit = form.form_submit_button("Submit")

    if submit:
        input = dataset.filter(lambda e: e['application_number'] == input_application)
        label, prob = predict(model_abstract, model_claims, tokenizer_abstract, tokenizer_claims, input)
        st.write(label)
        st.write(prob)
        st.write(input['decision'])