Spaces:
Runtime error
Runtime error
File size: 2,989 Bytes
e4296b4 567db95 2a11a7e 567db95 2a11a7e 068cd45 2a11a7e 8da68fd 2a11a7e e4296b4 2a11a7e 539470a 2a11a7e 539470a 2a11a7e 539470a 2a11a7e ad42ce4 eb311d6 e4296b4 2a11a7e eb311d6 2a11a7e 8da68fd 2a11a7e e4296b4 2a11a7e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 |
import torch
import streamlit as st
from datasets import load_dataset
from transformers import DistilBertForSequenceClassification, DistilBertTokenizer
# Mapping from patent decision label to the model's class index (6 classes).
decision_to_str = {'REJECTED': 0, 'ACCEPTED': 1, 'PENDING': 2, 'CONT-REJECTED': 3, 'CONT-ACCEPTED': 4, 'CONT-PENDING': 5}

# Load the 'sample' configuration of the Harvard USPTO Patent Dataset (HUPD).
# BUG FIX: the metadata feather file must be fetched via the hub's "resolve"
# endpoint — the previous "blob" URL returns an HTML viewer page rather than
# the raw file, which makes load_dataset fail at startup.
# NOTE(review): recent `datasets` releases also require trust_remote_code=True
# for script-based datasets like HUPD — confirm against the pinned version.
dataset_dict = load_dataset(
    'HUPD/hupd',
    name='sample',
    data_files="https://huggingface.co/datasets/HUPD/hupd/resolve/main/hupd_metadata_2022-02-22.feather",
    icpr_label=None,
    train_filing_start_date='2016-01-01',
    train_filing_end_date='2016-01-21',
    val_filing_start_date='2016-01-22',
    val_filing_end_date='2016-01-31',
)
# Only the validation split (2016-01-22 .. 2016-01-31 filings) is used by the UI.
dataset = dataset_dict['validation']

# Two fine-tuned DistilBERT classifiers: one scores the abstract text,
# the other scores the claims text; predict() ensembles them.
model_abstract = DistilBertForSequenceClassification.from_pretrained('theresatvan/hupd-distilbert-abstract')
tokenizer_abstract = DistilBertTokenizer.from_pretrained('theresatvan/hupd-distilbert-abstract')
model_claims = DistilBertForSequenceClassification.from_pretrained('theresatvan/hupd-distilbert-claims')
tokenizer_claims = DistilBertTokenizer.from_pretrained('theresatvan/hupd-distilbert-claims')
def predict(model_abstract, model_claims, tokenizer_abstract, tokenizer_claims, input):
    """Ensemble the abstract and claims classifiers on one application.

    Each model scores its own text field; the two softmax distributions are
    averaged, and the argmax of the average is the predicted decision class.

    Args:
        model_abstract: sequence-classification model for the abstract text.
        model_claims: sequence-classification model for the claims text.
        tokenizer_abstract: tokenizer matching ``model_abstract``.
        tokenizer_claims: tokenizer matching ``model_claims``.
        input: mapping exposing 'abstract' and 'claims' text fields.
            (Name shadows the builtin ``input``; kept for caller compatibility.)

    Returns:
        Tuple of (label, probs): the argmax tensor over decision classes and
        the averaged probability list for the first example in the batch.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model_abstract.to(device)
    model_claims.to(device)
    model_abstract.eval()
    model_claims.eval()

    abstract, claims = input['abstract'], input['claims']
    encoding_abstract = tokenizer_abstract(abstract, return_tensors='pt', truncation=True, padding='max_length')
    encoding_claims = tokenizer_claims(claims, return_tensors='pt', truncation=True, padding='max_length')
    input_abstract = encoding_abstract['input_ids'].to(device)
    attention_mask_abstract = encoding_abstract['attention_mask'].to(device)
    input_claims = encoding_claims['input_ids'].to(device)
    attention_mask_claims = encoding_claims['attention_mask'].to(device)

    with torch.no_grad():
        # BUG FIX: the attention masks were computed but never passed to the
        # models, so the (max_length-) padding tokens were attended to and
        # skewed the logits. Pass them explicitly.
        outputs_abstract = model_abstract(input_ids=input_abstract, attention_mask=attention_mask_abstract)
        outputs_claims = model_claims(input_ids=input_claims, attention_mask=attention_mask_claims)

    # Average the two per-model probability distributions (equal weighting).
    combined_prob = (outputs_abstract.logits.softmax(dim=1) + outputs_claims.logits.softmax(dim=1)) / 2
    label = torch.argmax(combined_prob, dim=1)
    return label, combined_prob.tolist()[0]
if __name__ == '__main__':
    # BUG FIX: st.title is a function; the old code assigned a string to it
    # (st.title = "..."), which rendered nothing and clobbered the API.
    st.title("Can I Patent This?")

    form = st.form('patent-prediction-form')
    application_numbers = [example['patent_number'] for example in dataset]
    input_application = form.selectbox('Select a patent\'s application number', application_numbers)
    submit = form.form_submit_button("Submit")

    if submit:
        # filter() returns a (single-row) Dataset; its 'abstract'/'claims'
        # columns are one-element lists, which the tokenizers accept as a
        # batch of one.  Renamed from `input` to stop shadowing the builtin.
        selected = dataset.filter(lambda e: e['patent_number'] == input_application)
        label, prob = predict(model_abstract, model_claims, tokenizer_abstract, tokenizer_claims, selected)
        st.write(label)
        st.write(prob)
        st.write(selected['decision'])
|