from transformers import AutoTokenizer, AutoModelForTokenClassification
import torch
MODEL_NAME = "ss108/legal-citation-bert"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForTokenClassification.from_pretrained(MODEL_NAME)
model.eval()

def predict(text):
    # Tokenize the input text
    inputs = tokenizer(text, return_tensors="pt", padding=True)

    # Get model predictions
    with torch.no_grad():
        outputs = model(**inputs)

    logits = outputs.logits
    predictions = torch.argmax(logits, dim=-1)

    # Convert prediction ids to label names
    labels = [model.config.id2label[pred.item()] for pred in predictions[0]]

    # Align labels with tokens, dropping special tokens such as [CLS] and [SEP]
    tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0])
    result = [
        {'token': token, 'label': label}
        for token, label in zip(tokens, labels)
        if token not in tokenizer.all_special_tokens
    ]

    return result
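

# Minimal usage sketch: runs predict() on a sample sentence and prints each
# token with its predicted label. The sample citation text is illustrative
# only and is not taken from the model card.
if __name__ == "__main__":
    sample = "See Marbury v. Madison, 5 U.S. 137 (1803)."
    for item in predict(sample):
        print(item['token'], item['label'])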