import streamlit as st
from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers import pipeline
# Hugging Face Hub model ID for the fine-tuned token classification model
model_directory = "AdilHayat173/token_classification"
# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_directory)
model = AutoModelForTokenClassification.from_pretrained(model_directory)
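# Build a NER pipeline; aggregation_strategy="simple" merges sub-word tokens into whole entity spans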
nlp = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
st.title("Token Classification with Hugging Face")
# Text input from user
user_input = st.text_area("Enter text for token classification:", "")
if st.button("Classify Text"):
    if user_input:
        # Run token classification on the user's text
        results = nlp(user_input)
        # Display each detected entity with its label and confidence score
        st.write("### Token Classification Results")
        for entity in results:
            st.write(f"**Token:** {entity['word']}")
            st.write(f"**Label:** {entity['entity_group']}")
            st.write(f"**Score:** {entity['score']:.4f}")
            st.write("---")
    else:
        st.write("Please enter some text for classification.")