import streamlit as st
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline

# Hugging Face Hub model ID (or local path) for the fine-tuned model and tokenizer
model_directory = "AdilHayat173/token_classification"

# Load the model and tokenizer, then build an aggregated NER pipeline
tokenizer = AutoTokenizer.from_pretrained(model_directory)
model = AutoModelForTokenClassification.from_pretrained(model_directory)
nlp = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")

st.title("Token Classification with Hugging Face")

# Text input from user
user_input = st.text_area("Enter text for token classification:", "")

if st.button("Classify Text"):
    if user_input:
        # Run token classification on the input text
        results = nlp(user_input)

        # Display results
        st.write("### Token Classification Results")
        for entity in results:
            st.write(f"**Token:** {entity['word']}")
            st.write(f"**Label:** {entity['entity_group']}")
            st.write(f"**Score:** {entity['score']:.4f}")
            st.write("---")
    else:
        st.write("Please enter some text for classification.")