from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers import pipeline
import torch

# Pick the compute device for the pipeline: CUDA device index 0 when a GPU
# is present, otherwise -1 (the transformers convention for CPU).
if torch.cuda.is_available():
    device = 0
else:
    device = -1
print(f"Using device: {'GPU' if device == 0 else 'CPU'}")

# Load the pretrained BERT-large NER tokenizer/model pair from the Hub.
model_name = "dslim/bert-large-NER"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name)

# Build a token-classification pipeline; "simple" aggregation merges
# sub-word pieces into whole-word entity groups.
ner_pipeline = pipeline(
    "ner",
    model=model,
    tokenizer=tokenizer,
    aggregation_strategy="simple",
    device=device,
)

# Sample sentence to run entity extraction on.
text = "Elon Musk announced that Tesla is opening a new factory in Berlin next year."

# Run inference and report each detected entity with its label and score.
print("\nNamed Entities:")
for ent in ner_pipeline(text):
    word = ent["word"]
    group = ent["entity_group"]
    score = ent["score"]
    print(f"{word} - {group} (Score: {score:.3f})")
