import torch
from datasets import load_dataset
from transformers import BertTokenizer, EncoderDecoderModel, pipeline
# Load the encoder-decoder model and tokenizer from Hugging Face
encoder_decoder_model = EncoderDecoderModel.from_pretrained("sartajbhuvaji/gutenberg-bert-encoder-decoder")
tokenizer = BertTokenizer.from_pretrained("sartajbhuvaji/gutenberg-bert-encoder-decoder")
# Define the number of labels for your classification task
num_labels = 10
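# NOTE: EncoderDecoderForClassification is not part of transformers; it is a custom
# wrapper around the encoder-decoder model. The class below is a minimal sketch and
# an assumption about its structure: it mean-pools the encoder's token states into a
# linear head, exposes a config, and returns a SequenceClassifierOutput so the
# text-classification pipeline can consume it. The head saved in
# gutenberg-classification-head.pth may be organised differently.
import torch.nn as nn
from transformers.modeling_outputs import SequenceClassifierOutput

class EncoderDecoderForClassification(nn.Module):
    def __init__(self, encoder_decoder_model, num_labels):
        super().__init__()
        self.encoder = encoder_decoder_model.encoder  # reuse the BERT encoder
        self.config = self.encoder.config             # pipeline reads id2label from here
        self.config.num_labels = num_labels           # also regenerates id2label (LABEL_0 ... LABEL_9)
        self.classifier = nn.Linear(self.config.hidden_size, num_labels)

    def forward(self, input_ids=None, attention_mask=None, **kwargs):
        encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        pooled = encoder_outputs.last_hidden_state.mean(dim=1)  # mean-pool over tokens
        logits = self.classifier(pooled)
        return SequenceClassifierOutput(logits=logits)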
# Attach the custom classification head and load its trained weights
classification_model = EncoderDecoderForClassification(encoder_decoder_model, num_labels)
classification_model.load_state_dict(torch.load("gutenberg-classification-head.pth", map_location="cpu"))
# Now create a text classification pipeline
classifier = pipeline("text-classification", model=classification_model, tokenizer=tokenizer)
# Test the pipeline with a single sentence
result = classifier("This is a great book!")
print(result)
# Load sample dataset
dataset = load_dataset("sartajbhuvaji/gutenberg", split="100")
df = dataset.to_pandas()
# Test the pipeline on a single document from the DataFrame
doc_id = 1
doc_text = df.loc[df['DocID'] == doc_id, 'Text'].values[0]
result = classifier(doc_text[:1024])  # crude character-level cut; BERT accepts at most 512 tokens
print(result)
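# Optional: score several documents at once. A sketch, assuming the pipeline forwards
# extra keyword arguments to the tokenizer (the text-classification pipeline does in
# recent transformers releases), so long texts are truncated to BERT's 512-token limit.
sample_texts = df['Text'].head(5).tolist()
batch_results = classifier(sample_texts, truncation=True, max_length=512)
for doc_id, res in zip(df['DocID'].head(5), batch_results):
    print(doc_id, res)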
Base model: google-bert/bert-base-uncased