"""Generate candidate paper titles (or abstracts) with ArvinZhuang/BiTAG-t5-large.

Prefix the input with "abstract: " for the abstract->title task, or with
"title: " for the title->abstract task.
"""
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


def main() -> None:
    """Load the BiTAG model, sample 10 generations for one input, and print them."""
    model = AutoModelForSeq2SeqLM.from_pretrained("ArvinZhuang/BiTAG-t5-large")
    tokenizer = AutoTokenizer.from_pretrained("ArvinZhuang/BiTAG-t5-large")

    # Replace the bracketed placeholder with your own abstract text.
    # Use the 'title:' prefix instead for the title_to_abs task.
    text = "abstract: [your abstract]"
    input_ids = tokenizer(text, return_tensors="pt").input_ids

    outputs = model.generate(
        input_ids,
        do_sample=True,          # stochastic sampling (nucleus + top-k below)
        max_length=500,
        top_p=0.9,
        top_k=20,
        temperature=1.0,         # float, per the generate() API
        num_return_sequences=10,
    )

    print("Output:\n" + 100 * "-")
    for rank, output in enumerate(outputs, start=1):
        print("{}: {}".format(rank, tokenizer.decode(output, skip_special_tokens=True)))


if __name__ == "__main__":
    # Guard so importing this module does not trigger the model download/run.
    main()
Downloads last month: 6

Inference Providers (NEW): This model is not currently available via any of
the supported third-party Inference Providers, and it is not deployed on the
HF Inference API.