from transformers import AutoTokenizer, TapasForQuestionAnswering

model_name = "google/tapas-base-finetuned-wtq"  # you can specify the model size here (e.g. a tapas-large or tapas-small checkpoint)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = TapasForQuestionAnswering.from_pretrained(model_name)
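With the tokenizer and model loaded, you can answer a question against a small table. The sketch below follows the usage pattern from the TAPAS documentation; the example table, question, and variable names are purely illustrative, and it assumes pandas and PyTorch are installed.

import pandas as pd

# TAPAS expects every table cell to be a string.
table = pd.DataFrame(
    {
        "City": ["Paris", "Berlin", "Madrid"],
        "Population": ["2140526", "3769495", "3223334"],
    }
)
queries = ["Which city has the largest population?"]

inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
outputs = model(**inputs)

# Map the logits back to table coordinates (and an aggregation operator, if any).
answer_coordinates, aggregation_indices = tokenizer.convert_logits_to_predictions(
    inputs, outputs.logits.detach(), outputs.logits_aggregation.detach()
)

# Look up the predicted cells in the original table.
for coordinates in answer_coordinates:
    print([table.iat[row, column] for row, column in coordinates])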