Test_Lab10 / app.py
from transformers import AutoTokenizer, TapasForQuestionAnswering

# TAPAS base model fine-tuned on WikiTableQuestions (WTQ) for table question answering.
# Note: this checkpoint is a TAPAS model, so it must be loaded with the TAPAS classes,
# not T5ForConditionalGeneration.
model_name = "google/tapas-base-finetuned-wtq"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = TapasForQuestionAnswering.from_pretrained(model_name)
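
# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal example of querying the loaded TAPAS model against a small table.
# The table, question, and post-processing below are assumptions for illustration;
# they assume torch and pandas are installed alongside transformers.
import pandas as pd

table = pd.DataFrame(
    {"City": ["Paris", "Berlin"], "Population": ["2148000", "3645000"]}
).astype(str)  # TAPAS tokenizers expect all table cells as strings
queries = ["Which city has the larger population?"]

inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
outputs = model(**inputs)

# Map the cell-selection and aggregation logits back to table coordinates.
predicted_coordinates, predicted_aggregation = tokenizer.convert_logits_to_predictions(
    inputs, outputs.logits.detach(), outputs.logits_aggregation.detach()
)
answer_cells = [table.iat[coord] for coord in predicted_coordinates[0]]
print(answer_cells, predicted_aggregation[0])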