File size: 434 Bytes
c700ce7
 
 
 
 
 
 
 
 
 
 
115460a
1
2
3
4
5
6
7
8
9
10
11
12
import os

from transformers import AutoTokenizer, AutoModelForCausalLM

def get_tok_and_model(path_for_model: str):
    """Load a tokenizer and causal-LM model from a local cache directory.

    Args:
        path_for_model: Filesystem path to an already-downloaded
            HuggingFace model directory. Nothing is fetched from the hub;
            the path must exist on disk.

    Returns:
        A ``(tokenizer, model)`` tuple
        (``AutoTokenizer``, ``AutoModelForCausalLM``).

    Raises:
        RuntimeError: If ``path_for_model`` does not exist.
    """
    if not os.path.exists(path_for_model):
        raise RuntimeError("no cached model.")
    # Left padding keeps every prompt's last token flush with the right
    # edge of the batch, which batched open-ended generation requires.
    tok = AutoTokenizer.from_pretrained(path_for_model, padding_side='left')
    # Many causal-LM tokenizers ship without a pad token; reusing EOS as
    # pad is the standard choice for open-ended generation. Fall back to
    # the historical hard-coded GPT-2 EOS id (50256) only when the
    # tokenizer does not define eos_token_id, so behavior for the
    # original GPT-2 use case is unchanged while other models get their
    # own correct pad id.
    tok.pad_token_id = tok.eos_token_id if tok.eos_token_id is not None else 50256
    model = AutoModelForCausalLM.from_pretrained(path_for_model)
    return tok, model