import torch
from flagai.auto_model.auto_loader import AutoLoader
from flagai.model.predictor.predictor import Predictor
from flagai.data.tokenizer import Tokenizer

# Example script: random-sample text generation with an Aquila SFT chat model
# via FlagAI's AutoLoader/Predictor pipeline.
model_info = "Aquila-7b-sft-10m-1"  # NOTE(review): unused below — kept for reference
model_dir = "your directory to model"  # placeholder: point at the local model checkpoint

# Tokenizer vocabulary is downloaded/cached under cache_dir.
tokenizer = Tokenizer.from_pretrained("llama-30b-en",
                                      cache_dir="./gpt2_new_100k/")

# Load a causal-LM ("lm") in fp16 from model_dir.
loader = AutoLoader("lm", model_name="llama-7b-en",
                    only_download_config=False,
                    use_cache=True,
                    fp16=True,
                    model_dir=model_dir)

model = loader.get_model()
model.eval()  # inference mode: disable dropout etc.

vocab = tokenizer.get_vocab()
# Reverse mapping: token id -> token string.
id2word = {v: k for k, v in vocab.items()}

predictor = Predictor(model, tokenizer)

# FIX: `prompt` was previously read before assignment (NameError).
# Define the user query first, then wrap it in the chat template.
query = "What is the highest mountain in the world?"

with torch.no_grad():
    # Aquila SFT chat template: user marker + query + assistant marker.
    prompt = "#用户#" + query + " " + "#ai助手#"
    model_in = "[CLS]" + prompt
    # FIX: `model_in` was built but never used — pass the [CLS]-prefixed
    # input to the predictor as the construction above intends.
    out, tokens, probs = predictor.predict_generate_randomsample(
        model_in,
        out_max_length=200,
    )
