# Load the GPTQ-quantized model and its tokenizer directly from the Hugging Face Hub.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("TheBloke/Yarn-Mistral-7B-128k-GPTQ")
# device_map="auto" places the quantized weights on the available GPU
# (GPTQ inference assumes a CUDA device with the optimum/auto-gptq packages installed).
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Yarn-Mistral-7B-128k-GPTQ",
    device_map="auto",
)
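
# The lines below are a minimal usage sketch, not part of the original file:
# the prompt text and generation settings are illustrative assumptions.
prompt = "Summarize what long-context support changes for this model."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

output_ids = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))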