File size: 563 Bytes
a97f4db
 
e02d784
a97f4db
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
"""Load the Gemma 7B instruction-tuned tokenizer and model from Hugging Face.

Running this module downloads (or reads from the local HF cache) the
checkpoint and exposes two module-level objects:

* ``tokenizer`` -- an ``AutoTokenizer`` for the model.
* ``model``     -- an ``AutoModelForCausalLM`` ready for generation.
"""

from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# Single source of truth for the checkpoint id; edit here to swap models
# (previously the string was duplicated in both from_pretrained calls).
MODEL_ID = "google/gemma-7b-it"

# Optional 4-bit quantization (needs the `bitsandbytes` package). To enable,
# uncomment and pass `quantization_config=quantization_config` to
# AutoModelForCausalLM.from_pretrained below.
# quantization_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)