rskuzma committed on
Commit
a9e1351
1 Parent(s): 42c85d6

add torch_dtype="auto" to load model weights in bf16

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -41,7 +41,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
41
 
42
  # Load the tokenizer and model
43
  tokenizer = AutoTokenizer.from_pretrained("cerebras/btlm-3b-8k-base")
44
- model = AutoModelForCausalLM.from_pretrained("cerebras/btlm-3b-8k-base", trust_remote_code=True)
45
 
46
  # Set the prompt for generating text
47
  prompt = "Albert Einstein was known for "
@@ -72,7 +72,7 @@ from transformers import pipeline
72
 
73
  # Load the tokenizer and model
74
  tokenizer = AutoTokenizer.from_pretrained("cerebras/btlm-3b-8k-base")
75
- model = AutoModelForCausalLM.from_pretrained("cerebras/btlm-3b-8k-base", trust_remote_code=True)
76
 
77
  # Set the prompt for text generation
78
  prompt = """Isaac Newton was a """
 
41
 
42
  # Load the tokenizer and model
43
  tokenizer = AutoTokenizer.from_pretrained("cerebras/btlm-3b-8k-base")
44
+ model = AutoModelForCausalLM.from_pretrained("cerebras/btlm-3b-8k-base", trust_remote_code=True, torch_dtype="auto")
45
 
46
  # Set the prompt for generating text
47
  prompt = "Albert Einstein was known for "
 
72
 
73
  # Load the tokenizer and model
74
  tokenizer = AutoTokenizer.from_pretrained("cerebras/btlm-3b-8k-base")
75
+ model = AutoModelForCausalLM.from_pretrained("cerebras/btlm-3b-8k-base", trust_remote_code=True, torch_dtype="auto")
76
 
77
  # Set the prompt for text generation
78
  prompt = """Isaac Newton was a """