fix(README): remove `trust_remote_code` usage after `transformers==4.38.0` support
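Since `transformers==4.38.0` ships the StableLM architecture natively, passing `trust_remote_code=True` is no longer required. A minimal version guard, as a sketch (the guard itself is illustrative and not part of this diff; it only assumes `packaging`, a dependency of transformers, is available):

```python
# Sketch: fail early if the installed transformers predates native StableLM
# support (assumption based on the commit title: support landed in 4.38.0).
import transformers
from packaging import version

if version.parse(transformers.__version__) < version.parse("4.38.0"):
    raise RuntimeError(
        "transformers>=4.38.0 is required to load StableLM "
        "without trust_remote_code=True"
    )
```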
README.md (changed)
@@ -26,7 +26,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
 model = AutoModelForCausalLM.from_pretrained(
   "stabilityai/stablelm-3b-4e1t",
-  trust_remote_code=True,
   torch_dtype="auto",
 )
 model.cuda()
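For reference, the snippet as it reads after this hunk, extended into a runnable example (the prompt and `generate` arguments are illustrative additions, not part of the README diff):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
model = AutoModelForCausalLM.from_pretrained(
    "stabilityai/stablelm-3b-4e1t",
    torch_dtype="auto",  # no trust_remote_code needed on transformers>=4.38.0
)
model.cuda()

# Illustrative generation call; sampling parameters are examples only.
inputs = tokenizer("The weather today is", return_tensors="pt").to(model.device)
tokens = model.generate(**inputs, max_new_tokens=32, do_sample=True, temperature=0.7)
print(tokenizer.decode(tokens[0], skip_special_tokens=True))
```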
@@ -51,7 +50,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
 model = AutoModelForCausalLM.from_pretrained(
   "stabilityai/stablelm-3b-4e1t",
-  trust_remote_code=True,
   torch_dtype="auto",
   attn_implementation="flash_attention_2",
 )
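The FlashAttention-2 variant likewise drops the flag. A sketch of the resulting loading path, assuming the `flash-attn` package is installed and a supported GPU is available (FlashAttention-2 requires a half-precision dtype; `torch_dtype="auto"` resolves to the checkpoint's stored dtype):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
model = AutoModelForCausalLM.from_pretrained(
    "stabilityai/stablelm-3b-4e1t",
    torch_dtype="auto",  # FA2 needs fp16/bf16; "auto" picks the checkpoint dtype
    attn_implementation="flash_attention_2",
)
model.cuda()
```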