update fast usage
README.md CHANGED
@@ -74,6 +74,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation import GenerationConfig
 
 tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)
+# use bf16
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True, use_bf16=True).eval()
+# use fp16
+# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True, use_fp16=True).eval()
+# use fp32
 model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", device_map="auto", trust_remote_code=True).eval()
 model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)  # different generation hyperparameters such as max length and top_p can be specified here
 
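For reference (not part of this commit), the same precision choices can also be expressed with stock transformers by passing `torch_dtype` to `from_pretrained`, rather than the `use_bf16` / `use_fp16` flags handled by Qwen's remote code. The sketch below shows that pattern end to end; the prompt text and `max_new_tokens` value are illustrative assumptions, not taken from the diff above.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)

# Precision via stock transformers: pass torch_dtype instead of a custom flag.
# Use torch.float16 or torch.float32 here for fp16 / fp32 instead of bf16.
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B",
    device_map="auto",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).eval()
model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)

# Plain generate() call driven by the generation_config loaded above.
inputs = tokenizer("Write one sentence about the sea.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```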