tangger committed on
Commit
4f8d2a8
1 Parent(s): 7cb61b2

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +7 -12
README.md CHANGED
@@ -1,9 +1,4 @@
1
- ---
2
- language:
3
- - en
4
- - zh
5
- pipeline_tag: text-generation
6
- ---
7
  # Notice/注意事项
8
  **Qwen官方模型临时下架了,我将9月11日下载到本地的Qwen-7B-Chat模型重新上传到huggingface。**
9
  **该模型为9月11日下载的版本。经过多人下载测试可以正常使用。**
@@ -90,19 +85,19 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
90
  from transformers.generation import GenerationConfig
91
 
92
  # Note: The default behavior now has injection attack prevention off.
93
- tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
94
 
95
  # use bf16
96
- # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
97
  # use fp16
98
- # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
99
  # use cpu only
100
- # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="cpu", trust_remote_code=True).eval()
101
  # use auto mode, automatically select precision based on the device.
102
- model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
103
 
104
  # Specify hyperparameters for generation
105
- model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
106
 
107
  # 第一轮对话 1st dialogue turn
108
  response, history = model.chat(tokenizer, "你好", history=None)
 
1
+
 
 
 
 
 
2
  # Notice/注意事项
3
  **Qwen官方模型临时下架了,我将9月11日下载到本地的Qwen-7B-Chat模型重新上传到huggingface。**
4
  **该模型为9月11日下载的版本。经过多人下载测试可以正常使用。**
 
85
  from transformers.generation import GenerationConfig
86
 
87
  # Note: The default behavior now has injection attack prevention off.
88
+ tokenizer = AutoTokenizer.from_pretrained("tangger/Qwen-7B-Chat", trust_remote_code=True)
89
 
90
  # use bf16
91
+ # model = AutoModelForCausalLM.from_pretrained("tangger/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
92
  # use fp16
93
+ # model = AutoModelForCausalLM.from_pretrained("tangger/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
94
  # use cpu only
95
+ # model = AutoModelForCausalLM.from_pretrained("tangger/Qwen-7B-Chat", device_map="cpu", trust_remote_code=True).eval()
96
  # use auto mode, automatically select precision based on the device.
97
+ model = AutoModelForCausalLM.from_pretrained("tangger/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
98
 
99
  # Specify hyperparameters for generation
100
+ model.generation_config = GenerationConfig.from_pretrained("tangger/Qwen-7B-Chat", trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
101
 
102
  # 第一轮对话 1st dialogue turn
103
  response, history = model.chat(tokenizer, "你好", history=None)