zRzRzRzRzRzRzR committed
Commit
6324425
1 Parent(s): b84dc74

Update README.md

Files changed (1)
  1. README.md +14 -5
README.md CHANGED
@@ -112,10 +112,17 @@ with torch.no_grad():
 from transformers import AutoTokenizer
 from vllm import LLM, SamplingParams
 
+# GLM-4-9B-Chat-1M
+# max_model_len, tp_size = 1048576, 4
+
 # GLM-4-9B-Chat
+from transformers import AutoTokenizer
+from vllm import LLM, SamplingParams
+
+# If you run into OOM, reduce max_model_len or increase tp_size
 max_model_len, tp_size = 131072, 1
 model_name = "THUDM/glm-4-9b-chat"
-prompt = '你好'
+prompt = [{"role": "user", "content": "你好"}]
 
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 llm = LLM(
@@ -124,15 +131,17 @@ llm = LLM(
     max_model_len=max_model_len,
     trust_remote_code=True,
     enforce_eager=True,
+    # GLM-4-9B-Chat-1M: if you run into OOM, enable the parameters below
+    # enable_chunked_prefill=True,
+    # max_num_batched_tokens=8192
 )
 stop_token_ids = [151329, 151336, 151338]
 sampling_params = SamplingParams(temperature=0.95, max_tokens=1024, stop_token_ids=stop_token_ids)
 
-inputs = tokenizer.build_chat_input(prompt, history=None, role='user')['input_ids'].tolist()
-outputs = llm.generate(prompt_token_ids=inputs, sampling_params=sampling_params)
+inputs = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
+outputs = llm.generate(prompts=inputs, sampling_params=sampling_params)
 
-generated_text = [output.outputs[0].text for output in outputs]
-print(generated_text)
+print(outputs[0].outputs[0].text)
 ```
 
 ## License
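
For readers who want the end state rather than the patch, the sketch below assembles the updated vLLM example from the added and context lines above. The commit replaces the GLM-specific `tokenizer.build_chat_input(...)` call, which fed pre-tokenized IDs to `llm.generate(prompt_token_ids=...)`, with the standard `tokenizer.apply_chat_template(...)` API, which renders the chat messages to a prompt string passed via `prompts=`. The `model=model_name` and `tensor_parallel_size=tp_size` arguments to `LLM(...)` fall outside the hunks shown here and are assumptions; the duplicated import lines added by the commit are omitted.

```python
# Sketch of the updated snippet after this commit (not the verbatim README).
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

# GLM-4-9B-Chat-1M
# max_model_len, tp_size = 1048576, 4

# GLM-4-9B-Chat
# If you run into OOM, reduce max_model_len or increase tp_size
max_model_len, tp_size = 131072, 1
model_name = "THUDM/glm-4-9b-chat"
prompt = [{"role": "user", "content": "你好"}]

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
llm = LLM(
    model=model_name,              # assumed: outside the hunks shown in this diff
    tensor_parallel_size=tp_size,  # assumed: outside the hunks shown in this diff
    max_model_len=max_model_len,
    trust_remote_code=True,
    enforce_eager=True,
    # GLM-4-9B-Chat-1M: if you run into OOM, enable the parameters below
    # enable_chunked_prefill=True,
    # max_num_batched_tokens=8192
)
stop_token_ids = [151329, 151336, 151338]
sampling_params = SamplingParams(temperature=0.95, max_tokens=1024, stop_token_ids=stop_token_ids)

# Render the chat messages to a prompt string, then generate with vLLM
inputs = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
outputs = llm.generate(prompts=inputs, sampling_params=sampling_params)

print(outputs[0].outputs[0].text)
```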