alexmarques committed on
Commit
e1d6871
1 Parent(s): e0aad87

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -3
README.md CHANGED
@@ -41,6 +41,7 @@ from vllm import LLM, SamplingParams
41
  from transformers import AutoTokenizer
42
 
43
  model_id = "neuralmagic/Qwen2-1.5B-Instruct-quantized.w8a16"
 
44
 
45
  sampling_params = SamplingParams(temperature=0.7, top_p=0.8, max_tokens=256)
46
 
@@ -51,9 +52,9 @@ messages = [
51
  {"role": "user", "content": "Who are you?"},
52
  ]
53
 
54
- prompts = tokenizer.apply_chat_template(messages, tokenize=False)
55
 
56
- llm = LLM(model=model_id)
57
 
58
  outputs = llm.generate(prompts, sampling_params)
59
 
@@ -161,7 +162,7 @@ The model was evaluated on the [OpenLLM](https://huggingface.co/spaces/open-llm-
161
  ```
162
  lm_eval \
163
  --model vllm \
164
- --model_args pretrained="neuralmagic/Qwen2-1.5B-Instruct-quantized.w8a16",dtype=auto,gpu_memory_utilization=0.4,add_bos_token=True,max_model_len=4096 \
165
  --tasks openllm \
166
  --batch_size auto
167
  ```
 
41
  from transformers import AutoTokenizer
42
 
43
  model_id = "neuralmagic/Qwen2-1.5B-Instruct-quantized.w8a16"
44
+ number_gpus = 1
45
 
46
  sampling_params = SamplingParams(temperature=0.7, top_p=0.8, max_tokens=256)
47
 
 
52
  {"role": "user", "content": "Who are you?"},
53
  ]
54
 
55
+ prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
56
 
57
+ llm = LLM(model=model_id, tensor_parallel_size=number_gpus)
58
 
59
  outputs = llm.generate(prompts, sampling_params)
60
 
 
162
  ```
163
  lm_eval \
164
  --model vllm \
165
+ --model_args pretrained="neuralmagic/Qwen2-1.5B-Instruct-quantized.w8a16",dtype=auto,gpu_memory_utilization=0.4,add_bos_token=True,max_model_len=4096,tensor_parallel_size=1 \
166
  --tasks openllm \
167
  --batch_size auto
168
  ```