Update README.md
README.md
CHANGED
@@ -60,7 +60,7 @@ messages = [
 
 prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
 
-llm = LLM(model=model_id, tensor_parallel_size=number_gpus)
+llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=4096)
 
 outputs = llm.generate(prompts, sampling_params)
 
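The change adds max_model_len=4096 to the LLM constructor, capping the context length the vLLM engine reserves KV-cache memory for. Below is a minimal sketch of the updated snippet in context, assuming vllm and transformers are installed; model_id, number_gpus, messages, and sampling_params are illustrative placeholders rather than values taken from this diff.

# Sketch of the README snippet after this change (placeholder values marked below).
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

model_id = "your-org/your-model"   # placeholder; the actual model id is defined earlier in the README
number_gpus = 1                    # placeholder tensor-parallel degree

tokenizer = AutoTokenizer.from_pretrained(model_id)
sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)  # illustrative settings

messages = [{"role": "user", "content": "Give a short summary of vLLM."}]

# Render the chat template to a plain prompt string for llm.generate().
prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

# max_model_len=4096 limits the sequence length so the KV cache fits on smaller GPUs.
llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=4096)

outputs = llm.generate(prompts, sampling_params)
print(outputs[0].outputs[0].text)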