losyer8 committed on
Commit
ff1fcd2
1 Parent(s): 6e89f8f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -49,7 +49,7 @@ Checkpoints format: Hugging Face Transformers (Megatron-DeepSpeed format models
49
  - torch>=2.0.0
50
  - transformers>=4.34.0
51
  - tokenizers>=0.14.0
52
-
53
 
54
  ## Usage
55
 
@@ -57,7 +57,7 @@ Checkpoints format: Hugging Face Transformers (Megatron-DeepSpeed format models
57
  import torch
58
  from transformers import AutoTokenizer, AutoModelForCausalLM
59
  tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-13b-instruct-full-dolly-oasst-v1.0")
60
- model = AutoModelForCausalLM.from_pretrained("llm-jp/llm-jp-13b-instruct-full-dolly-oasst-v1.0", torch_dtype=torch.float16)
61
  text = "自然言語処理とは何か"
62
  text = text + "### 回答:"
63
  tokenized_input = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt").to(model.device)
 
49
  - torch>=2.0.0
50
  - transformers>=4.34.0
51
  - tokenizers>=0.14.0
52
+ - accelerate==0.23.0
53
 
54
  ## Usage
55
 
 
57
  import torch
58
  from transformers import AutoTokenizer, AutoModelForCausalLM
59
  tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-13b-instruct-full-dolly-oasst-v1.0")
60
+ model = AutoModelForCausalLM.from_pretrained("llm-jp/llm-jp-13b-instruct-full-dolly-oasst-v1.0", device_map="auto", torch_dtype=torch.float16)
61
  text = "自然言語処理とは何か"
62
  text = text + "### 回答:"
63
  tokenized_input = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt").to(model.device)