losyer8 committed on
Commit
343b15a
1 Parent(s): 86b2954

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -1
README.md CHANGED
@@ -49,6 +49,7 @@ Checkpoints format: Hugging Face Transformers (Megatron-DeepSpeed format models
49
  - torch>=2.0.0
50
  - transformers>=4.34.0
51
  - tokenizers>=0.14.0
 
52
 
53
  ## Usage
54
 
@@ -56,7 +57,7 @@ Checkpoints format: Hugging Face Transformers (Megatron-DeepSpeed format models
56
  import torch
57
  from transformers import AutoTokenizer, AutoModelForCausalLM
58
  tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-1.3b-v1.0")
59
- model = AutoModelForCausalLM.from_pretrained("llm-jp/llm-jp-1.3b-v1.0", torch_dtype=torch.float16)
60
  text = "自然言語処理とは何か"
61
  tokenized_input = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt").to(model.device)
62
  with torch.no_grad():
 
49
  - torch>=2.0.0
50
  - transformers>=4.34.0
51
  - tokenizers>=0.14.0
52
+ - accelerate==0.23.0
53
 
54
  ## Usage
55
 
 
57
  import torch
58
  from transformers import AutoTokenizer, AutoModelForCausalLM
59
  tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-1.3b-v1.0")
60
+ model = AutoModelForCausalLM.from_pretrained("llm-jp/llm-jp-1.3b-v1.0", device_map="auto", torch_dtype=torch.float16)
61
  text = "自然言語処理とは何か"
62
  tokenized_input = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt").to(model.device)
63
  with torch.no_grad():