Taka008 committed on
Commit
da802b0
β€’
1 Parent(s): 524b999

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -2
README.md CHANGED
@@ -63,9 +63,11 @@ Checkpoints format: Hugging Face Transformers (Megatron-DeepSpeed format models
63
 
64
  ```python
65
  import torch
66
- from transformers import AutoTokenizer, AutoModelForCausalLM
 
 
67
  tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-13b-dpo-lora-hh_rlhf_ja-v1.1")
68
- model = AutoModelForCausalLM.from_pretrained("llm-jp/llm-jp-13b-dpo-lora-hh_rlhf_ja-v1.1", device_map="auto", torch_dtype=torch.float16)
69
  text = "δ»₯下は、タスクをθͺ¬ζ˜Žγ™γ‚‹ζŒ‡η€Ίγ§γ™γ€‚θ¦ζ±‚γ‚’ι©εˆ‡γ«ζΊ€γŸγ™εΏœη­”γ‚’ζ›Έγγͺさい。\n\n### ζŒ‡η€Ί:\n{instruction}\n\n### εΏœη­”:\n".format(instruction="θ‡ͺ焢言θͺžε‡¦η†γ¨γ―何か")
70
  tokenized_input = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt").to(model.device)
71
  with torch.no_grad():
 
63
 
64
  ```python
65
  import torch
66
+ from transformers import AutoTokenizer
67
+ from peft import AutoPeftModelForCausalLM
68
+
69
  tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-13b-dpo-lora-hh_rlhf_ja-v1.1")
70
+ model = AutoPeftModelForCausalLM.from_pretrained("llm-jp/llm-jp-13b-dpo-lora-hh_rlhf_ja-v1.1", device_map="auto", torch_dtype=torch.float16)
71
  text = "δ»₯下は、タスクをθͺ¬ζ˜Žγ™γ‚‹ζŒ‡η€Ίγ§γ™γ€‚θ¦ζ±‚γ‚’ι©εˆ‡γ«ζΊ€γŸγ™εΏœη­”γ‚’ζ›Έγγͺさい。\n\n### ζŒ‡η€Ί:\n{instruction}\n\n### εΏœη­”:\n".format(instruction="θ‡ͺ焢言θͺžε‡¦η†γ¨γ―何か")
72
  tokenized_input = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt").to(model.device)
73
  with torch.no_grad():