HachiML committed
Commit 70b7127
1 Parent(s): 5e22cbb

Update README.md

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -34,7 +34,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
 
 # Inference 1 (Instruction)
-prompt = "<s>[INST] お気に入りの調味料は? [/INST]"
+prompt = "[INST] お気に入りの調味料は? [/INST]"
 input_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
 tokens = model.generate(input_ids.to(device=model.device), max_new_tokens=128, temperature=0.99, top_p=0.95, do_sample=True)
 out = tokenizer.decode(tokens[0], skip_special_tokens=True)
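For reference, below is a minimal sketch of the updated README snippet around this hunk, not part of the commit itself. `model_name` is a placeholder for the repository id defined earlier in the README, outside the lines shown above. Because `add_special_tokens=False` is passed to `tokenizer.encode`, no BOS token is added automatically, so the prompt is tokenized exactly as written after the leading `<s>` is dropped.

```python
# Minimal sketch of the updated inference snippet from the README.
# model_name is a placeholder; the real repository id is defined earlier
# in the README and is not part of the hunk above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "..."  # placeholder: the model repository id from the README

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, device_map="auto"
)

# Inference 1 (Instruction)
# The prompt no longer starts with "<s>"; with add_special_tokens=False,
# the tokenizer adds no special tokens and encodes the string as written.
prompt = "[INST] お気に入りの調味料は? [/INST]"  # "What is your favorite seasoning?"
input_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
tokens = model.generate(
    input_ids.to(device=model.device),
    max_new_tokens=128,
    temperature=0.99,
    top_p=0.95,
    do_sample=True,
)
out = tokenizer.decode(tokens[0], skip_special_tokens=True)
print(out)
```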