x54-729 committed
Commit a81187a (1 parent: 695532d)

fix example prompt

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -130,7 +130,7 @@ tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-20b", trust_remote_code=True)
  # `torch_dtype=torch.float16` loads the model in float16 precision; otherwise transformers loads it in float32, which may run out of GPU memory
  model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-20b", torch_dtype=torch.float16, trust_remote_code=True).cuda()
  model = model.eval()
- inputs = tokenizer(["来到美丽的大自然,我们发现"], return_tensors="pt")
+ inputs = tokenizer(["来到美丽的大自然"], return_tensors="pt")
  for k,v in inputs.items():
      inputs[k] = v.cuda()
  gen_kwargs = {"max_length": 128, "top_p": 0.8, "temperature": 0.8, "do_sample": True, "repetition_penalty": 1.0}
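For context, below is a self-contained sketch of the snippet this hunk edits, using the corrected prompt. The `model.generate` and `tokenizer.decode` lines after `gen_kwargs` are an assumption about how the README example continues, and running it requires a CUDA GPU with enough memory to hold the 20B model in float16.

```python
# Minimal sketch of the README example touched by this commit (assumed continuation).
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-20b", trust_remote_code=True)
# Load in float16; otherwise transformers defaults to float32 and may exhaust GPU memory.
model = AutoModelForCausalLM.from_pretrained(
    "internlm/internlm2-20b", torch_dtype=torch.float16, trust_remote_code=True
).cuda()
model = model.eval()

# The fixed example prompt from this commit.
inputs = tokenizer(["来到美丽的大自然"], return_tensors="pt")
for k, v in inputs.items():
    inputs[k] = v.cuda()

gen_kwargs = {"max_length": 128, "top_p": 0.8, "temperature": 0.8, "do_sample": True, "repetition_penalty": 1.0}
# Assumed continuation: sample a completion and decode it back to text.
output = model.generate(**inputs, **gen_kwargs)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```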