Laurie committed on
Commit
ede08f0
1 Parent(s): c5bef22

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -6
README.md CHANGED
@@ -14,12 +14,14 @@ pipeline_tag: text2text-generation
14
 
15
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
16
  import torch
 
17
  tokenizer = AutoTokenizer.from_pretrained("baichuan7b-lora-merged", trust_remote_code=True)
18
  model = AutoModelForCausalLM.from_pretrained("baichuan7b-lora-merged", device_map="auto", trust_remote_code=True,torch_dtype=torch.float16)
19
  streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
20
-
21
- query = "爆炒钢筋水泥怎么做才好吃?"
22
-
23
- inputs = tokenizer(["<human>:{}\n<bot>:".format(query)], return_tensors="pt")
24
- inputs = inputs.to("cuda")
25
- generate_ids = model.generate(**inputs, max_new_tokens=256, streamer=streamer)
 
 
14
 
15
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
16
  import torch
17
+
18
  tokenizer = AutoTokenizer.from_pretrained("baichuan7b-lora-merged", trust_remote_code=True)
19
  model = AutoModelForCausalLM.from_pretrained("baichuan7b-lora-merged", device_map="auto", trust_remote_code=True,torch_dtype=torch.float16)
20
  streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
21
+ while True:
22
+     query = input('请输入问题:')
23
+     if len(query.strip()) == 0:
24
+         break
25
+     inputs = tokenizer(["<human>:{}\n<bot>:".format(query)], return_tensors="pt")
26
+     inputs = inputs.to("cuda")
27
+     generate_ids = model.generate(**inputs, max_new_tokens=256, streamer=streamer)