jetaudio committed on
Commit
7ca8e1a
1 Parent(s): 622a099

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +34 -0
README.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### GPU
2
+ ```python
3
+ import torch
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer
5
+
6
def generate_prompt(instruction, input=""):
    """Build an RWKV-style prompt from an instruction and optional input.

    Both fields are stripped of surrounding whitespace, CRLF line endings
    are normalised to LF, and doubled newlines are collapsed in a single
    pass. When *input* is non-empty the Instruction/Input/Response template
    is used; otherwise a two-turn User/Assistant chat template is used.

    Args:
        instruction: The task or question to pose to the model.
        input: Optional supporting context for the instruction.

    Returns:
        The formatted prompt string, ending at the point where the model
        should continue generating.
    """

    def _normalise(text):
        # One-pass cleanup: strip, CRLF -> LF, then "\n\n" -> "\n".
        return text.strip().replace('\r\n', '\n').replace('\n\n', '\n')

    instruction = _normalise(instruction)
    input = _normalise(input)

    if not input:
        # Chat-style template seeded with a short greeting exchange.
        return f"""User: hi

Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.

User: {instruction}

Assistant:"""

    return f"""Instruction: {instruction}

Input: {input}

Response:"""
23
+
24
+
25
# Hub repo ships custom modelling code, hence trust_remote_code; weights are
# loaded in bfloat16 and placed on the first CUDA device.
model_id = "jetaudio/rwkv-5-v2-3b-16k"
device = 0

model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, torch_dtype=torch.bfloat16
).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

text = "介绍一下大熊猫"
prompt = generate_prompt(text)

# Tokenize onto the same device as the model, then sample a completion.
inputs = tokenizer(prompt, return_tensors="pt").to(device)
output = model.generate(
    inputs["input_ids"],
    max_new_tokens=128,
    do_sample=True,
    temperature=1.0,
    top_p=0.3,
    top_k=0,
)
print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True))
34
+ ```