ekurtulus committed
Commit
98b5459
1 Parent(s): d7c2bc5

Update README.md

Files changed (1)
  1. README.md +5 -1
README.md CHANGED
@@ -20,20 +20,24 @@ Loading the model:
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 tokenizer = AutoTokenizer.from_pretrained("HyperbeeAI/Tulpar-7b-v0")
-model = AutoModelForCausalLM.from_pretrained("HyperbeeAI/Tulpar-7b-v0")
+model = AutoModelForCausalLM.from_pretrained("HyperbeeAI/Tulpar-7b-v0", device_map="auto")
 ```
 
 You can run inference with both of the following prompts:
 ```python
+input_text="What is deep learning?"
 prompt = f"### User: {input_text}\n\n### Assistant:\n"
 inputs = tokenizer(prompt, return_tensors="pt")
 output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=512)
+print(tokenizer.decode(output[0]))
 ```
 
 ```python
+input_text="What is deep learning?"
 prompt = f"Question: {input_text}\n\nAnswer:"
 inputs = tokenizer(prompt, return_tensors="pt")
 output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=512)
+print(tokenizer.decode(output[0]))
 ```
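For context, the updated snippet assembled into a single runnable script is sketched below. This is a minimal sketch, not part of the commit: the `.to(model.device)` move and `skip_special_tokens=True` are additions for convenience, and they assume a single-device placement under `device_map="auto"`.

```python
# Minimal end-to-end sketch of the updated README snippet (assumes the
# HyperbeeAI/Tulpar-7b-v0 checkpoint is reachable on the Hugging Face Hub).
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("HyperbeeAI/Tulpar-7b-v0")
# device_map="auto" places the weights on the available device(s), as in the commit.
model = AutoModelForCausalLM.from_pretrained("HyperbeeAI/Tulpar-7b-v0", device_map="auto")

input_text = "What is deep learning?"
prompt = f"### User: {input_text}\n\n### Assistant:\n"
# Moving the input tensors to the model's device is an addition, not in the commit.
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=512)
# skip_special_tokens=True is also an addition; the commit prints raw decoded output.
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

The same script works with the second prompt format by swapping in `prompt = f"Question: {input_text}\n\nAnswer:"`.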