Update README.md
Browse files
README.md
CHANGED
@@ -1,11 +1,11 @@
|
|
1 |
More information will follow, but this is how to generate text with the model.
|
2 |
You will need to install the following dependency:
|
3 |
-
|
4 |
pip install peft
|
5 |
-
|
6 |
|
7 |
To run in Python:
|
8 |
-
|
9 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
10 |
from peft import PeftConfig, PeftModelForCausalLM
|
11 |
|
@@ -23,4 +23,4 @@ inputs = tokenizer(prompt, return_tensors='pt')
|
|
23 |
output = model.generate(input_ids=inputs["input_ids"], do_sample= True, penalty_alpha=0.6, top_k=4, max_new_tokens=256)
|
24 |
outputs = tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
|
25 |
print(outputs)
|
26 |
-
|
|
|
1 |
More information will follow, but this is how to generate text with the model.
|
2 |
You will need to install the following dependency:
|
3 |
+
```bash
|
4 |
pip install peft
|
5 |
+
```
|
6 |
|
7 |
To run in Python:
|
8 |
+
```python
|
9 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
10 |
from peft import PeftConfig, PeftModelForCausalLM
|
11 |
|
|
|
23 |
output = model.generate(input_ids=inputs["input_ids"], do_sample= True, penalty_alpha=0.6, top_k=4, max_new_tokens=256)
|
24 |
outputs = tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
|
25 |
print(outputs)
|
26 |
+
```
|