Create README.md
I will add more information later, but this is how to generate text with the model.

You will need to install PEFT:
```bash
pip install peft
```
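The Python example below also imports from `transformers`; `peft` normally pulls in `transformers` and `torch` as dependencies, but if your environment does not already have them (an assumption about your setup), you may also need:

```bash
pip install transformers torch
```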

To run it in Python:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModelForCausalLM

peft_model_id = 'GrantC/alpaca-opt-1.3b-lora'
BASE_MODEL = 'facebook/opt-1.3b'

# Load the adapter config, the base OPT model, and its tokenizer
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)

# Wrap the base model with the LoRA adapter weights
model = PeftModelForCausalLM.from_pretrained(model, peft_model_id, device_map="auto")

prompt = "Write a blog post about shaving cream:"
print(prompt)

# Tokenize the prompt and generate up to 256 new tokens
inputs = tokenizer(prompt, return_tensors='pt')
output = model.generate(input_ids=inputs["input_ids"], do_sample=True, penalty_alpha=0.6, top_k=4, max_new_tokens=256)

# Decode and print the generated text
outputs = tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print(outputs)
```
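
If the adapter was trained on the standard Alpaca instruction format (the model name suggests this, but it is not stated here, so treat it as an assumption), wrapping your request in that template may give better results. A minimal sketch reusing the `model` and `tokenizer` from above:

```python
# Standard Alpaca prompt template (assumed; adjust if the adapter was trained on a different format)
ALPACA_TEMPLATE = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Response:\n"
)

prompt = ALPACA_TEMPLATE.format(instruction="Write a blog post about shaving cream.")
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(input_ids=inputs["input_ids"], do_sample=True, penalty_alpha=0.6, top_k=4, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```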