perlthoughts committed
Commit b6d1859 • 1 Parent(s): 0100849
Update README.md

README.md CHANGED
@@ -25,28 +25,26 @@ ASSISTANT:
 ### Code example:
 
 ```python
-import torch, json
 from transformers import AutoModelForCausalLM, AutoTokenizer
-
-
+import torch, json
+# model path
+model_path = "NurtureAI/SynthIA-7B-v2.0-16k"
 output_file_path = "./SynthIA-7B-v2.0-conversations.jsonl"
-
+device_map = {"": "cuda"}
 model = AutoModelForCausalLM.from_pretrained(
     model_path,
     torch_dtype=torch.float16,
-    device_map=
+    device_map=device_map,
     load_in_8bit=False,
     trust_remote_code=True,
 )
-
+# tokenizer
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
-
 def generate_text(instruction):
     tokens = tokenizer.encode(instruction)
     tokens = torch.LongTensor(tokens).unsqueeze(0)
     tokens = tokens.to("cuda")
-
     instance = {
         "input_ids": tokens,
         "top_p": 1.0,
@@ -54,7 +52,6 @@ def generate_text(instruction):
         "generate_len": 1024,
         "top_k": 50,
     }
-
     length = len(tokens[0])
     with torch.no_grad():
         rest = model.generate(