File size: 1,599 Bytes
a858739
 
 
e2c44ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c59532d
e2c44ff
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
---
license: apache-2.0
---



## Usage
```python
"""Minimal inference example for Writer/camel-5b (Alpaca-style prompting)."""
import os

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Set your token in the shell first: export HF_TOKEN=hf_***
# Falls back to True, which tells transformers to use the locally cached token.
auth_token = os.environ.get("HF_TOKEN", True)

model_name = "Writer/camel-5b"

tokenizer = AutoTokenizer.from_pretrained(
    model_name, use_auth_token=auth_token
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",          # shard/place layers automatically
    torch_dtype=torch.float16,
    use_auth_token=auth_token,
)


instruction = "Describe a futuristic device that revolutionizes space travel."
# Optional extra context for the instruction. Leave empty ("") to use the
# no-input prompt template.
context = ""


PROMPT_DICT = {
    "prompt_input": (
        "Below is an instruction that describes a task, paired with an input that provides further context. "
        "Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"
    ),
    "prompt_no_input": (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{instruction}\n\n### Response:"
    ),
}

# NOTE: the original snippet tested `if not input`, but `input` is the Python
# builtin function (always truthy), so the no-input branch was unreachable and
# the builtin's repr was substituted into the prompt. Use an explicit context
# string instead.
text = (
    PROMPT_DICT["prompt_input"].format(instruction=instruction, input=context)
    if context
    else PROMPT_DICT["prompt_no_input"].format(instruction=instruction)
)

# Use model.device rather than a hardcoded "cuda" so the example also works
# when device_map places the model elsewhere (or on CPU-only machines).
model_inputs = tokenizer(text, return_tensors="pt").to(model.device)
output_ids = model.generate(
    **model_inputs,
    max_length=100,
)
output_text = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
# Keep only the generated answer that follows the "### Response:" marker.
clean_output = output_text.split("### Response:")[1].strip()

print(clean_output)
```