Update README.md
Browse files
README.md
CHANGED
@@ -31,7 +31,7 @@ import torch
|
|
31 |
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, GemmaTokenizer
|
32 |
|
33 |
|
34 |
-
model_id = ""
|
35 |
bnb_config = BitsAndBytesConfig(
|
36 |
load_in_4bit=True,
|
37 |
bnb_4bit_quant_type="nf4",
|
@@ -41,12 +41,15 @@ bnb_config = BitsAndBytesConfig(
|
|
41 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
42 |
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"":0})
|
43 |
|
44 |
-
text = ""
|
45 |
device = "cuda:0"
|
46 |
inputs = tokenizer(text, return_tensors="pt").to(device)
|
47 |
|
48 |
outputs = model.generate(**inputs, max_new_tokens=60)
|
49 |
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
|
|
|
|
|
|
50 |
```
|
51 |
|
52 |
## Intended uses & limitations
|
|
|
31 |
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, GemmaTokenizer
|
32 |
|
33 |
|
34 |
+
model_id = "cocaho/outputs"
|
35 |
bnb_config = BitsAndBytesConfig(
|
36 |
load_in_4bit=True,
|
37 |
bnb_4bit_quant_type="nf4",
|
|
|
41 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
42 |
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"":0})
|
43 |
|
44 |
+
text = "sociological imagination is "
|
45 |
device = "cuda:0"
|
46 |
inputs = tokenizer(text, return_tensors="pt").to(device)
|
47 |
|
48 |
outputs = model.generate(**inputs, max_new_tokens=60)
|
49 |
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
50 |
+
|
51 |
+
# Output: sociological imagination is the ability to see the relationship between personal troubles and public issues
|
52 |
+
|
53 |
```
|
54 |
|
55 |
## Intended uses & limitations
|