macadeliccc
committed on
Update README.md
Browse files
README.md
CHANGED
@@ -35,8 +35,8 @@ This model has been exposed to a wide variety of data. [macadeliccc/gemma-functi
|
|
35 |
```python
|
36 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
37 |
|
38 |
-
tokenizer = AutoTokenizer.from_pretrained("
|
39 |
-
model = AutoModelForCausalLM.from_pretrained("
|
40 |
|
41 |
input_text = "Write me a poem about Machine Learning."
|
42 |
input_ids = tokenizer(input_text, return_tensors="pt")
|
@@ -53,8 +53,8 @@ print(tokenizer.decode(outputs[0]))
|
|
53 |
# pip install accelerate
|
54 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
55 |
|
56 |
-
tokenizer = AutoTokenizer.from_pretrained("
|
57 |
-
model = AutoModelForCausalLM.from_pretrained("
|
58 |
|
59 |
input_text = "Write me a poem about Machine Learning."
|
60 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
@@ -72,8 +72,8 @@ print(tokenizer.decode(outputs[0]))
|
|
72 |
# pip install accelerate
|
73 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
74 |
|
75 |
-
tokenizer = AutoTokenizer.from_pretrained("
|
76 |
-
model = AutoModelForCausalLM.from_pretrained("
|
77 |
|
78 |
input_text = "Write me a poem about Machine Learning."
|
79 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
@@ -88,8 +88,8 @@ print(tokenizer.decode(outputs[0]))
|
|
88 |
# pip install accelerate
|
89 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
90 |
|
91 |
-
tokenizer = AutoTokenizer.from_pretrained("
|
92 |
-
model = AutoModelForCausalLM.from_pretrained("
|
93 |
|
94 |
input_text = "Write me a poem about Machine Learning."
|
95 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
@@ -108,8 +108,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
|
|
108 |
|
109 |
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
|
110 |
|
111 |
-
tokenizer = AutoTokenizer.from_pretrained("
|
112 |
-
model = AutoModelForCausalLM.from_pretrained("
|
113 |
|
114 |
input_text = "Write me a poem about Machine Learning."
|
115 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
@@ -126,8 +126,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
|
|
126 |
|
127 |
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
|
128 |
|
129 |
-
tokenizer = AutoTokenizer.from_pretrained("
|
130 |
-
model = AutoModelForCausalLM.from_pretrained("
|
131 |
|
132 |
input_text = "Write me a poem about Machine Learning."
|
133 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
|
|
35 |
```python
|
36 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
37 |
|
38 |
+
tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
|
39 |
+
model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
|
40 |
|
41 |
input_text = "Write me a poem about Machine Learning."
|
42 |
input_ids = tokenizer(input_text, return_tensors="pt")
|
|
|
53 |
# pip install accelerate
|
54 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
55 |
|
56 |
+
tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
|
57 |
+
model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", device_map="auto")
|
58 |
|
59 |
input_text = "Write me a poem about Machine Learning."
|
60 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
|
|
72 |
# pip install accelerate
|
73 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
74 |
|
75 |
+
tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
|
76 |
+
model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", device_map="auto", torch_dtype=torch.float16)
|
77 |
|
78 |
input_text = "Write me a poem about Machine Learning."
|
79 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
|
|
88 |
# pip install accelerate
|
89 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
90 |
|
91 |
+
tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
|
92 |
+
model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", device_map="auto", torch_dtype=torch.bfloat16)
|
93 |
|
94 |
input_text = "Write me a poem about Machine Learning."
|
95 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
|
|
108 |
|
109 |
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
|
110 |
|
111 |
+
tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
|
112 |
+
model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", quantization_config=quantization_config)
|
113 |
|
114 |
input_text = "Write me a poem about Machine Learning."
|
115 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
|
|
126 |
|
127 |
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
|
128 |
|
129 |
+
tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
|
130 |
+
model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", quantization_config=quantization_config)
|
131 |
|
132 |
input_text = "Write me a poem about Machine Learning."
|
133 |
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|