macadeliccc committed
Commit 1e92226 · verified · 1 Parent(s): 2ec216a

Update README.md

Files changed (1):
  1. README.md +12 -12

README.md CHANGED
@@ -35,8 +35,8 @@ This model has been exposed to a wide variety of data. [macadeliccc/gemma-functi
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b")
+tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
+model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
 
 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt")
@@ -53,8 +53,8 @@ print(tokenizer.decode(outputs[0]))
 # pip install accelerate
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
+model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", device_map="auto")
 
 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -72,8 +72,8 @@ print(tokenizer.decode(outputs[0]))
 # pip install accelerate
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto", torch_dtype=torch.float16)
+tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
+model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", device_map="auto", torch_dtype=torch.float16)
 
 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -88,8 +88,8 @@ print(tokenizer.decode(outputs[0]))
 # pip install accelerate
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto", torch_dtype=torch.bfloat16)
+tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
+model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", device_map="auto", torch_dtype=torch.bfloat16)
 
 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -108,8 +108,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
 quantization_config = BitsAndBytesConfig(load_in_8bit=True)
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", quantization_config=quantization_config)
+tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
+model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", quantization_config=quantization_config)
 
 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -126,8 +126,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
 quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", quantization_config=quantization_config)
+tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
+model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo", quantization_config=quantization_config)
 
 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
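Every hunk above is truncated before the generation call. For reference, here is a minimal end-to-end sketch of the updated basic (CPU) usage; the `generate`/`decode` lines follow the standard transformers API rather than the README verbatim, and `max_new_tokens=64` is an illustrative choice, not a value from the commit:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned repo id that this commit swaps in for google/gemma-7b.
tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
model = AutoModelForCausalLM.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt")

# Generate a short completion and print it (max_new_tokens is illustrative).
outputs = model.generate(**input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0]))
```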
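The float16/bfloat16 hunks reference `torch.float16`/`torch.bfloat16`, but the imports shown in those snippets never bring in `torch`. A sketch of the GPU path with that import added (assuming `pip install accelerate` for `device_map="auto"`, as the snippets' own comments note):

```python
import torch  # needed for torch.bfloat16; not shown in the README snippets
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
model = AutoModelForCausalLM.from_pretrained(
    "macadeliccc/gemma-orchid-7b-dpo",
    device_map="auto",
    torch_dtype=torch.bfloat16,  # or torch.float16, per the earlier hunk
)

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0]))
```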
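Likewise for the quantized hunks: a self-contained sketch of the 4-bit variant (8-bit is identical except `load_in_8bit=True`), assuming a CUDA GPU plus `pip install accelerate bitsandbytes`:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit quantized weights via bitsandbytes, as in the final hunk.
quantization_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained("macadeliccc/gemma-orchid-7b-dpo")
model = AutoModelForCausalLM.from_pretrained(
    "macadeliccc/gemma-orchid-7b-dpo",
    quantization_config=quantization_config,
)

input_text = "Write me a poem about Machine Learning."
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0]))
```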