Update README.md
README.md CHANGED

```diff
@@ -37,7 +37,7 @@ language:
       <th>PiQA</th>
     </tr>
     <tr>
-      <td><a href="https://huggingface.co/datatab/Yugo55-GPT-v4-4bit/"
+      <td><a href="https://huggingface.co/datatab/Yugo55-GPT-v4-4bit/">*Yugo55-GPT-v4-4bit</a></td>
       <td>51.41</td>
       <td>36.00</td>
       <td>57.51</td>
```

````diff
@@ -95,4 +95,97 @@ models:
 merge_method: linear
 dtype: float16
 
 ```
````

## 💻 Usage

```terminal
!pip -q install git+https://github.com/huggingface/transformers # need to install from github
!pip install -q datasets loralib sentencepiece
!pip -q install bitsandbytes accelerate
```
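
The snippets below move inputs onto `cuda`, so it is worth confirming a GPU is actually visible before loading the model; a minimal check (not something the card itself asks for) looks like this:

```python
import torch

# The usage code below assumes a CUDA GPU; fail early if none is available.
if not torch.cuda.is_available():
    raise RuntimeError("No CUDA device found; run these snippets on a GPU runtime.")
print(torch.cuda.get_device_name(0))
```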

```python
from IPython.display import HTML, display


# Wrap long output lines in notebook cells so streamed generations stay readable.
def set_css():
    display(HTML('''
    <style>
      pre {
          white-space: pre-wrap;
      }
    </style>
    '''))


get_ipython().events.register('pre_run_cell', set_css)
```
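
`get_ipython()` is only defined inside IPython/Colab, so the registration above raises a `NameError` when the file is run as a plain script; a guarded variant (an alternative sketch, not taken from the card) would be:

```python
# Register the CSS hook only when an IPython kernel is actually running.
try:
    ip = get_ipython()  # provided by IPython/Colab at runtime
except NameError:
    ip = None

if ip is not None:
    ip.events.register('pre_run_cell', set_css)
```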

```python
import torch
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the merged model from the Hub (repo id: datatab/Yugo55-GPT-v4-4bit).
model = AutoModelForCausalLM.from_pretrained(
    "datatab/Yugo55-GPT-v4-4bit",
    torch_dtype="auto",
    device_map="auto",  # place the weights on the available GPU
)

tokenizer = AutoTokenizer.from_pretrained("datatab/Yugo55-GPT-v4-4bit")
```
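
The install step pulls in `bitsandbytes`, but the loading code never references it explicitly. If you want to control the 4-bit loading yourself instead of relying on whatever quantization settings ship with the checkpoint, a sketch along these lines should work; the NF4/float16 choices here are assumptions, not taken from the card:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Explicit 4-bit (NF4) loading through bitsandbytes, computing in float16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
)

model = AutoModelForCausalLM.from_pretrained(
    "datatab/Yugo55-GPT-v4-4bit",
    quantization_config=bnb_config,
    device_map="auto",
)
```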

```python
from typing import Optional
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer


def generate(user_content: str, system_content: Optional[str] = None) -> str:
    # Default system prompt (Serbian): "Always answer in Serbian, in the Latin script!!!
    # Below is an instruction that describes a task, paired with an input that provides
    # further context. Write a response that appropriately completes the request."
    if not system_content:
        system_content = (
            "Odgovoraj uvek na Srpskom jeziku latinica!!! Ispod je uputstvo koje "
            "opisuje zadatak, upareno sa unosom koji pruža dodatni kontekst. "
            "Napišite odgovor koji na odgovarajući način kompletira zahtev."
        )

    messages = [
        {"role": "system", "content": system_content},
        {"role": "user", "content": user_content},
    ]

    tokenized_chat = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    ).to("cuda")

    # Stream tokens to stdout as they are generated.
    text_streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    output = model.generate(
        tokenized_chat,
        streamer=text_streamer,
        max_new_tokens=2048,
        temperature=0.1,
        repetition_penalty=1.11,
        top_p=0.92,
        top_k=1000,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
        do_sample=True,
    )

    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
```
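
`generate` streams the answer as it is produced and also returns the decoded output, and the optional second argument overrides the default system prompt. For example (the prompt and system message below are purely illustrative):

```python
# Capture the returned text (prompt + completion, special tokens stripped) for further use.
answer = generate(
    "Objasni ukratko šta je mašinsko učenje",  # "Briefly explain what machine learning is."
    system_content="Odgovaraj uvek na srpskom jeziku, kratko i jasno.",  # "Always answer in Serbian, briefly and clearly."
)
```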

```python
# "List all the planets of the solar system and tell me which planet is the largest."
generate("Nabroj mi sve planete suncevog sistema i reci mi koja je najveca planeta")
```

```python
# "What is the difference between a llama, a vicuña, and an alpaca?"
generate("Koja je razlika između lame, vikune i alpake?")
```

```python
# "Write a short e-mail to Sam Altman giving reasons for an open-source GPT-4."
generate("Napišite kratku e-poruku Semu Altmanu dajući razloge za GPT-4 otvorenog koda")
```