afrideva commited on
Commit
75a38ec
1 Parent(s): f3f952a

Upload README.md with huggingface_hub

Browse files
Files changed (1) hide show
  1. README.md +231 -0
README.md ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: mesolitica/malaysian-tinyllama-1.1b-16k-instructions
3
+ inference: false
4
+ language:
5
+ - ms
6
+ model_creator: mesolitica
7
+ model_name: malaysian-tinyllama-1.1b-16k-instructions
8
+ pipeline_tag: text-generation
9
+ quantized_by: afrideva
10
+ tags:
11
+ - gguf
12
+ - ggml
13
+ - quantized
14
+ - q2_k
15
+ - q3_k_m
16
+ - q4_k_m
17
+ - q5_k_m
18
+ - q6_k
19
+ - q8_0
20
+ ---
21
+ # mesolitica/malaysian-tinyllama-1.1b-16k-instructions-GGUF
22
+
23
+ Quantized GGUF model files for [malaysian-tinyllama-1.1b-16k-instructions](https://huggingface.co/mesolitica/malaysian-tinyllama-1.1b-16k-instructions) from [mesolitica](https://huggingface.co/mesolitica)
24
+
25
+
26
+ | Name | Quant method | Size |
27
+ | ---- | ---- | ---- |
28
+ | [malaysian-tinyllama-1.1b-16k-instructions.q2_k.gguf](https://huggingface.co/afrideva/malaysian-tinyllama-1.1b-16k-instructions-GGUF/resolve/main/malaysian-tinyllama-1.1b-16k-instructions.q2_k.gguf) | q2_k | None |
29
+ | [malaysian-tinyllama-1.1b-16k-instructions.q3_k_m.gguf](https://huggingface.co/afrideva/malaysian-tinyllama-1.1b-16k-instructions-GGUF/resolve/main/malaysian-tinyllama-1.1b-16k-instructions.q3_k_m.gguf) | q3_k_m | None |
30
+ | [malaysian-tinyllama-1.1b-16k-instructions.q4_k_m.gguf](https://huggingface.co/afrideva/malaysian-tinyllama-1.1b-16k-instructions-GGUF/resolve/main/malaysian-tinyllama-1.1b-16k-instructions.q4_k_m.gguf) | q4_k_m | None |
31
+ | [malaysian-tinyllama-1.1b-16k-instructions.q5_k_m.gguf](https://huggingface.co/afrideva/malaysian-tinyllama-1.1b-16k-instructions-GGUF/resolve/main/malaysian-tinyllama-1.1b-16k-instructions.q5_k_m.gguf) | q5_k_m | None |
32
+ | [malaysian-tinyllama-1.1b-16k-instructions.q6_k.gguf](https://huggingface.co/afrideva/malaysian-tinyllama-1.1b-16k-instructions-GGUF/resolve/main/malaysian-tinyllama-1.1b-16k-instructions.q6_k.gguf) | q6_k | None |
33
+ | [malaysian-tinyllama-1.1b-16k-instructions.q8_0.gguf](https://huggingface.co/afrideva/malaysian-tinyllama-1.1b-16k-instructions-GGUF/resolve/main/malaysian-tinyllama-1.1b-16k-instructions.q8_0.gguf) | q8_0 | None |
34
+
35
+
36
+
37
+ ## Original Model Card:
38
+ # Full Parameter Finetuning TinyLlama 16384 context length on Malaysian instructions dataset
39
+
40
+ README at https://github.com/mesolitica/malaya/tree/5.1/session/tiny-llama#instructions-7b-16384-context-length
41
+
42
+ We use the exact Llama 2 Instruct chat template, extended with function calling
43
+
44
+ Training logs on WandB: https://wandb.ai/mesolitica/fpf-tinyllama-1.1b-hf-instructions-16k-function-call?workspace=user-husein-mesolitica
45
+
46
+ ## how-to
47
+
48
+ ```python
49
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
50
+ import torch, json
51
+
52
+ def parse_llama_chat(messages, function_call = None):
53
+
54
+ system = messages[0]['content']
55
+ user_query = messages[-1]['content']
56
+
57
+ users, assistants = [], []
58
+ for q in messages[1:-1]:
59
+ if q['role'] == 'user':
60
+ users.append(q['content'])
61
+ elif q['role'] == 'assistant':
62
+ assistants.append(q['content'])
63
+
64
+ texts = [f'<s>[INST] <<SYS>>\n{system}\n<</SYS>>\n\n']
65
+ if function_call:
66
+ fs = []
67
+ for f in function_call:
68
+ f = json.dumps(f, indent=4)
69
+ fs.append(f)
70
+ fs = '\n\n'.join(fs)
71
+ texts.append(f'\n[FUNCTIONCALL]\n{fs}\n')
72
+ for u, a in zip(users, assistants):
73
+ texts.append(f'{u.strip()} [/INST] {a.strip()} </s><s>[INST] ')
74
+ texts.append(f'{user_query.strip()} [/INST]')
75
+ prompt = ''.join(texts).strip()
76
+ return prompt
77
+
78
+ TORCH_DTYPE = 'bfloat16'
79
+ nf4_config = BitsAndBytesConfig(
80
+ load_in_4bit=True,
81
+ bnb_4bit_quant_type='nf4',
82
+ bnb_4bit_use_double_quant=True,
83
+ bnb_4bit_compute_dtype=getattr(torch, TORCH_DTYPE)
84
+ )
85
+
86
+ tokenizer = AutoTokenizer.from_pretrained('mesolitica/malaysian-tinyllama-1.1b-16k-instructions')
87
+ model = AutoModelForCausalLM.from_pretrained(
88
+ 'mesolitica/malaysian-tinyllama-1.1b-16k-instructions',
89
+ use_flash_attention_2 = True,
90
+ quantization_config = nf4_config
91
+ )
92
+
93
+ messages = [
94
+ {'role': 'system', 'content': 'awak adalah AI yang mampu jawab segala soalan'},
95
+ {'role': 'user', 'content': 'kwsp tu apa'}
96
+ ]
97
+ prompt = parse_llama_chat(messages)
98
+ inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
99
+ generate_kwargs = dict(
100
+ inputs,
101
+ max_new_tokens=1024,
102
+ top_p=0.95,
103
+ top_k=50,
104
+ temperature=0.9,
105
+ do_sample=True,
106
+ num_beams=1,
107
+ )
108
+ r = model.generate(**generate_kwargs)
109
+ print(tokenizer.decode(r[0]))
110
+ ```
111
+
112
+ ```text
113
+ '<s> [INST] <<SYS>>
114
+ awak adalah AI yang mampu jawab segala soalan
115
+ <</SYS>>
116
+
117
+ kwsp tu apa [/INST] KWSP bermaksud Kumpulan Wang Persaraan. </s>'
118
+ ```
119
+
120
+ ```python
121
+ messages = [
122
+ {'role': 'system', 'content': 'awak adalah AI yang mampu jawab segala soalan'},
123
+ {'role': 'user', 'content': 'awat malaysia ada jabatan koko, malaysia bukan buat keluaq koko banyak pun'}
124
+ ]
125
+ prompt = parse_llama_chat(messages)
126
+ inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
127
+ generate_kwargs = dict(
128
+ inputs,
129
+ max_new_tokens=1024,
130
+ top_p=0.95,
131
+ top_k=50,
132
+ temperature=0.9,
133
+ do_sample=True,
134
+ num_beams=1,
135
+ )
136
+ r = model.generate(**generate_kwargs)
137
+ print(tokenizer.decode(r[0]))
138
+ ```
139
+
140
+ ```text
141
+ <s> [INST] <<SYS>>
142
+ awak adalah AI yang mampu jawab segala soalan
143
+ <</SYS>>
144
+
145
+ awat malaysia ada jabatan koko, malaysia bukan buat keluaq koko banyak pun [/INST] Jabatan ini bertanggungjawab untuk mengeluarkan dan mengagihkan produk koko ke pasaran tempatan dan antarabangsa. Mereka juga menyumbang kepada pembangunan industri koko dan memastikan penggunaan sumber asli yang bertanggungjawab. Selain itu, mereka menjalankan penyelidikan dan inovasi untuk meningkatkan proses pengeluaran dan meningkatkan daya saing produk koko. </s>
146
+ ```
147
+
148
+ ```python
149
+ f2 = {
150
+ 'name': 'parse_entities',
151
+ 'description': 'extract entities from the text',
152
+ 'parameters': {
153
+ 'type': 'object',
154
+ 'properties': {
155
+ 'drink': {
156
+ 'type': 'string',
157
+ 'description': 'drink name',
158
+ },
159
+ 'event': {
160
+ 'type': 'string',
161
+ 'description': 'event name',
162
+ },
163
+ 'person_name': {
164
+ 'type': 'string',
165
+ 'description': 'person name',
166
+ }
167
+ },
168
+ 'required': [
169
+ 'drink',
170
+ 'event',
171
+ 'person_name'
172
+ ]
173
+ }
174
+ }
175
+ messages = [
176
+ {'role': 'system', 'content': 'awak adalah AI yang mampu jawab segala soalan'},
177
+ {'role': 'user', 'content': 'nama saya husein bin zolkepli, saya sekarang berada di putrajaya merdeka 2023 sambil minum teh o ais'}
178
+ ]
179
+ prompt = parse_llama_chat(messages, function_call = [f2])
180
+ inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
181
+ generate_kwargs = dict(
182
+ inputs,
183
+ max_new_tokens=128,
184
+ top_p=0.95,
185
+ top_k=50,
186
+ temperature=0.9,
187
+ do_sample=True,
188
+ num_beams=1,
189
+ )
190
+ r = model.generate(**generate_kwargs)
191
+ print(tokenizer.decode(r[0]))
192
+ ```
193
+
194
+ ```text
195
+ <s> [INST] <<SYS>>
196
+ awak adalah AI yang mampu jawab segala soalan
197
+ <</SYS>>
198
+
199
+
200
+ [FUNCTIONCALL]
201
+ {
202
+ "name": "parse_entities",
203
+ "description": "extract entities from the text",
204
+ "parameters": {
205
+ "type": "object",
206
+ "properties": {
207
+ "drink": {
208
+ "type": "string",
209
+ "description": "drink name"
210
+ },
211
+ "event": {
212
+ "type": "string",
213
+ "description": "event name"
214
+ },
215
+ "person_name": {
216
+ "type": "string",
217
+ "description": "person name"
218
+ }
219
+ },
220
+ "required": [
221
+ "drink",
222
+ "event",
223
+ "person_name"
224
+ ]
225
+ }
226
+ }
227
+ nama saya husein bin zolkepli, saya sekarang berada di putrajaya merdeka 2023 sambil minum teh o ais [/INST] <functioncall> {"name": "parse_entities", "arguments": '{"drink": "teh o ais", "event": "Merdeka 2023", "person_name": "Husein bin Zolkepli"}'}
228
+
229
+
230
+ <functioncall> {"entities": [{"name": "Husein bin Zolkepli", "confidence": 0.95}]} </s>
231
+ ```