Commit e8db960 by avans06 (parent: df79c8d)

Synchronize the file contents with the updates published by the official meta-llama repository:


- tokenizer.json: update the post-processor to add the BOS token
- generation_config.json: update generation_config.json
- README.md: update the widget inference parameters

Files changed (4):
  1. README.md +29 -23
  2. config.json +1 -1
  3. generation_config.json +4 -1
  4. tokenizer.json +63 -4
README.md CHANGED
@@ -189,6 +189,29 @@ extra_gated_fields:
   By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox
 extra_gated_description: The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).
 extra_gated_button_content: Submit
+widget:
+  - example_title: Hello
+    messages:
+      - role: user
+        content: Hey my name is Julien! How are you?
+  - example_title: Winter holidays
+    messages:
+      - role: system
+        content: You are a helpful and honest assistant. Please, respond concisely and truthfully.
+      - role: user
+        content: Can you recommend a good destination for Winter holidays?
+  - example_title: Programming assistant
+    messages:
+      - role: system
+        content: You are a helpful and honest code and programming assistant. Please, respond concisely and truthfully.
+      - role: user
+        content: Write a function that computes the nth fibonacci number.
+inference:
+  parameters:
+    max_new_tokens: 300
+    stop:
+      - <|end_of_text|>
+      - <|eot_id|>
 ---
 
 ## meta-llama/Meta-Llama-3-8B-Instruct for CTranslate2
@@ -285,11 +308,11 @@ Where to send questions or comments about the model Instructions on how to provide feedback
 
 ## How to use
 
-This repository for use with [CTranslate2](https://github.com/OpenNMT/CTranslate2).
+This repository is for use with [CTranslate2](https://github.com/OpenNMT/CTranslate2).
 
 ### Use with CTranslate2
 
-This example code is obtained from [CTranslate2_transformers](https://opennmt.net/CTranslate2/guides/transformers.html#mpt) and [tokenizer AutoTokenizer](https://huggingface.co/docs/transformers/main_classes/tokenizer).
+This example code is obtained from [CTranslate2_transformers](https://opennmt.net/CTranslate2/guides/transformers.html#mpt).
 More detailed information about the `generate_batch` method can be found at [CTranslate2_Generator.generate_batch](https://opennmt.net/CTranslate2/python/ctranslate2.Generator.html#ctranslate2.Generator.generate_batch).
 
 ```python
@@ -297,31 +320,14 @@ import ctranslate2
 import transformers
 
 model_id = "avans06/Meta-Llama-3-8B-Instruct-ct2-int8_float16"
-model = ctranslate2.Generator(model_id, device="auto", compute_type="int8_float16")
+generator = ctranslate2.Generator(model_id, device="auto", compute_type="int8_float16")
 tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
 
-messages = [
-    {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
-    {"role": "user", "content": "Who are you?"},
-]
+prompt = "What is the meaning of Large language model?"
+input_tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(prompt))
 
-input_ids = tokenizer.apply_chat_template(
-    messages,
-    tokenize=False,
-    add_generation_prompt=True
-)
-
-terminators = [
-    tokenizer.eos_token_id,
-    tokenizer.convert_tokens_to_ids("<|eot_id|>")
-]
-
-input_tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(input_ids))
-
-results = model.generate_batch([input_tokens], include_prompt_in_result=False, max_length=256, sampling_temperature=0.6, sampling_topp=0.9, end_token=terminators)
+results = generator.generate_batch([input_tokens], include_prompt_in_result=False)
 output = tokenizer.decode(results[0].sequences_ids[0])
-
-print(output)
 ```
 
 ## Hardware and Software
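
Note that the simplified snippet now in the README decodes greedily with `generate_batch` defaults. The removed lines show the same call with sampling options and extra stop tokens; the sketch below restores them on top of the new code. It is illustrative only: the chat-template prompt, the example `messages`, and the `add_special_tokens=False` guard (to avoid a doubled BOS now that this commit's tokenizer.json change prepends one) are assumptions, not part of the committed README.

```python
import ctranslate2
import transformers

model_id = "avans06/Meta-Llama-3-8B-Instruct-ct2-int8_float16"
generator = ctranslate2.Generator(model_id, device="auto", compute_type="int8_float16")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)

# Format an instruction-style prompt with the chat template (illustrative).
messages = [{"role": "user", "content": "What is the meaning of Large language model?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# The template already emits <|begin_of_text|>; skip the post-processor's BOS
# (added by this commit's tokenizer.json change) so it is not duplicated.
input_tokens = tokenizer.convert_ids_to_tokens(
    tokenizer.encode(prompt, add_special_tokens=False)
)

# Llama 3 can stop on either terminator, as in the removed example.
terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")]

results = generator.generate_batch(
    [input_tokens],
    include_prompt_in_result=False,
    max_length=256,
    sampling_temperature=0.6,
    sampling_topp=0.9,
    end_token=terminators,
)
print(tokenizer.decode(results[0].sequences_ids[0]))
```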
config.json CHANGED
@@ -3,5 +3,5 @@
   "eos_token": "<|end_of_text|>",
   "layer_norm_epsilon": 1e-05,
   "multi_query_attention": true,
-  "unk_token": "<unk>"
+  "unk_token": ""
 }
generation_config.json CHANGED
@@ -1,6 +1,9 @@
 {
-  "_from_model_config": true,
   "bos_token_id": 128000,
   "eos_token_id": [128001, 128009],
+  "do_sample": true,
+  "temperature": 0.6,
+  "max_length": 4096,
+  "top_p": 0.9,
   "transformers_version": "4.40.0.dev0"
 }
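
These new keys only take effect in transformers-style loaders; the CTranslate2 runtime does not read generation_config.json, so sampling must still be requested explicitly via `sampling_temperature` and `sampling_topp`. A quick way to confirm what was synced, assuming the repo id of this repository:

```python
from transformers import GenerationConfig

# Read the synced generation defaults straight from the hub repo (illustrative check).
cfg = GenerationConfig.from_pretrained("avans06/Meta-Llama-3-8B-Instruct-ct2-int8_float16")
print(cfg.do_sample, cfg.temperature, cfg.top_p, cfg.max_length)
# Per this commit, this should print: True 0.6 0.9 4096
```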
tokenizer.json CHANGED
@@ -2329,10 +2329,69 @@
     ]
   },
   "post_processor": {
-    "type": "ByteLevel",
-    "add_prefix_space": true,
-    "trim_offsets": false,
-    "use_regex": true
+    "type": "Sequence",
+    "processors": [
+      {
+        "type": "ByteLevel",
+        "add_prefix_space": true,
+        "trim_offsets": false,
+        "use_regex": true
+      },
+      {
+        "type": "TemplateProcessing",
+        "single": [
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 0
+            }
+          },
+          {
+            "Sequence": {
+              "id": "A",
+              "type_id": 0
+            }
+          }
+        ],
+        "pair": [
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 0
+            }
+          },
+          {
+            "Sequence": {
+              "id": "A",
+              "type_id": 0
+            }
+          },
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 1
+            }
+          },
+          {
+            "Sequence": {
+              "id": "B",
+              "type_id": 1
+            }
+          }
+        ],
+        "special_tokens": {
+          "<|begin_of_text|>": {
+            "id": "<|begin_of_text|>",
+            "ids": [
+              128000
+            ],
+            "tokens": [
+              "<|begin_of_text|>"
+            ]
+          }
+        }
+      }
+    ]
   },
   "decoder": {
     "type": "ByteLevel",