emozilla committed on
Commit 3cf5893
1 Parent(s): cc55ce4

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,5 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-Meta-Llama-3-8B-Instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
-Meta-Llama-3-8B-Instruct-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text

README.md CHANGED
@@ -185,6 +185,29 @@ extra_gated_fields:
   By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox
 extra_gated_description: The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).
 extra_gated_button_content: Submit
+widget:
+  - example_title: Hello
+    messages:
+      - role: user
+        content: Hey my name is Julien! How are you?
+  - example_title: Winter holidays
+    messages:
+      - role: system
+        content: You are a helpful and honest assistant. Please, respond concisely and truthfully.
+      - role: user
+        content: Can you recommend a good destination for Winter holidays?
+  - example_title: Programming assistant
+    messages:
+      - role: system
+        content: You are a helpful and honest code and programming assistant. Please, respond concisely and truthfully.
+      - role: user
+        content: Write a function that computes the nth fibonacci number.
+inference:
+  parameters:
+    max_new_tokens: 300
+    stop:
+      - <|end_of_text|>
+      - <|eot_id|>
 ---
 
 ## Model Details
@@ -273,7 +296,9 @@ This repository contains two versions of Meta-Llama-3-8B-Instruct, for use with
 
 ### Use with transformers
 
-See the snippet below for usage with Transformers:
+You can run conversational inference using the Transformers pipeline abstraction, or by leveraging the Auto classes with the `generate()` function. Let's see examples of both.
+
+#### Transformers pipeline
 
 ```python
 import transformers
@@ -285,7 +310,7 @@ pipeline = transformers.pipeline(
     "text-generation",
     model=model_id,
     model_kwargs={"torch_dtype": torch.bfloat16},
-    device="cuda",
+    device_map="auto",
 )
 
 messages = [
@@ -300,8 +325,8 @@ prompt = pipeline.tokenizer.apply_chat_template(
 )
 
 terminators = [
-    tokenizer.eos_token_id,
-    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+    pipeline.tokenizer.eos_token_id,
+    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
 ]
 
 outputs = pipeline(
@@ -315,6 +340,50 @@ outputs = pipeline(
 print(outputs[0]["generated_text"][len(prompt):])
 ```
 
+#### Transformers AutoModelForCausalLM
+
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+)
+
+messages = [
+    {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
+    {"role": "user", "content": "Who are you?"},
+]
+
+input_ids = tokenizer.apply_chat_template(
+    messages,
+    add_generation_prompt=True,
+    return_tensors="pt"
+).to(model.device)
+
+terminators = [
+    tokenizer.eos_token_id,
+    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+]
+
+outputs = model.generate(
+    input_ids,
+    max_new_tokens=256,
+    eos_token_id=terminators,
+    do_sample=True,
+    temperature=0.6,
+    top_p=0.9,
+)
+response = outputs[0][input_ids.shape[-1]:]
+print(tokenizer.decode(response, skip_special_tokens=True))
+```
+
+
 ### Use with `llama3`
 
 Please, follow the instructions in the [repository](https://github.com/meta-llama/llama3)
generation_config.json CHANGED
@@ -1,6 +1,9 @@
 {
-  "_from_model_config": true,
   "bos_token_id": 128000,
   "eos_token_id": [128001, 128009],
+  "do_sample": true,
+  "temperature": 0.6,
+  "max_length": 4096,
+  "top_p": 0.9,
   "transformers_version": "4.40.0.dev0"
 }
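
The keys added here become the model's default sampling settings whenever it is loaded with Transformers. A minimal sketch of how they are picked up, assuming the standard `GenerationConfig` API and the upstream `meta-llama/Meta-Llama-3-8B-Instruct` id used in the README snippets (the gated repo may require `huggingface-cli login` first):

```python
from transformers import GenerationConfig

# Loads the defaults shipped in generation_config.json; after this commit the
# returned object carries do_sample=True, temperature=0.6, top_p=0.9 and
# max_length=4096 in addition to the bos/eos token ids.
gen_config = GenerationConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
print(gen_config.do_sample, gen_config.temperature, gen_config.top_p, gen_config.max_length)

# model.generate(...) uses these values automatically unless a caller
# overrides them per call, e.g. model.generate(input_ids, temperature=0.8).
```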
tokenizer.json CHANGED
@@ -2329,10 +2329,69 @@
     ]
   },
   "post_processor": {
-    "type": "ByteLevel",
-    "add_prefix_space": true,
-    "trim_offsets": false,
-    "use_regex": true
+    "type": "Sequence",
+    "processors": [
+      {
+        "type": "ByteLevel",
+        "add_prefix_space": true,
+        "trim_offsets": false,
+        "use_regex": true
+      },
+      {
+        "type": "TemplateProcessing",
+        "single": [
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 0
+            }
+          },
+          {
+            "Sequence": {
+              "id": "A",
+              "type_id": 0
+            }
+          }
+        ],
+        "pair": [
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 0
+            }
+          },
+          {
+            "Sequence": {
+              "id": "A",
+              "type_id": 0
+            }
+          },
+          {
+            "SpecialToken": {
+              "id": "<|begin_of_text|>",
+              "type_id": 1
+            }
+          },
+          {
+            "Sequence": {
+              "id": "B",
+              "type_id": 1
+            }
+          }
+        ],
+        "special_tokens": {
+          "<|begin_of_text|>": {
+            "id": "<|begin_of_text|>",
+            "ids": [
+              128000
+            ],
+            "tokens": [
+              "<|begin_of_text|>"
+            ]
+          }
+        }
+      }
+    ]
   },
   "decoder": {
     "type": "ByteLevel",
tokenizer_config.json CHANGED
@@ -2050,7 +2050,7 @@
     }
   },
   "bos_token": "<|begin_of_text|>",
-  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ eos_token }}{% endif %}",
+  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|end_of_text|>",
   "model_input_names": [