Commit 4514f4d · Parent: cb894be

up to date

Files changed:
- README.md +5 -2
- config.json +1 -1
- generation_config.json +5 -4
- special_tokens_map.json +3 -4
- tokenizer.json +3 -3
- tokenizer_config.json +4 -3
README.md
CHANGED
@@ -5,6 +5,7 @@ extra_gated_description: >-
   granted access from Meta. Please visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads) and accept our
   license terms and acceptable use policy before submitting this form. Requests
   will be processed in 1-2 days.
+extra_gated_prompt: "**Your Hugging Face account email address MUST match the email you provide on the Meta website, or your request will not be approved.**"
 extra_gated_button_content: Submit
 extra_gated_fields:
   I agree to share my name, email address and username with Meta and confirm that I have already been granted download access on the Meta website: checkbox
@@ -20,7 +21,7 @@ tags:
 - llama-2
 ---
 # **Llama 2**
-Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B
+Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.
 
 ## Model Details
 *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*
@@ -52,6 +53,8 @@ Meta developed and publicly released the Llama 2 family of large language models
 
 **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
 
+**Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](arxiv.org/abs/2307.09288)
+
 ## Intended Use
 **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
 
@@ -131,4 +134,4 @@ Please report any software “bug,” or other problems with the models through
 |---|---|---|---|---|
 |7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)|
 |13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)|
-|70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
+|70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
config.json
CHANGED
@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "hammerjohn/learning-Llama-2-7b-chat-hf",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -13,7 +14,6 @@
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
   "num_key_value_heads": 32,
-  "pad_token_id": 0,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
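The config.json change drops "pad_token_id" from the model config (it now appears only in generation_config.json) and records "_name_or_path". A minimal sketch in Python for checking the result; the repo id is a placeholder for whichever repository this commit belongs to:

from transformers import AutoConfig

repo_id = "your-namespace/Llama-2-7b-chat-hf"  # placeholder, not the actual repo id

config = AutoConfig.from_pretrained(repo_id)
print(config.architectures)   # ['LlamaForCausalLM']
print(config.pad_token_id)    # None after this commit; the key no longer exists in config.json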
generation_config.json
CHANGED
@@ -1,9 +1,10 @@
 {
-  "_from_model_config": true,
   "bos_token_id": 1,
+  "do_sample": true,
   "eos_token_id": 2,
-  "pad_token_id":
-  "temperature": 0.
-  "
+  "pad_token_id": 0,
+  "temperature": 0.6,
+  "max_length": 4096,
+  "top_p": 0.9,
   "transformers_version": "4.31.0.dev0"
 }
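generation_config.json now enables sampling by default: do_sample is true with temperature 0.6 and top_p 0.9, pad_token_id is 0, and max_length is capped at 4096. A minimal sketch of how generate() picks these defaults up; the repo id and prompt are placeholders:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

repo_id = "your-namespace/Llama-2-7b-chat-hf"  # placeholder

# generate() reads generation_config.json on its own; loading it explicitly
# just makes the new sampling defaults visible.
gen_config = GenerationConfig.from_pretrained(repo_id)
print(gen_config.do_sample, gen_config.temperature, gen_config.top_p)  # True 0.6 0.9

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.float16, device_map="auto"  # device_map needs accelerate
)

inputs = tokenizer("Llama 2 is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))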
special_tokens_map.json
CHANGED
@@ -2,22 +2,21 @@
   "bos_token": {
     "content": "<s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
     "content": "</s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<unk>",
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   }
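special_tokens_map.json loses its "pad_token": "<unk>" entry and marks the remaining special tokens as "normalized": false. After this change the loaded tokenizer has no padding token, so batched padding needs one to be assigned. A short sketch, assuming the common workaround of reusing an existing special token (a general pattern, not something the commit prescribes):

from transformers import AutoTokenizer

repo_id = "your-namespace/Llama-2-7b-chat-hf"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)

print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)  # <s> </s> <unk>
print(tokenizer.pad_token)  # None once "pad_token" is gone from special_tokens_map.json

# Common workaround for batching: reuse an existing special token as the pad token.
tokenizer.pad_token = tokenizer.unk_token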
tokenizer.json
CHANGED
@@ -9,7 +9,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
     },
     {
@@ -18,7 +18,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
     },
     {
@@ -27,7 +27,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
     }
   ],
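In tokenizer.json the three added tokens (<unk>, <s>, </s>) switch to "normalized": false, meaning their strings are matched against the raw input text rather than the normalizer's output. A quick verification sketch, assuming tokenizer.json from this commit has been downloaded to the working directory:

import json

with open("tokenizer.json") as f:
    data = json.load(f)

for token in data["added_tokens"]:
    print(token["content"], token["normalized"], token["special"])
# Expected after this commit: every added token reports normalized=False, special=True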
tokenizer_config.json
CHANGED
@@ -5,7 +5,7 @@
     "__type": "AddedToken",
     "content": "<s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
@@ -14,20 +14,21 @@
     "__type": "AddedToken",
     "content": "</s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "legacy": false,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": null,
+  "padding_side": "right",
   "sp_model_kwargs": {},
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": {
     "__type": "AddedToken",
     "content": "<unk>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   }
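tokenizer_config.json gets the same "normalized": false treatment for its AddedToken entries and additionally pins "padding_side": "right". A minimal padding sketch; because pad_token is still null in this file, the example assigns one before padding (an assumption, not part of the commit):

from transformers import AutoTokenizer

repo_id = "your-namespace/Llama-2-7b-chat-hf"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)
print(tokenizer.padding_side)  # "right" after this commit

tokenizer.pad_token = tokenizer.unk_token  # pad_token is null in the config, so assign one
batch = tokenizer(["short prompt", "a somewhat longer prompt"], padding=True)
print(batch["input_ids"])  # the shorter sequence is padded on the right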