bobtk committed
Commit 8bd499d
1 Parent(s): 58ed981

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,27 @@
+ ---
+ language:
+ - en
+ - ja
+ license: llama2
+ library_name: transformers
+ tags:
+ - mlx
+ pipeline_tag: text-generation
+ model_type: llama
+ ---
+
+ # mlx-community/Swallow-70b-instruct-v0.1-4bit
+ This model was converted to MLX format from [`tokyotech-llm/Swallow-70b-instruct-v0.1`](https://huggingface.co/tokyotech-llm/Swallow-70b-instruct-v0.1) using mlx-lm version **0.6.0**.
+ Refer to the [original model card](https://huggingface.co/tokyotech-llm/Swallow-70b-instruct-v0.1) for more details on the model.
+ ## Use with mlx
+
+ ```bash
+ pip install mlx-lm
+ ```
+
+ ```python
+ from mlx_lm import load, generate
+
+ model, tokenizer = load("mlx-community/Swallow-70b-instruct-v0.1-4bit")
+ response = generate(model, tokenizer, prompt="hello", verbose=True)
+ ```
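Since this is an instruct-tuned checkpoint, a bare prompt like `"hello"` bypasses the `[INST]` chat format the model was trained on. A minimal sketch of prompting through the bundled chat template, assuming the tokenizer returned by `load` exposes the standard Hugging Face `apply_chat_template` method (true around mlx-lm 0.6.0, but the API may shift between versions):

```python
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/Swallow-70b-instruct-v0.1-4bit")

# Render the Llama-2-style [INST] prompt from the model's chat template
# (see tokenizer_config.json below); the system prompt is the one the
# template itself ships with.
messages = [
    {"role": "system", "content": "あなたは誠実で優秀な日本人のアシスタントです。"},
    {"role": "user", "content": "Tell me about sightseeing spots in Tokyo."},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False)

response = generate(model, tokenizer, prompt=prompt, max_tokens=256, verbose=True)
```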
config.json ADDED
@@ -0,0 +1,86 @@
+ {
+ "add_cross_attention": false,
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": 1,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "silu",
+ "hidden_size": 8192,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 28672,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 4096,
+ "max_sequence_length": 4096,
+ "min_length": 0,
+ "model_type": "llama",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 64,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 80,
+ "num_key_value_heads": 8,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 0,
+ "prefix": null,
+ "pretraining_tp": 1,
+ "problem_type": null,
+ "pruned_heads": {},
+ "quantization": {
+ "group_size": 64,
+ "bits": 4
+ },
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": false,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": "bfloat16",
+ "torchscript": false,
+ "transformers_version": "4.40.0.dev0",
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_cache": true,
+ "vocab_size": 43176
+ }
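The `quantization` block is the only non-standard entry here: weights are packed 4 bits each in groups of 64. Assuming MLX's affine scheme stores one fp16 scale and one fp16 bias per 64-weight group (an assumption about the on-disk format, not something stated in this config), that works out to about 4.5 bits per weight. A quick sanity check against the shard sizes below:

```python
# Rough parameter count from the config values above. Embeddings and the LM
# head both count, since tie_word_embeddings is false.
hidden, inter, layers, kv_heads, vocab = 8192, 28672, 80, 8, 43176
head_dim = hidden // 64                  # 64 attention heads
attn = 2 * hidden * hidden + 2 * hidden * kv_heads * head_dim  # q/o + k/v (GQA)
mlp = 3 * hidden * inter                 # gate, up, down projections
params = layers * (attn + mlp) + 2 * vocab * hidden

bits_per_weight = 4 + (16 + 16) / 64     # 4-bit weights + fp16 scale/bias per 64
print(f"~{params / 1e9:.1f}B params -> ~{params * bits_per_weight / 8 / 1e9:.1f} GB")
# ~69.2B params -> ~38.9 GB, in the same ballpark as the ~39.4 GB of shards below.
```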
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8299e6b70c865d44d42375f771318a4c805a1b92a357f358d6db87139a1c26f
+ size 5256438576
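What git stores for each shard is only this LFS pointer: a spec version, a sha256 object id, and a byte size; the multi-gigabyte blob itself lives in LFS storage. A small sketch (the local path is illustrative) for checking a downloaded shard against its pointer:

```python
import hashlib

def verify_shard(path: str, expected_oid: str, expected_size: int) -> bool:
    """Stream-hash a downloaded file and compare against its LFS pointer."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return size == expected_size and h.hexdigest() == expected_oid

# Values copied from the pointer above.
ok = verify_shard(
    "model-00001-of-00008.safetensors",
    "d8299e6b70c865d44d42375f771318a4c805a1b92a357f358d6db87139a1c26f",
    5256438576,
)
print("shard ok:", ok)
```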
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff486a9cf78415c4e768bc4ca29cab5beccbeda44e03092c8025f0549c241d60
+ size 5294649699
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa44178cdfa5064c16e749526859eab3cdb5519281aae63cdcb523ac6297bddd
+ size 5294649665
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6e48739cfc77e6a938032c3e1945510f570b19c8fbd4ea46b495f9b9672b1d7
+ size 5294649717
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5bce9912394eca7b1ab7c75e79937ae0321286f3d51752c1884deec4269142c
+ size 5294649707
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b95681b409111cb25859aab287e399c511197b005c398045eaf9d89214452fe
+ size 5294649663
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36f37241f249a5646d35457c5d3975c6fb511fca9405e81eac5de3bdcedbdfc1
+ size 5294649743
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43c0d6ee21830231f2183a84c3d57628971753525515fea23e0488631c39af4c
+ size 2388573727
model.safetensors.index.json ADDED
The diff for this file is too large to render.
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c877c5ca885bad5c19d1b1706a2703f8b30de90f03c1f834f8bdb9faf79821e8
+ size 914000
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'あなたは誠実で優秀な日本人のアシスタントです。' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{{ bos_token }}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + content.strip() + ' [/INST] ' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ '' + content.strip() + '' + eos_token }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
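The `chat_template` is the familiar Llama-2 `[INST]` format with a Japanese default system prompt (roughly, "You are a sincere and excellent Japanese assistant."); note that the `false == true` branch makes that default dead code, consistent with `use_default_system_prompt` being false, so a system message must be supplied explicitly. A sketch of what the template renders, via the standard `transformers` API:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mlx-community/Swallow-70b-instruct-v0.1-4bit")
messages = [
    {"role": "system", "content": "あなたは誠実で優秀な日本人のアシスタントです。"},
    {"role": "user", "content": "Hello."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# Renders roughly as (BOS shown as <s>):
# <s>[INST] <<SYS>>
# あなたは誠実で優秀な日本人のアシスタントです。
# <</SYS>>
#
# Hello. [/INST]
```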