Liam-SC committed on
Commit
19412de
1 Parent(s): a53a835

Edit model

.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
.gitattributes CHANGED
@@ -139,3 +139,9 @@ params_shard_69.bin filter=lfs diff=lfs merge=lfs -text
  params_shard_84.bin filter=lfs diff=lfs merge=lfs -text
  params_shard_42.bin filter=lfs diff=lfs merge=lfs -text
  params_shard_49.bin filter=lfs diff=lfs merge=lfs -text
+ . filter=lfs diff=lfs merge=lfs -text
+ mlc-chat-config.json filter=lfs diff=lfs merge=lfs -text
+ ndarray-cache.json filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ tokenizer.model filter=lfs diff=lfs merge=lfs -text
+ tokenizer_config.json filter=lfs diff=lfs merge=lfs -text
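
These new rules route mlc-chat-config.json, ndarray-cache.json, tokenizer.json, tokenizer.model, and tokenizer_config.json through Git LFS, so a checkout without git-lfs installed sees small three-line pointer stubs instead of the real files. The sketch below is not part of this commit; it only assumes the standard LFS pointer layout and the file names added above, and checks whether a local copy is still a pointer.

```python
# Minimal sketch (hypothetical helper, not part of this repo): detect whether
# a checked-out file is a Git LFS pointer rather than the real payload.
# Assumes the standard pointer layout: "version", "oid sha256:...", "size ...".
from pathlib import Path

LFS_HEADER = "version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    """Return True if the file looks like a Git LFS pointer stub."""
    p = Path(path)
    if not p.is_file() or p.stat().st_size > 1024:
        # Real pointer files are tiny (on the order of 130 bytes).
        return False
    try:
        first_line = p.read_text(encoding="utf-8").splitlines()[0]
    except (UnicodeDecodeError, IndexError):
        return False  # binary or empty file, so not a pointer
    return first_line.strip() == LFS_HEADER

if __name__ == "__main__":
    for name in ["mlc-chat-config.json", "tokenizer_config.json", "tokenizer.model"]:
        print(name, "->", "LFS pointer" if is_lfs_pointer(name) else "real content")
```

If the check reports a pointer, running git lfs pull in the clone replaces the stubs with the actual file contents.
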
mlc-chat-config.json CHANGED
@@ -1,76 +1,3 @@
- {
-   "model_type": "mistral",
-   "quantization": "q4f16_1",
-   "model_config": {
-     "hidden_size": 4096,
-     "intermediate_size": 14336,
-     "num_attention_heads": 32,
-     "num_hidden_layers": 32,
-     "rms_norm_eps": 1e-05,
-     "vocab_size": 32000,
-     "position_embedding_base": 10000.0,
-     "num_key_value_heads": 8,
-     "head_dim": 128,
-     "sliding_window_size": 4096,
-     "prefill_chunk_size": 4096,
-     "attention_sink_size": 4,
-     "tensor_parallel_shards": 1,
-     "max_batch_size": 80
-   },
-   "vocab_size": 32000,
-   "context_window_size": -1,
-   "sliding_window_size": 4096,
-   "prefill_chunk_size": 4096,
-   "attention_sink_size": 4,
-   "tensor_parallel_shards": 1,
-   "mean_gen_len": 128,
-   "max_gen_len": 512,
-   "shift_fill_factor": 0.3,
-   "temperature": 0.7,
-   "presence_penalty": 0.0,
-   "frequency_penalty": 0.0,
-   "repetition_penalty": 1.0,
-   "top_p": 0.95,
-   "conv_template": {
-     "name": "mistral_default",
-     "system_template": "[INST] {system_message}",
-     "system_message": "Always assist with care, respect, and truth. Respond with utmost utility yet securely. Avoid harmful, unethical, prejudiced, or negative content. Ensure replies promote fairness and positivity.",
-     "system_prefix_token_ids": [
-       1
-     ],
-     "add_role_after_system_message": false,
-     "roles": {
-       "user": "[INST]",
-       "assistant": "[/INST]",
-       "tool": "[INST]"
-     },
-     "role_templates": {
-       "user": "{user_message}",
-       "assistant": "{assistant_message}",
-       "tool": "{tool_message}"
-     },
-     "messages": [],
-     "seps": [
-       " "
-     ],
-     "role_content_sep": " ",
-     "role_empty_sep": "",
-     "stop_str": [
-       "</s>"
-     ],
-     "stop_token_ids": [
-       2
-     ],
-     "function_string": "",
-     "use_function_calling": false
-   },
-   "pad_token_id": 0,
-   "bos_token_id": 1,
-   "eos_token_id": 2,
-   "tokenizer_files": [
-     "tokenizer.model",
-     "tokenizer.json",
-     "tokenizer_config.json"
-   ],
-   "version": "0.1.0"
- }
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:641b43c64fab0a83e7c5b176f217b818b010505a0d4decc28d22d938b46159a6
+ size 1947
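
The removed side above is the plain-text MLC chat config for a q4f16_1-quantized Mistral model (4096-token sliding window, mistral_default conversation template, stop token id 2); from this commit on, the path holds only the LFS pointer shown on the added side. A minimal sketch, assuming the real JSON has been fetched via git-lfs, that reads back a few of the fields visible in the diff:

```python
# Minimal sketch: read fields of mlc-chat-config.json that appear in the
# removed side of this diff. Assumes the file has been materialized with
# git-lfs, i.e. it is real JSON and not the three-line pointer stub.
import json

with open("mlc-chat-config.json", "r", encoding="utf-8") as f:
    cfg = json.load(f)

print("model_type:    ", cfg["model_type"])                           # "mistral"
print("quantization:  ", cfg["quantization"])                         # "q4f16_1"
print("sliding window:", cfg["model_config"]["sliding_window_size"])  # 4096
print("conv template: ", cfg["conv_template"]["name"])                # "mistral_default"
print("stop token ids:", cfg["conv_template"]["stop_token_ids"])      # [2]
```
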
ndarray-cache.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,43 +1,3 @@
- {
-   "add_bos_token": true,
-   "add_eos_token": false,
-   "added_tokens_decoder": {
-     "0": {
-       "content": "<unk>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "1": {
-       "content": "<s>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "2": {
-       "content": "</s>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     }
-   },
-   "additional_special_tokens": [],
-   "bos_token": "<s>",
-   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
-   "clean_up_tokenization_spaces": false,
-   "eos_token": "</s>",
-   "legacy": true,
-   "model_max_length": 2048,
-   "pad_token": "</s>",
-   "sp_model_kwargs": {},
-   "spaces_between_special_tokens": false,
-   "tokenizer_class": "LlamaTokenizer",
-   "unk_token": "<unk>",
-   "use_default_system_prompt": false
- }
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f42a964154f5c5d9f9efdb031cae2b1bb93778ff3044197cecbe9c4b60d114e6
+ size 1389
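
The removed tokenizer_config.json described a LlamaTokenizer with <s>, </s>, and <unk> as special tokens and a Zephyr-style Jinja chat template; this file, too, is now stored as an LFS pointer. A minimal sketch, assuming the real JSON has been fetched via git-lfs and that the jinja2 package is available, rendering that chat template for a short conversation:

```python
# Minimal sketch: render the chat_template from the removed side of this diff
# with plain Jinja2 (the templating language Hugging Face tokenizer configs use).
# Assumes tokenizer_config.json is the real JSON, not the LFS pointer stub.
import json
from jinja2 import Template

with open("tokenizer_config.json", "r", encoding="utf-8") as f:
    cfg = json.load(f)

prompt = Template(cfg["chat_template"]).render(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
    eos_token=cfg["eos_token"],   # "</s>"
    add_generation_prompt=True,   # append the trailing <|assistant|> tag
)
print(prompt)
```

With add_generation_prompt set to True, the rendered prompt ends with a bare <|assistant|> tag, matching the loop.last branch of the template above.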