mbahrsnc committed
Commit b178816
1 Parent(s): 326d5aa

Upload folder using huggingface_hub

README.md CHANGED
@@ -28,7 +28,7 @@ slices:
  layer_range: [0, 21]
  output_weight: 0.6
  merge_method: slerp
- base_model: cognitivecomputations/TinyDolphin-2.8-1.1b
+ base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
  parameters:
  t:
  - filter: self_attn
config.json CHANGED
@@ -1,17 +1,17 @@
  {
- "_name_or_path": "cognitivecomputations/TinyDolphin-2.8-1.1b",
+ "_name_or_path": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
  "architectures": [
  "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
- "eos_token_id": 32000,
+ "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 5632,
- "max_position_embeddings": 4096,
+ "max_position_embeddings": 2048,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
@@ -25,5 +25,5 @@
  "torch_dtype": "bfloat16",
  "transformers_version": "4.42.4",
  "use_cache": true,
- "vocab_size": 32002
+ "vocab_size": 32000
  }
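
The base-model swap carries TinyLlama-Chat's native settings into config.json: the eos token returns to Llama's `</s>` (id 2), the context window drops to TinyLlama's 2048 positions, and the vocabulary shrinks to 32000 now that the two ChatML tokens are gone. A minimal sanity check, assuming the merged model has been downloaded to a local directory (the path is a placeholder; the diff does not name the repo):

```python
from transformers import AutoConfig

# "./merged-model" is a placeholder local directory, not the actual repo name.
config = AutoConfig.from_pretrained("./merged-model")

# After this commit the config should carry TinyLlama-Chat's values:
assert config.eos_token_id == 2             # </s>, not the ChatML id 32000
assert config.max_position_embeddings == 2048
assert config.vocab_size == 32000           # the two ChatML tokens are gone
```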
mergekit_config.yml CHANGED
@@ -8,7 +8,7 @@ slices:
  layer_range: [0, 21]
  output_weight: 0.6
  merge_method: slerp
- base_model: cognitivecomputations/TinyDolphin-2.8-1.1b
+ base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
  parameters:
  t:
  - filter: self_attn
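
This is the mergekit SLERP configuration behind the merge; the commit re-points its `base_model` from TinyDolphin to TinyLlama-1.1B-Chat. For reference, a hedged sketch of re-running such a config through mergekit's Python entry point (paths are placeholders, and the API shown reflects mergekit's README example rather than anything in this commit):

```python
import yaml
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Parse the YAML config from this repo into mergekit's config object.
with open("mergekit_config.yml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Run the merge; "./merged-model" is a placeholder output directory.
run_merge(
    merge_config,
    "./merged-model",
    options=MergeOptions(copy_tokenizer=True),
)
```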
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:01fcd88232a151c4ad4f1295979447f9577e87e6182a411d00ea8c0006f9b208
+ oid sha256:5f0eedd80e35d5a0751472c0995cbfd559707767246f2d7693b20b26f82bca88
  size 989934032
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2fec467f693c2068e4e6ff2faab788146e226cf8e79567874e1b0f5b2cc70161
+ oid sha256:194954945d7b17ba7346db8a3d6af80dac5effd8b8072c37d64e53064811d264
  size 992054392
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0a48485d56c95525b28fb0cc03b2d2fe31a0cb975d12b3aa9fd9c037a90f8681
+ oid sha256:a1da31f232f38a6d44a6f7c09ed213856f4e823f3ab4afd8240beff3c591d895
  size 130041616
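
The three safetensors shards are stored as Git LFS pointers, so only the sha256 `oid` and `size` change here. The oid doubles as an integrity check for a downloaded shard; a stdlib-only sketch (the local filename is assumed):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash a file in chunks to avoid loading ~1 GB shards into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid for the first shard after this commit:
expected = "5f0eedd80e35d5a0751472c0995cbfd559707767246f2d7693b20b26f82bca88"
assert sha256_of("model-00001-of-00003.safetensors") == expected
```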
special_tokens_map.json CHANGED
@@ -7,7 +7,7 @@
  "single_word": false
  },
  "eos_token": {
- "content": "<|im_end|>",
+ "content": "</s>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
tokenizer.json CHANGED
@@ -29,33 +29,25 @@
  "rstrip": false,
  "normalized": false,
  "special": true
- },
- {
- "id": 32000,
- "content": "<|im_end|>",
- "single_word": false,
- "lstrip": false,
- "rstrip": false,
- "normalized": false,
- "special": true
- },
- {
- "id": 32001,
- "content": "<|im_start|>",
- "single_word": false,
- "lstrip": false,
- "rstrip": false,
- "normalized": false,
- "special": false
  }
  ],
- "normalizer": null,
- "pre_tokenizer": {
- "type": "Metaspace",
- "replacement": "▁",
- "prepend_scheme": "first",
- "split": false
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Prepend",
+ "prepend": "▁"
+ },
+ {
+ "type": "Replace",
+ "pattern": {
+ "String": " "
+ },
+ "content": "▁"
+ }
+ ]
  },
+ "pre_tokenizer": null,
  "post_processor": {
  "type": "TemplateProcessing",
  "single": [
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
  {
  "add_bos_token": true,
  "add_eos_token": false,
- "add_prefix_space": true,
+ "add_prefix_space": null,
  "added_tokens_decoder": {
  "0": {
  "content": "<unk>",
@@ -26,36 +26,18 @@
  "rstrip": false,
  "single_word": false,
  "special": true
- },
- "32000": {
- "content": "<|im_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "32001": {
- "content": "<|im_start|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": false
  }
  },
  "bos_token": "<s>",
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
  "clean_up_tokenization_spaces": false,
- "eos_token": "<|im_end|>",
+ "eos_token": "</s>",
  "legacy": false,
- "model_max_length": 1000000000000000019884624838656,
+ "model_max_length": 2048,
  "pad_token": "</s>",
  "padding_side": "right",
  "sp_model_kwargs": {},
- "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
- "trust_remote_code": false,
  "unk_token": "<unk>",
- "use_default_system_prompt": false,
- "use_fast": true
+ "use_default_system_prompt": false
  }
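
The new `chat_template` is the Zephyr-style `<|user|>`/`<|system|>`/`<|assistant|>` format that TinyLlama-1.1B-Chat ships with, and `model_max_length` now matches the 2048-token context. A short usage sketch (placeholder path; output shown approximately):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./merged-model")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# Render the prompt string without tokenizing, ending on the assistant turn.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Prints, roughly:
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Hello!</s>
# <|assistant|>
```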