TroyDoesAI committed on
Commit
bb5a277
1 Parent(s): eaaf48d

Upload 10 files

Browse files

Merge of all my Mistral models. After testing, this model is the easiest to use — not the most capable, but really easy for people to prompt. It seems I am really good at prompting what I want, because I wrote the dataset, lol. Sorry, guys. This one should be more aligned.

config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": ".\\MermaidMistral\\",
3
  "architectures": [
4
  "MistralForCausalLM"
5
  ],
@@ -20,7 +20,7 @@
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "float16",
23
- "transformers_version": "4.35.2",
24
  "use_cache": true,
25
  "vocab_size": 32000
26
  }
 
1
  {
2
+ "_name_or_path": ".\\MermaidMistral",
3
  "architectures": [
4
  "MistralForCausalLM"
5
  ],
 
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "float16",
23
+ "transformers_version": "4.37.2",
24
  "use_cache": true,
25
  "vocab_size": 32000
26
  }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 2,
5
- "transformers_version": "4.35.2"
6
  }
 
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 2,
5
+ "transformers_version": "4.37.2"
6
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7e725e7d34ff5d280859e6d4d1b5aa24861b5ada629778a240fb09cec429d8df
3
  size 4943162240
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a156eea914666e2b80bc191ddb570f61e87056c14eb87d86e1fffee30c6c42d8
3
  size 4943162240
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b9873f2f88d9eb4c0435bdca825838938fbb243d3811faf0a4743122fb2d01f0
3
  size 4999819232
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:daccbd2e4b4d06fe7200e8ede94da165c4220f40ad340ec3563178d4766a9b3d
3
  size 4999819232
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ef6ef93c395aa4f076a9c662b7b48a69acfeda2aa7fb905a31772792aa99eea5
3
  size 4540516256
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:65709c124780b704b418e9d5c20db825fb7157d4effeddf92af28c102fa5860d
3
  size 4540516256
special_tokens_map.json CHANGED
@@ -1,4 +1,9 @@
1
  {
 
 
 
 
 
2
  "bos_token": {
3
  "content": "<s>",
4
  "lstrip": false,
@@ -13,6 +18,13 @@
13
  "rstrip": false,
14
  "single_word": false
15
  },
 
 
 
 
 
 
 
16
  "unk_token": {
17
  "content": "<unk>",
18
  "lstrip": false,
 
1
  {
2
+ "additional_special_tokens": [
3
+ "<unk>",
4
+ "<s>",
5
+ "</s>"
6
+ ],
7
  "bos_token": {
8
  "content": "<s>",
9
  "lstrip": false,
 
18
  "rstrip": false,
19
  "single_word": false
20
  },
21
+ "pad_token": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false
27
+ },
28
  "unk_token": {
29
  "content": "<unk>",
30
  "lstrip": false,
tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
1
  {
 
 
2
  "added_tokens_decoder": {
3
  "0": {
4
  "content": "<unk>",
@@ -25,16 +27,23 @@
25
  "special": true
26
  }
27
  },
28
- "additional_special_tokens": [],
 
 
 
 
29
  "bos_token": "<s>",
 
30
  "clean_up_tokenization_spaces": false,
31
  "eos_token": "</s>",
32
  "legacy": true,
33
- "model_max_length": 1000000000000000019884624838656,
34
- "pad_token": null,
 
35
  "sp_model_kwargs": {},
36
  "spaces_between_special_tokens": false,
 
37
  "tokenizer_class": "LlamaTokenizer",
38
  "unk_token": "<unk>",
39
- "use_default_system_prompt": false
40
  }
 
1
  {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
  "added_tokens_decoder": {
5
  "0": {
6
  "content": "<unk>",
 
27
  "special": true
28
  }
29
  },
30
+ "additional_special_tokens": [
31
+ "<unk>",
32
+ "<s>",
33
+ "</s>"
34
+ ],
35
  "bos_token": "<s>",
36
+ "chat_template": "{% for message in messages %}{{bos_token + message['role'] + '\n' + message['content'] + eos_token + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ bos_token + 'assistant\n' }}{% endif %}",
37
  "clean_up_tokenization_spaces": false,
38
  "eos_token": "</s>",
39
  "legacy": true,
40
+ "model_max_length": 8192,
41
+ "pad_token": "</s>",
42
+ "padding_side": "left",
43
  "sp_model_kwargs": {},
44
  "spaces_between_special_tokens": false,
45
+ "split_special_tokens": false,
46
  "tokenizer_class": "LlamaTokenizer",
47
  "unk_token": "<unk>",
48
+ "use_default_system_prompt": true
49
  }