Cedaros committed on
Commit ac86f43
1 Parent(s): 97d8f08

Upload 3 files

Files changed (3)
  1. README.md +38 -1
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +48 -0
README.md CHANGED
@@ -1,3 +1,40 @@
  ---
- license: mit
+ base_model: []
+ tags:
+ - mergekit
+ - merge
+
  ---
+ # merged
+
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
+
+ ## Merge Details
+ ### Merge Method
+
+ This model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method.
+
+ ### Models Merged
+
+ The following models were included in the merge:
+ * E:\UNA-TheBeagle-7b-v1
+ * E:\go-bruins-v2.1.1
+
+ ### Configuration
+
+ The following YAML configuration was used to produce this model:
+
+ ```yaml
+ dtype: float16
+ merge_method: linear
+ slices:
+ - sources:
+   - layer_range: [0, 32]
+     model: E:\go-bruins-v2.1.1
+     parameters:
+       weight: 1.0
+   - layer_range: [0, 32]
+     model: E:\UNA-TheBeagle-7b-v1
+     parameters:
+       weight: 1.0
+ ```
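
The [linear](https://arxiv.org/abs/2203.05482) method referenced in the README above simply averages the source models' parameters, weighted by each model's `weight` (here 1.0 and 1.0, i.e. a plain element-wise mean across the full `layer_range: [0, 32]` of both models). As a minimal illustrative sketch, not mergekit's actual implementation, the arithmetic looks roughly like this in PyTorch; the paths are the local Windows paths from the config and would be replaced by your own checkpoints:

```python
# Sketch of a linear (weighted-average) merge; assumes both checkpoints share
# the same architecture and parameter names, as in the config above.
import torch
from transformers import AutoModelForCausalLM

paths = ["E:/go-bruins-v2.1.1", "E:/UNA-TheBeagle-7b-v1"]  # source checkpoints from the config
weights = [1.0, 1.0]                                       # per-model merge weights

state_dicts = [
    AutoModelForCausalLM.from_pretrained(p, torch_dtype=torch.float16).state_dict()
    for p in paths
]

# Weighted average of every tensor, normalized by the total weight.
total = sum(weights)
merged = {
    name: sum(w * sd[name] for w, sd in zip(weights, state_dicts)) / total
    for name in state_dicts[0]
}

# Write the averaged weights back into one of the source architectures and save.
model = AutoModelForCausalLM.from_pretrained(paths[0], torch_dtype=torch.float16)
model.load_state_dict(merged)
model.save_pretrained("./merged")
```

In practice the YAML above is consumed by mergekit itself (e.g. saved as `config.yml` and run with its `mergekit-yaml` command), which also handles sharding, dtype casting, and copying the tokenizer files added in this commit; the snippet only illustrates the underlying arithmetic.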
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
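
The tokenizer_config.json added above sets up a standard Llama-style tokenizer: a BOS token is prepended on encode (`add_bos_token: true`), no EOS is appended, padding goes on the left, and `<unk>`, `<s>`, `</s>` are registered as special tokens. A small sketch of loading and inspecting it with Hugging Face `transformers` follows; the repo id is a placeholder for wherever this model ends up hosted, and a local directory containing the two tokenizer files from this commit works the same way:

```python
from transformers import AutoTokenizer

# Placeholder repo id; point this at the actual model repo or a local directory
# containing tokenizer.json and tokenizer_config.json from this commit.
tokenizer = AutoTokenizer.from_pretrained("Cedaros/merged")

print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)  # <s> </s> <unk>
print(tokenizer.padding_side)  # "left", as set in tokenizer_config.json

# add_bos_token=true and add_eos_token=false: encoding prepends <s>
# but does not append </s>.
ids = tokenizer("Hello, world!").input_ids
print(ids[0] == tokenizer.bos_token_id)   # True
print(ids[-1] == tokenizer.eos_token_id)  # False

# pad_token is null in the config, so set one before doing batched left-padding.
tokenizer.pad_token = tokenizer.eos_token
batch = tokenizer(["short", "a longer example"], padding=True, return_tensors="pt")
print(batch.input_ids.shape)
```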