jphme committed
Commit 9240a02 (1 parent: 3320d48)

merged model upload

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "jphme/em_german_7b_leo2",
+  "_name_or_path": "jphme/em_german_7b_leo",
   "architectures": [
     "LlamaForCausalLM"
   ],
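For reference, a minimal sketch (not part of this commit) of how the updated _name_or_path could be checked once the upload is live; the repository id jphme/em_german_7b_leo is an assumption taken from the new value above, and huggingface_hub must be installed.

import json

from huggingface_hub import hf_hub_download

# Download only the config file of the (assumed) target repository.
config_path = hf_hub_download(repo_id="jphme/em_german_7b_leo", filename="config.json")

with open(config_path) as f:
    config = json.load(f)

print(config["_name_or_path"])  # expected: "jphme/em_german_7b_leo"
print(config["architectures"])  # expected: ["LlamaForCausalLM"]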
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d17712854ab6871d808e07b0f2b01e826748f7a4553b4e18af4c0d987b5bda3
+oid sha256:e9aa1ed46458ed16e34b218774855ad5f8a247e7cec63f98452d8577b7b2ab51
 size 9976570304
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:772d1f2dbb87bf629ee3cee6eea95393a7ae4f6f05a5f475698542635b682714
+oid sha256:47c9cdc80ecdbd965246e6eac2673d1f1c084c328ab0978f6502365e44db3f5d
 size 3500294472
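Since only the LFS pointer files change here, a quick way to confirm that a downloaded shard matches the new pointers is to re-hash it locally. A minimal sketch, assuming the repository id jphme/em_german_7b_leo and using the sha256 oid from the first shard's diff above:

import hashlib

from huggingface_hub import hf_hub_download

# oid from the updated LFS pointer of model-00001-of-00002.safetensors above
EXPECTED_SHA256 = "e9aa1ed46458ed16e34b218774855ad5f8a247e7cec63f98452d8577b7b2ab51"

shard_path = hf_hub_download(
    repo_id="jphme/em_german_7b_leo",  # assumed repository id
    filename="model-00001-of-00002.safetensors",
)

sha = hashlib.sha256()
with open(shard_path, "rb") as f:
    # hash in 1 MiB chunks so the ~10 GB shard never has to fit in memory
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(sha.hexdigest() == EXPECTED_SHA256)  # True if the download matches the pointer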
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,6 +1,4 @@
 {
-  "add_bos_token": true,
-  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -8,7 +6,7 @@
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special": false
+      "special": true
     },
     "1": {
       "content": "<s>",
@@ -16,7 +14,7 @@
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special": false
+      "special": true
     },
     "2": {
       "content": "</s>",
@@ -24,7 +22,7 @@
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special": false
+      "special": true
     }
   },
   "additional_special_tokens": [],
@@ -38,7 +36,6 @@
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
-  "tokenizer_file": "/root/.cache/huggingface/hub/models--LeoLM--leo-hessianai-7b/snapshots/88c5ac07006ea8f1b5d10aa4f03f0d624dd27e56/tokenizer.json",
   "unk_token": "<unk>",
   "use_default_system_prompt": true
 }
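The tokenizer_config.json change marks the unk/bos/eos tokens as special and drops the hard-coded local tokenizer_file path, so the special tokens can be stripped on decode. A minimal sketch of a sanity check, assuming the repository id jphme/em_german_7b_leo and an installed transformers package:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("jphme/em_german_7b_leo")  # assumed repository id

# <unk>, <s> and </s> should now be listed among the special tokens ...
print(tok.all_special_tokens)

# ... which means skip_special_tokens=True removes them again on decode.
ids = tok("Hallo Welt")["input_ids"]
print(tok.decode(ids, skip_special_tokens=True))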