ukim4 committed
Commit
6a470d8
0 Parent(s):

Duplicate from localmodels/LLM

.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
OpenAssistant-SFT-7-LLaMA-30B-GPTQ-4bit--1g.act.order.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15ee1b559dc726c05a849281f07fdfd9ec0f2f520dae60a0b0e87f8ccfe8c894
+ size 16940554392
README.md ADDED
@@ -0,0 +1,65 @@
+ ---
+ duplicated_from: localmodels/LLM
+ ---
+ # OpenAssistant LLaMA 30B SFT 7 GPTQ
+
+ From: https://huggingface.co/OpenAssistant/oasst-sft-7-llama-30b-xor
+
+ ---
+
+ | Model | Bits | Group Size | Act Order (desc_act) | File Size | ExLlama Compatible? | Made With | Description |
+ | ------ | ---- | ---------- | -------------------- | --------- | ------------------- | --------- | ----------- |
+ | OpenAssistant-SFT-7-LLaMA-30B-GPTQ-4bit--1g.act.order | 4 | None | True | 16.94 GB | True | GPTQ-for-LLaMa | Most compatible. Good inference speed in AutoGPTQ and GPTQ-for-LLaMa. |
+
+ ---
+
+ # OpenAssistant LLaMA 30B SFT 7
+
+ ### Configuration
+
+ ```
+ llama-30b-sft-7:
+   dtype: fp16
+   log_dir: "llama_log_30b"
+   learning_rate: 1e-5
+   model_name: /home/ubuntu/Open-Assistant/model/model_training/.saved/llama-30b-super-pretrain/checkpoint-3500
+   #model_name: OpenAssistant/llama-30b-super-pretrain
+   output_dir: llama_model_30b
+   deepspeed_config: configs/zero3_config_sft.json
+   weight_decay: 0.0
+   residual_dropout: 0.0
+   max_length: 2048
+   use_flash_attention: true
+   warmup_steps: 20
+   gradient_checkpointing: true
+   gradient_accumulation_steps: 12
+   per_device_train_batch_size: 2
+   per_device_eval_batch_size: 3
+   eval_steps: 101
+   save_steps: 485
+   num_train_epochs: 4
+   save_total_limit: 3
+   use_custom_sampler: true
+   sort_by_length: false
+   #save_strategy: steps
+   save_strategy: epoch
+   datasets:
+     - oasst_export:
+         lang: "bg,ca,cs,da,de,en,es,fr,hr,hu,it,nl,pl,pt,ro,ru,sl,sr,sv,uk"
+         input_file_path: 2023-04-12_oasst_release_ready_synth.jsonl.gz
+         val_split: 0.05
+     - vicuna:
+         val_split: 0.05
+         max_val_set: 800
+         fraction: 1.0
+     - dolly15k:
+         val_split: 0.05
+         max_val_set: 300
+     - grade_school_math_instructions:
+         val_split: 0.05
+     - code_alpaca:
+         val_split: 0.05
+         max_val_set: 250
+ ```
+
+ - **OASST dataset paper:** https://arxiv.org/abs/2304.07327
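
The table above names AutoGPTQ as one of the compatible loaders. As a minimal sketch (assuming the `auto_gptq` and `transformers` packages; `repo_dir` is a placeholder for a local clone of this repo, and `model_basename` is the safetensors file in this commit minus its extension):

```python
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

repo_dir = "OpenAssistant-SFT-7-LLaMA-30B-GPTQ"  # placeholder: local clone of this repo

tokenizer = AutoTokenizer.from_pretrained(repo_dir, use_fast=False)
# from_quantized picks up quantize_config.json from the repo automatically.
model = AutoGPTQForCausalLM.from_quantized(
    repo_dir,
    model_basename="OpenAssistant-SFT-7-LLaMA-30B-GPTQ-4bit--1g.act.order",
    use_safetensors=True,
    device="cuda:0",
)

# Prompt layout follows the OpenAssistant convention (see added_tokens.json below).
prompt = "<|prompter|>What does act-order mean in GPTQ?</s><|assistant|>"
inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```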
added_tokens.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "<|assistant|>": 32004,
+   "<|dummy10|>": 32014,
+   "<|dummy11|>": 32015,
+   "<|dummy1|>": 32005,
+   "<|dummy2|>": 32006,
+   "<|dummy3|>": 32007,
+   "<|dummy4|>": 32008,
+   "<|dummy5|>": 32009,
+   "<|dummy6|>": 32010,
+   "<|dummy7|>": 32011,
+   "<|dummy8|>": 32012,
+   "<|dummy9|>": 32013,
+   "<|prefix_begin|>": 32000,
+   "<|prefix_end|>": 32003,
+   "<|prompter|>": 32002,
+   "<|system|>": 32001
+ }
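
These IDs extend the base LLaMA vocabulary (32000 tokens) up to the `vocab_size` of 32016 in config.json. A sketch of the chat serialization these tokens support, assuming the upstream OpenAssistant `<|prompter|>...</s><|assistant|>` convention (the convention is not stated in this file itself):

```python
from transformers import AutoTokenizer

# Placeholder path: a local clone of this repo.
tokenizer = AutoTokenizer.from_pretrained("OpenAssistant-SFT-7-LLaMA-30B-GPTQ")

def build_prompt(user_message: str, system_message: str = "") -> str:
    """Serialize one turn in the assumed OpenAssistant format."""
    parts = []
    if system_message:
        parts.append(f"<|system|>{system_message}</s>")
    parts.append(f"<|prompter|>{user_message}</s>")
    parts.append("<|assistant|>")  # the model generates from here
    return "".join(parts)

ids = tokenizer(build_prompt("Explain GPTQ in one sentence.")).input_ids
# Per added_tokens.json, <|prompter|> and <|assistant|> should each encode
# to a single ID: 32002 and 32004 respectively.
```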
config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "_name_or_path": "/content/OpenAssistant-SFT-7/",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 6656,
+   "initializer_range": 0.02,
+   "intermediate_size": 17920,
+   "max_position_embeddings": 2048,
+   "model_type": "llama",
+   "num_attention_heads": 52,
+   "num_hidden_layers": 60,
+   "pad_token_id": 0,
+   "rms_norm_eps": 1e-06,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.29.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32016
+ }
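
A quick back-of-the-envelope check on these dimensions (counting only the dominant weight matrices) recovers the expected ~32.5B parameters of the LLaMA "30B" variant:

```python
# Rough parameter count from the config above (dominant terms only).
h, inter, layers, vocab = 6656, 17920, 60, 32016

embed = vocab * h                  # input embeddings
lm_head = vocab * h                # untied output head (tie_word_embeddings: false)
attn_per_layer = 4 * h * h         # q, k, v, o projections
mlp_per_layer = 3 * h * inter      # gate, up, down projections (SwiGLU)

total = embed + lm_head + layers * (attn_per_layer + mlp_per_layer)
print(f"{total / 1e9:.1f}B")       # ~32.5B, marketed as the 30B LLaMA
```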
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.29.0.dev0"
+ }
quantize_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "bits": 4,
+   "group_size": -1,
+   "damp_percent": 0.01,
+   "desc_act": true,
+   "sym": true,
+   "true_sequential": true
+ }
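
For reference, the same settings restated with AutoGPTQ's `BaseQuantizeConfig` (a sketch only; per the README table the file was actually produced with GPTQ-for-LLaMa, not AutoGPTQ):

```python
from auto_gptq import BaseQuantizeConfig

quantize_config = BaseQuantizeConfig(
    bits=4,                # 4-bit weights
    group_size=-1,         # no grouping; the "--1g" in the safetensors file name
    desc_act=True,         # act-order: quantize columns by decreasing activation
    damp_percent=0.01,
    sym=True,              # symmetric quantization
    true_sequential=True,  # quantize layers one at a time, in order
)
```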
special_tokens_map.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "additional_special_tokens": [
+     "<|prompter|>",
+     "<|system|>",
+     "<|prefix_begin|>",
+     "<|prefix_end|>",
+     "<|assistant|>"
+   ],
+   "bos_token": {
+     "content": "",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": "</s>",
+   "pad_token": "</s>",
+   "sep_token": "<s>",
+   "unk_token": {
+     "content": "",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
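
Note that `model_max_length` here is the transformers "unset" sentinel (effectively infinity), so the tokenizer never truncates by default, while config.json caps the real context window at `max_position_embeddings: 2048`. A small sketch of aligning the two (path is a placeholder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("OpenAssistant-SFT-7-LLaMA-30B-GPTQ")
tokenizer.model_max_length = 2048  # match max_position_embeddings in config.json
```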