Text Generation · Transformers · PyTorch · English · llama · Inference Endpoints · text-generation-inference
winglian committed 74e5e52 (1 parent: bf97433)

ep1 of filtered gpt4

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<pad>": 32000
+ }
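Note on the ids: the base Llama tokenizer covers ids 0–31999, so <pad> lands at 32000; together with the two ChatML tokens added during training (see configs/oo-7b.yml below) this accounts for vocab_size: 32003 in config.json. A rough sketch of the equivalent transformers calls (not the actual training code; the base repo name is taken from config.json):

from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("conceptofmind/LLongMA-2-7b-16k")  # base vocab: 32000
tokenizer.add_special_tokens({"pad_token": "<pad>"})  # id 32000, as in added_tokens.json
tokenizer.add_tokens(["<|im_start|>", "<|im_end|>"])  # ids 32001/32002, from configs/oo-7b.yml

model = AutoModelForCausalLM.from_pretrained("conceptofmind/LLongMA-2-7b-16k")
model.resize_token_embeddings(len(tokenizer))  # 32003, matching config.json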
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "conceptofmind/LLongMA-2-7b-16k",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 16384,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 32,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 4.0,
+     "type": "linear"
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.32.0.dev0",
+   "use_cache": true,
+   "use_flash_attention": false,
+   "vocab_size": 32003
+ }
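For orientation: the rope_scaling block is what stretches the stock 4096-token Llama-2 context to the 16384 positions declared above (linear scaling, 16384 / 4.0 = 4096). A small sketch reading the config with transformers (>= 4.31, which introduced rope_scaling), without loading any weights:

from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")  # this repo's checkout directory
print(config.max_position_embeddings)     # 16384
print(config.rope_scaling)                # {'factor': 4.0, 'type': 'linear'}
print(config.max_position_embeddings / config.rope_scaling["factor"])  # 4096.0, the base Llama-2 window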
configs/oo-7b.yml ADDED
@@ -0,0 +1,86 @@
+ base_model: conceptofmind/LLongMA-2-7b-16k
+ base_model_config: conceptofmind/LLongMA-2-7b-16k
+ model_type: LlamaForCausalLM
+ tokenizer_type: LlamaTokenizer
+ tokenizer_use_fast: true
+ tokenizer_legacy: true
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+ push_dataset_to_hub:
+ hf_use_auth_token:
+ datasets:
+   - path: openaccess-ai-collective/oo-gpt4-filtered
+     type: alpaca_w_system.load_open_orca_chatml
+     data_files:
+       - 1M-GPT4-Augmented-filtered-gt10.parquet
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.01
+ adapter:
+ lora_model_dir:
+ sequence_len: 16384
+ max_packed_sequence_len:
+ sample_packing: true
+ sample_packing_eff_est: 0.99
+ sample_packing_seq_len_multiplier: 2
+ total_num_tokens: 372602546
+ lora_r:
+ lora_alpha:
+ lora_dropout:
+ lora_target_modules:
+ lora_target_linear:
+ lora_fan_in_fan_out:
+ wandb_project: open-long-orca-7b
+ wandb_watch:
+ wandb_run_id:
+ wandb_log_model:
+ output_dir: ./open-long-orca-7b
+ gradient_accumulation_steps: 1
+ micro_batch_size: 2
+ num_epochs: 4
+ optimizer: adamw_torch
+ adam_beta2: 0.95
+ adam_eps: 0.00001
+ max_grad_norm: 1.0
+ torchdistx_path:
+ lr_scheduler: cosine
+ lr_quadratic_warmup: true
+ learning_rate: 0.000017
+ train_on_inputs: false
+ group_by_length: false
+ bf16: true
+ fp16: false
+ tf32: true
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+ sdp_attention:
+ flash_optimum:
+ gptq_groupsize:
+ gptq_model_v1:
+ warmup_steps: 32
+ eval_steps: 284
+ save_steps:
+ debug:
+ deepspeed:
+ weight_decay: 0.1
+ special_tokens:
+   bos_token: "<s>"
+   eos_token: "</s>"
+   unk_token: "<unk>"
+ tokens:
+   - "<|im_start|>"
+   - "<|im_end|>"
+ fsdp:
+   - full_shard
+   - auto_wrap
+ fsdp_config:
+   fsdp_sync_module_states: true
+   fsdp_offload_params: true
+   fsdp_state_dict_type: FULL_STATE_DICT
+   fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
+
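This is an axolotl training config (base_model, sample_packing, fsdp, and the rest are axolotl's schema), typically launched through accelerate; the exact entry point depends on the axolotl version. As a sanity check, the packing settings imply a per-GPU token budget per optimizer step, sketched below with PyYAML ("roughly" because sample_packing_eff_est is itself an estimate):

import yaml

with open("configs/oo-7b.yml") as f:
    cfg = yaml.safe_load(f)

# With sample packing, each micro-batch row holds roughly
# sequence_len * sample_packing_eff_est real tokens.
tokens_per_step = (
    cfg["micro_batch_size"]               # 2
    * cfg["gradient_accumulation_steps"]  # 1
    * cfg["sequence_len"]                 # 16384
    * cfg["sample_packing_eff_est"]       # 0.99
)
print(f"~{tokens_per_step:,.0f} tokens per optimizer step per GPU")  # ~32,440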
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 32000,
+   "temperature": 0.9,
+   "top_p": 0.6,
+   "transformers_version": "4.32.0.dev0"
+ }
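These sampling defaults (temperature 0.9, top_p 0.6, pad id 32000) are read from generation_config.json automatically by generate(); do_sample=True must still be passed to enable sampling at all. A hedged sketch, loading from a local checkout of this repo:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
model = AutoModelForCausalLM.from_pretrained(".", torch_dtype=torch.bfloat16)

inputs = tok("Hello,", return_tensors="pt")
out = model.generate(**inputs, do_sample=True, max_new_tokens=32)  # temperature/top_p come from generation_config.json
print(tok.decode(out[0], skip_special_tokens=True))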
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b85ccd6cba4b2ba200309f1fc70f4de3c3cf85e786fbe739085d101beec12a39
+ size 13477027621
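The entry above is only a Git LFS pointer; the actual ~13.5 GB weight file is fetched by LFS. One way to pull just this file with huggingface_hub (the repo id below is an assumption inferred from wandb_project in the training config, not confirmed by this commit; substitute the real one):

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="openaccess-ai-collective/open-long-orca-7b",  # assumed repo id
    filename="pytorch_model.bin",
)
print(path)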
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<unk>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "legacy": false,
+   "model_max_length": 16384,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
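Taken together, tokenizer.model, tokenizer_config.json, special_tokens_map.json, and added_tokens.json are assembled into one tokenizer by AutoTokenizer. A small sketch of the behavior the settings above imply (add_bos_token: true, add_eos_token: false, model_max_length: 16384):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # this repo's checkout directory
ids = tok("hello").input_ids
print(ids[0] == tok.bos_token_id)   # True: <s> is prepended (add_bos_token: true)
print(ids[-1] == tok.eos_token_id)  # False: no trailing </s> (add_eos_token: false)
print(tok.model_max_length)         # 16384
print(tok.convert_tokens_to_ids("<pad>"))  # 32000, from added_tokens.json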