TearGosling committed on
Commit
ed3ae33
1 Parent(s): 2f5d894

Uploading model

README.md ADDED

---
license: apache-2.0
base_model: NousResearch/Hermes-2-Pro-Mistral-7B
tags:
- generated_from_trainer
model-index:
- name: workspace/disk2/alexandria/models/t2g_hermes/
  results: []
---

[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.4.0`
```yaml
base_model: NousResearch/Hermes-2-Pro-Mistral-7B
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: /workspace/disk2/alexandria/data/text_2_graphs_hermes.jsonl
    type: sharegpt
    conversation: chatml
dataset_prepared_path:
val_set_size: 0.0
output_dir: /workspace/disk2/alexandria/models/t2g_hermes/

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true
eval_sample_packing: false

wandb_project: alexandria
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 0
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 2
debug:
deepspeed: deepspeed_configs/zero2.json
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"

```

</details><br>

# workspace/disk2/alexandria/models/t2g_hermes/

This model is a fine-tuned version of [NousResearch/Hermes-2-Pro-Mistral-7B](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B) on a version of the [Project Alexandria dataset](https://huggingface.co/datasets/ChristophSchuhmann/alexandria-test), designed to turn input plaintext into knowledge graphs structured as Python dictionaries.
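
For illustration only, an output might look something like the following (a hypothetical sketch; the exact keys and structure are determined by the Alexandria dataset and may differ):

```python
# Hypothetical shape of an output knowledge graph; the real schema is
# defined by the training data and may use different keys entirely.
knowledge_graph = {
    "entities": ["Marie Curie", "radium", "Nobel Prize in Physics"],
    "relationships": [
        ("Marie Curie", "discovered", "radium"),
        ("Marie Curie", "was awarded", "Nobel Prize in Physics"),
    ],
}
```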

## Model description

This is a *prototype* model, trained quickly as a proof of concept. No hyperparameter tuning was performed, and no extensive data cleaning was done beyond filtering out entries that met any of these criteria:
- Refusals
- Entries with an empty prompt or output
- Entries containing any instance of "an error occured"

## Intended uses & limitations

The model follows a form of ChatML, with no system prompt. Prompt the model like this, making sure to end the prompt with a newline after the `<|im_start|>assistant` marker:
```
<|im_start|>user
Here is a bunch of input text that will be turned into a knowledge graph, though usually your text will be much longer than this single sentence.<|im_end|>
<|im_start|>assistant
```
Greedy sampling is recommended for generating outputs.
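
A minimal usage sketch with the Hugging Face `transformers` API (the repo id below is a placeholder; substitute wherever this model is actually hosted). Passing `do_sample=False` selects the recommended greedy decoding, overriding the `do_sample: true` default in `generation_config.json`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-username/t2g-hermes"  # placeholder; use the actual repo id or local path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

text = "Marie Curie discovered radium and was awarded the Nobel Prize."
prompt = f"<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant\n"

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# do_sample=False forces greedy decoding; generation stops at <|im_end|> (id 32000).
outputs = model.generate(**inputs, max_new_tokens=512, do_sample=False)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```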

No extensive data cleaning has been done. The model may not always output a detailed or properly formatted knowledge graph. Since the model has only 7B parameters, it may fail to pick up on certain relationships in the input text. As stated before, this model is a prototype.
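
Because well-formed output is not guaranteed, it is worth checking that a generation actually parses as a Python dictionary before using it downstream. A minimal check using only the standard library:

```python
import ast

def parse_knowledge_graph(generated_text: str):
    """Parse model output as a Python-literal dict; return None if malformed."""
    try:
        graph = ast.literal_eval(generated_text.strip())
    except (SyntaxError, ValueError):
        return None
    return graph if isinstance(graph, dict) else None
```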

## Training and evaluation data

The data was generated via several large language models.

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- total_train_batch_size: 16
- total_eval_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 10
- num_epochs: 1
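
The reported `total_train_batch_size` follows directly from the per-device micro batch size, gradient accumulation, and device count; a quick sanity check:

```python
# Effective batch size implied by the axolotl config and hardware above.
micro_batch_size = 2             # per-GPU batch size
gradient_accumulation_steps = 1
num_devices = 8                  # multi-GPU training

total_train_batch_size = micro_batch_size * gradient_accumulation_steps * num_devices
assert total_train_batch_size == 16  # matches the value reported above
```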

### Training results


### Framework versions

- Transformers 4.39.0.dev0
- Pytorch 2.1.2+cu118
- Datasets 2.18.0
- Tokenizers 0.15.0
added_tokens.json ADDED

{
  "<|im_end|>": 32000,
  "<|im_start|>": 32001
}
config.json ADDED

{
  "_name_or_path": "NousResearch/Hermes-2-Pro-Mistral-7B",
  "architectures": [
    "MistralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 10000.0,
  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.39.0.dev0",
  "use_cache": false,
  "vocab_size": 32032
}
generation_config.json ADDED

{
  "_from_model_config": true,
  "bos_token_id": 1,
  "do_sample": true,
  "eos_token_id": 32000,
  "transformers_version": "4.39.0.dev0"
}
pytorch_model.bin ADDED

version https://git-lfs.github.com/spec/v1
oid sha256:a8efcc3cf157ebc759a6f7ff5d960863862cfe42fc70bca38a96bdac9c772750
size 14484029102
special_tokens_map.json ADDED

{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.model ADDED

version https://git-lfs.github.com/spec/v1
oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
size 493443
tokenizer_config.json ADDED

{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32000": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32001": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "</s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false,
  "use_fast": true
}
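
Given the `chat_template` above, the ChatML prompt described in the README can also be built by the tokenizer itself rather than by hand. A minimal sketch using the standard `transformers` API (the path is a placeholder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./t2g_hermes")  # placeholder path

messages = [{"role": "user", "content": "Text to be turned into a knowledge graph."}]
# add_generation_prompt=True appends "<|im_start|>assistant\n", per the template.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>user
# Text to be turned into a knowledge graph.<|im_end|>
# <|im_start|>assistant
```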