altomek committed
Commit 9089148
1 Parent(s): 029be85

quants upload


Personal_4B Q4_0... quant

.gitattributes CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Personal_4B-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+Personal_4B-Q4_0.i.gguf filter=lfs diff=lfs merge=lfs -text
+Personal_4B-Q4_0_4_4.gguf filter=lfs diff=lfs merge=lfs -text
+Personal_4B-Q4_0_4_4.i.gguf filter=lfs diff=lfs merge=lfs -text
+Personal_4B-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Personal_4B-Q8_0.i.gguf filter=lfs diff=lfs merge=lfs -text
+Personal_4B.imatrix filter=lfs diff=lfs merge=lfs -text
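
These new rules route every quant file through Git LFS, so the repository stores small pointer files instead of multi-gigabyte blobs. A minimal sketch of how such entries are typically added (assuming `git-lfs` is installed and initialized in the checkout; the patterns could equally be appended to `.gitattributes` by hand):

```python
# Hedged sketch: register the new quant files with Git LFS.
# `git lfs track <pattern>` appends a matching filter line to .gitattributes.
import subprocess

patterns = [
    "Personal_4B-Q4_0.gguf",
    "Personal_4B-Q4_0.i.gguf",
    "Personal_4B-Q4_0_4_4.gguf",
    "Personal_4B-Q4_0_4_4.i.gguf",
    "Personal_4B-Q8_0.gguf",
    "Personal_4B-Q8_0.i.gguf",
    "Personal_4B.imatrix",
]
for pattern in patterns:
    subprocess.run(["git", "lfs", "track", pattern], check=True)
```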
Personal_4B-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:365528fda4dc0d25f3c29235549562c59646fc90b21b8aef014cef91db64aeef
+size 2648521472
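
Each ADDED file here is a Git LFS pointer: three lines giving the LFS spec version, the SHA-256 of the actual blob, and its size in bytes. A minimal sketch for verifying a downloaded quant against this Q4_0 pointer (the local path is an assumption):

```python
# Hedged sketch: verify a downloaded file against the LFS pointer above.
import hashlib
import os

path = "Personal_4B-Q4_0.gguf"  # assumed local download location
expected_oid = "365528fda4dc0d25f3c29235549562c59646fc90b21b8aef014cef91db64aeef"
expected_size = 2648521472

assert os.path.getsize(path) == expected_size, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "hash mismatch"
```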
Personal_4B-Q4_0.i.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e219a14ed5ee99d6d93466deb6a605317201586e09d4fa712643218edb99bd58
+size 2655599616
Personal_4B-Q4_0_4_4.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cae67aed4a39f3c89662e5dd3f632138d054c606547552b0c872fd50703bf47
+size 2648521472
Personal_4B-Q4_0_4_4.i.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bbf10e6d4011b4946d6086c1e26d63a4ad59d43d0895f372328ef2523004cf
+size 2648521728
Personal_4B-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bef198de2728e5770b6993f508614e79c10eece9d1c2b2f1e652fef52858dae
+size 4803216128
Personal_4B-Q8_0.i.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1294e23ed1b4252a7cbaa474935775601366fa48a29210472a283bf370791fcb
+size 4803216384
Personal_4B.imatrix ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8e1392540d5137a9ddf5418f47854dff8d8683993abf8b96aadc14be2495dac
+size 3677442
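
The `.imatrix` file is an importance matrix, and the `.i.gguf` naming presumably marks the quants produced with it. A hedged sketch of the usual llama.cpp workflow (both input filenames are assumptions; only the output names come from this commit):

```python
# Hedged sketch: generate an importance matrix with llama.cpp's
# llama-imatrix tool, then pass it to llama-quantize.
# "Personal_4B-F16.gguf" and "calibration.txt" are hypothetical inputs.
import subprocess

subprocess.run(
    ["llama-imatrix",
     "-m", "Personal_4B-F16.gguf",  # hypothetical full-precision GGUF
     "-f", "calibration.txt",       # hypothetical calibration corpus
     "-o", "Personal_4B.imatrix"],
    check=True,
)
subprocess.run(
    ["llama-quantize", "--imatrix", "Personal_4B.imatrix",
     "Personal_4B-F16.gguf", "Personal_4B-Q4_0.i.gguf", "Q4_0"],
    check=True,
)
```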
README.md ADDED
@@ -0,0 +1,11 @@
+---
+library_name: transformers
+license: other
+base_model: jeiku/Personal_4B
+tags:
+- gguf
+---
+
+# Personal_4B
+
+GGUF quants of https://huggingface.co/jeiku/Personal_4B
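
A minimal usage sketch for the uploaded quants: download one file and run it locally. The `repo_id` is a guess based on this commit's uploader, and `huggingface_hub` plus `llama-cpp-python` are assumed to be installed:

```python
# Hedged sketch: fetch a quant and generate a few tokens with it.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

path = hf_hub_download(
    repo_id="altomek/Personal_4B-GGUF",  # hypothetical repo id
    filename="Personal_4B-Q4_0.gguf",
)
llm = Llama(model_path=path, n_ctx=2048)
out = llm("Hello,", max_tokens=32)
print(out["choices"][0]["text"])
```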
README.old.md ADDED
@@ -0,0 +1,169 @@
+---
+library_name: transformers
+license: other
+base_model: FourOhFour/Crispy_Crab_4B
+tags:
+- axolotl
+- generated_from_trainer
+model-index:
+- name: personal4B
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
+<details><summary>See axolotl config</summary>
+
+axolotl version: `0.4.1`
+```yaml
+base_model: FourOhFour/Crispy_Crab_4B
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+
+load_in_8bit: false
+load_in_4bit: false
+strict: false
+
+hub_model_id: jeiku/personal4B
+hub_strategy: "all_checkpoints"
+push_dataset_to_hub:
+hf_use_auth_token: true
+
+datasets:
+  - path: jeiku/Hypno_ChatML
+    type: sharegpt
+    conversation: chatml
+  - path: jeiku/Soul_ChatML
+    type: sharegpt
+    conversation: chatml
+  - path: jeiku/Theory_Chat
+    type: sharegpt
+    conversation: chatml
+  - path: jeiku/Writing
+    type: completion
+    field: text
+
+chat_template: chatml
+
+shuffle_merged_datasets: true
+val_set_size: 0.0025
+output_dir: ./outputs/out
+
+adapter:
+lora_r:
+lora_alpha:
+lora_dropout:
+lora_target_linear:
+
+sequence_len: 8192
+sample_packing: true
+eval_sample_packing: false
+pad_to_sequence_len: true
+
+plugins:
+  - axolotl.integrations.liger.LigerPlugin
+liger_rope: true
+liger_rms_norm: true
+liger_swiglu: true
+liger_fused_linear_cross_entropy: true
+
+wandb_project: EXP4B
+wandb_entity:
+wandb_watch:
+wandb_name: EXP4B
+wandb_log_model:
+
+gradient_accumulation_steps: 12
+micro_batch_size: 2
+num_epochs: 4
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.00001
+weight_decay: 0.05
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: true
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_ratio: 0.1
+evals_per_epoch: 2
+eval_table_size:
+eval_max_new_tokens: 128
+saves_per_epoch: 1
+
+debug:
+deepspeed:
+fsdp:
+fsdp_config:
+
+special_tokens:
+  pad_token: <|finetune_right_pad_id|>
+
+```
+
+</details><br>
+
+# personal4B
+
+This model is a fine-tuned version of [FourOhFour/Crispy_Crab_4B](https://huggingface.co/FourOhFour/Crispy_Crab_4B) on the datasets listed in the axolotl config above.
+It achieves the following results on the evaluation set:
+- Loss: 1.9273
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 1e-05
+- train_batch_size: 2
+- eval_batch_size: 2
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 2
+- gradient_accumulation_steps: 12
+- total_train_batch_size: 48
+- total_eval_batch_size: 4
+- optimizer: adamw_bnb_8bit with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+- lr_scheduler_type: cosine
+- num_epochs: 4
+
+### Training results
+
+| Training Loss | Epoch  | Step | Validation Loss |
+|:-------------:|:------:|:----:|:---------------:|
+| 2.1634        | 0.8571 | 1    | 2.0454          |
+| 2.0907        | 1.7143 | 2    | 1.9455          |
+| 1.9539        | 2.5714 | 3    | 1.9296          |
+| 1.9493        | 3.4286 | 4    | 1.9273          |
+
+
+### Framework versions
+
+- Transformers 4.46.0.dev0
+- Pytorch 2.4.1+cu124
+- Datasets 3.0.1
+- Tokenizers 0.20.1
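
As a sanity check, the reported total_train_batch_size follows directly from the other hyperparameters (pure arithmetic, nothing assumed):

```python
# micro_batch_size x gradient_accumulation_steps x num_devices
micro_batch_size = 2
gradient_accumulation_steps = 12
num_devices = 2

total = micro_batch_size * gradient_accumulation_steps * num_devices
assert total == 48  # matches the reported total_train_batch_size
```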