monsoon-nlp committed
Commit 8bb9fca
1 Parent(s): d05d09e

End of training
README.md ADDED
@@ -0,0 +1,55 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - generated_from_trainer
+ base_model: monsoon-nlp/tinyllama-mixpretrain-quinoa-sciphi
+ model-index:
+ - name: tinyllama-mixpretrain-uniprottune
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # tinyllama-mixpretrain-uniprottune
+
+ This model is a fine-tuned version of [monsoon-nlp/tinyllama-mixpretrain-quinoa-sciphi](https://huggingface.co/monsoon-nlp/tinyllama-mixpretrain-quinoa-sciphi) on the None dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 20
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 10
+ - num_epochs: 1
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.10.0
+ - Transformers 4.38.2
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.19.0
+ - Tokenizers 0.15.2
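
The card above omits usage instructions. As a minimal sketch, assuming the adapter is applied to its declared base model via the standard `transformers`/`peft` loading path (nothing here is stated in the commit itself), inference could look like:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the declared base model, then attach this repo's LoRA adapter.
base = AutoModelForCausalLM.from_pretrained("monsoon-nlp/tinyllama-mixpretrain-quinoa-sciphi")
tokenizer = AutoTokenizer.from_pretrained("monsoon-nlp/tinyllama-mixpretrain-quinoa-sciphi")
model = PeftModel.from_pretrained(base, "monsoon-nlp/tinyllama-mixpretrain-uniprottune")

# Placeholder prompt: the card does not document a prompt format.
inputs = tokenizer("MSKGEELFTGV", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```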
adapter_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "LlamaForCausalLM",
+     "parent_library": "transformers.models.llama.modeling_llama"
+   },
+   "base_model_name_or_path": "monsoon-nlp/tinyllama-mixpretrain-quinoa-sciphi",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "lm_head",
+     "v_proj",
+     "q_proj"
+   ],
+   "task_type": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
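
For reference, a `LoraConfig` that would serialize to roughly the JSON above might look like this; it is a reconstruction for illustration, not the actual training script:

```python
from peft import LoraConfig

# Mirrors the key fields of adapter_config.json; the remaining fields
# ("fan_in_fan_out", "init_lora_weights", ...) are peft 0.10.0 defaults.
config = LoraConfig(
    r=64,
    lora_alpha=128,
    lora_dropout=0.0,
    bias="none",
    target_modules=["lm_head", "v_proj", "q_proj"],
)
```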
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a825b1e32911dde9f4b890130eebddfacb8720ea17443cb89418ab9268c550aa
+ size 306917464
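
This is a Git LFS pointer, not the weights themselves: only the SHA-256 object id and byte size (~307 MB) live in git. One way to fetch the actual blob is via `huggingface_hub` (an illustrative snippet, not part of the commit):

```python
from huggingface_hub import hf_hub_download

# Downloads the safetensors file that the LFS pointer above references.
path = hf_hub_download(
    repo_id="monsoon-nlp/tinyllama-mixpretrain-uniprottune",
    filename="adapter_model.safetensors",
)
print(path)
```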
runs/Apr22_14-12-34_9f21a6d74a36/events.out.tfevents.1713795156.9f21a6d74a36.1212.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c8a361b88d6eb6fb0e5eecf212cd525c387213a8a104ea1c1a2e8c01c1005aa
+ size 4676
runs/Apr22_14-12-49_9f21a6d74a36/events.out.tfevents.1713795169.9f21a6d74a36.1212.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68e2b136ca81ae9ed043ddd8fb03166c01ed8ced9b42f73a91f87139be1625ba
+ size 4676
runs/Apr22_14-13-40_9f21a6d74a36/events.out.tfevents.1713795221.9f21a6d74a36.1212.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ddab918a6de37292cdd12c7cfdaba98249673105534037c8411245d74057a72
+ size 4675
runs/Apr22_14-15-44_9f21a6d74a36/events.out.tfevents.1713795345.9f21a6d74a36.1212.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7fe95d315b5ad1ea8228962be27d3a91618eac58d41ef4e89fe40f02b9facf5
+ size 4675
runs/Apr22_14-17-38_9f21a6d74a36/events.out.tfevents.1713795459.9f21a6d74a36.1212.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44d46fb7160a0f78f0388056f8b5f6e8caf3abb3de5c8d50b931c98a2dd8e2f6
+ size 4675
runs/Apr22_14-19-18_9f21a6d74a36/events.out.tfevents.1713795558.9f21a6d74a36.1212.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dac17d6c259b2237452a6eab5368b1f76a648015f3bf62a6991915eeaee6c0d2
+ size 4675
runs/Apr22_14-22-25_9f21a6d74a36/events.out.tfevents.1713795745.9f21a6d74a36.1212.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3461b42ec5f8752158f123d71c075f54a9fc439ef163a0872c427ad22d3966aa
+ size 4184
runs/Apr22_14-25-07_9f21a6d74a36/events.out.tfevents.1713795909.9f21a6d74a36.7030.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5452f89c4d4fac0db5e074404e8e8ac9b8147e2f52a0f855636a4d56c7a22f24
+ size 4184
runs/Apr22_14-25-58_9f21a6d74a36/events.out.tfevents.1713795960.9f21a6d74a36.7578.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bf04539692709bf5665c45e30bd6a52f8428103629f9a3525b6c3531ea169e1
+ size 4184
runs/Apr22_14-26-47_9f21a6d74a36/events.out.tfevents.1713796009.9f21a6d74a36.7934.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42442149a953c1c3a727d7d20ef9f5e0d780d2af6f4fba7461ecff3831dfcd5f
+ size 4184
runs/Apr22_14-27-42_9f21a6d74a36/events.out.tfevents.1713796064.9f21a6d74a36.8313.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fad0d14b2fcd9e77e094d0bf3bbaf043c835575442e04196c9572c83e414c16f
+ size 5872
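
The `runs/` files above are TensorBoard event logs, likewise stored as LFS pointers. A sketch of inspecting one locally after fetching it; the directory path is taken from this commit, but the available tag names are an assumption:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point at any one of the runs/ directories once it has been downloaded.
acc = EventAccumulator("runs/Apr22_14-27-42_9f21a6d74a36")
acc.Reload()
print(acc.Tags())  # lists available tags, e.g. a scalar like "train/loss"
```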
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d17f1580a6e171c8562ca5bf833158d961bf53915bba58783a50b4da688ae8e
+ size 4984
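
`training_args.bin` is the pickled `TrainingArguments` object the `Trainer` saves alongside its outputs. Assuming a compatible `transformers` install, it can be inspected like this (a sketch, not something documented in the commit):

```python
import torch

# Unpickling requires transformers to be importable. On recent PyTorch,
# pass weights_only=False explicitly: this file is not a tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size)
```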