bhalladitya committed
Commit d690c47
Parent: add6acc

End of training

README.md CHANGED
@@ -15,7 +15,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # llva-1.5-7b-scicap
 
- This model is a fine-tuned version of [llava-hf/llava-1.5-7b-hf](https://huggingface.co/llava-hf/llava-1.5-7b-hf) on an unknown dataset.
+ This model is a fine-tuned version of [llava-hf/llava-1.5-7b-hf](https://huggingface.co/llava-hf/llava-1.5-7b-hf) on the None dataset.
 
 ## Model description
 
@@ -35,12 +35,12 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 1.4e-05
- - train_batch_size: 1
+ - train_batch_size: 8
 - eval_batch_size: 8
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
- - num_epochs: 1
+ - num_epochs: 3
 - mixed_precision_training: Native AMP
 
 ### Training results
@@ -51,6 +51,6 @@ The following hyperparameters were used during training:
 
 - PEFT 0.11.1
 - Transformers 4.42.3
- - Pytorch 2.3.0+cu121
- - Datasets 2.20.0
+ - Pytorch 2.1.0
+ - Datasets 2.15.0
 - Tokenizers 0.19.1
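For reference, a minimal sketch of how the updated hyperparameters could be expressed as Transformers `TrainingArguments`. The commit stores the real arguments only as the binary training_args.bin, so `output_dir` and the exact optimizer name are assumptions; only the values listed in the card are taken from the commit.

```python
# Hypothetical reconstruction of the updated training setup; the training
# script itself is not part of this commit, so everything beyond the
# hyperparameters listed in the model card is an assumption.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="llva-1.5-7b-scicap",  # repo name from the card (assumption)
    learning_rate=1.4e-05,
    per_device_train_batch_size=8,    # was 1 before this commit
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,                   # "Adam with betas=(0.9,0.999)"
    adam_beta2=0.999,
    adam_epsilon=1e-08,               # "epsilon=1e-08"
    lr_scheduler_type="linear",
    num_train_epochs=3,               # was 1 before this commit
    fp16=True,                        # "Native AMP" mixed precision
)
```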
adapter_config.json CHANGED
@@ -23,19 +23,19 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "down_proj",
+    "out_proj",
+    "fc2",
     "up_proj",
     "k_proj",
-    "v_proj",
-    "linear_2",
-    "o_proj",
+    "q_proj",
     "linear_1",
-    "out_proj",
-    "fc2",
-    "gate_proj",
+    "fc1",
+    "o_proj",
     "lm_head",
-    "q_proj",
-    "down_proj",
-    "fc1"
+    "gate_proj",
+    "v_proj",
+    "linear_2"
   ],
   "task_type": null,
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:19dc14c53bafe6afa9c78a930a383b628cace71e3e0803bf68c9359b0a544605
+oid sha256:7a9028ff2d70894b667e158dc9cac01b6206af72306e06f416d55d6daadeaffa
 size 1290994752
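The adapter weights are stored as a Git LFS pointer; only the sha256 oid changes here, with the size identical at 1,290,994,752 bytes. A minimal sketch of applying the adapter to the base model, assuming the hub repo id `bhalladitya/llva-1.5-7b-scicap` (inferred from the committer name and card title, not stated in the commit):

```python
# Minimal sketch of loading this commit's adapter on top of the base model.
# The adapter repo id is an inference and may not be exact.
from transformers import LlavaForConditionalGeneration, AutoProcessor
from peft import PeftModel

base = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf")
processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")

# Applies adapter_model.safetensors from this commit over the base weights.
model = PeftModel.from_pretrained(base, "bhalladitya/llva-1.5-7b-scicap")
```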
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7922fefb0f15a9505ad74fb2b0c736514ee911fe6c680d3eb0e287f8f1526692
-size 5432
+oid sha256:22446ff6369ea64f604493a6bfa32948dcd2da3052de375ae8181a21576c8498
+size 5176
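training_args.bin is likewise an LFS pointer: a spec version line, a sha256 oid, and a byte size. A small sketch of checking a downloaded file against its pointer, using the new values from this commit; the local path assumes you have fetched the real file rather than the pointer.

```python
# Verify a downloaded LFS file against the sha256 oid and byte size recorded
# in its pointer. Expected values are the ones from this commit.
import hashlib
from pathlib import Path

path = Path("training_args.bin")  # assumes the resolved file, not the pointer
expected_oid = "22446ff6369ea64f604493a6bfa32948dcd2da3052de375ae8181a21576c8498"
expected_size = 5176

data = path.read_bytes()
assert len(data) == expected_size, f"size mismatch: {len(data)}"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("pointer and file agree")
```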