---
license: apache-2.0
datasets:
- Vi-VLM/Vista
language:
- vi
---
This repository contains:

- the merged LoRA weights (see the merge sketch at the end of this README)
- the training script used for fine-tuning (below)

```bash
#!/bin/bash

# Paths: pretrained multimodal-projector checkpoint, base LLM, and data root.
PRETRAIN_CKPT_PATH=checkpoints/llava-qwen1.5-0.5b-pretrain-vista_description-3ep
BASE_MODEL=Qwen/Qwen1.5-0.5B
ROOT_DATA=data/llm_data

# LoRA fine-tuning on 5 GPUs with DeepSpeed ZeRO-2.
# Effective batch size = 8 per device x 5 GPUs x 2 accumulation steps = 80.
WANDB_PROJECT=chart-vision-llm CUDA_VISIBLE_DEVICES=0,1,2,3,4 deepspeed moellava/train/train_mem.py \
    --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 0.00000125 \
    --deepspeed ./scripts/zero2.json \
    --model_name_or_path $BASE_MODEL \
    --version qwen \
    --data_path $ROOT_DATA/json_files/vista_llava.json \
    --image_folder ${ROOT_DATA}/coco2017/train2017 \
    --image_tower google/siglip-base-patch16-256-multilingual \
    --image_projector_type mlp2x_gelu \
    --pretrain_mm_mlp_adapter $PRETRAIN_CKPT_PATH/mm_projector.bin \
    --tune_mm_mlp_adapter True \
    --mm_vision_select_layer -2 \
    --mm_use_im_start_end False \
    --mm_use_im_patch_token False \
    --image_aspect_ratio pad \
    --group_by_modality_length True \
    --bf16 True \
    --output_dir ./checkpoints/ft-llava-qwen1.5-0.5b-vista_llava-lora-2ep \
    --num_train_epochs 2 \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 2 \
    --gradient_accumulation_steps 2 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 5000 \
    --save_total_limit 1 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 10 \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True \
    --dataloader_num_workers 4 \
    --lazy_preprocess True \
    --report_to wandb \
    --push_to_hub True \
    --cache_dir cache_dir \
    --run_name ft-llava-qwen1.5-0.5b-vista_llava-lora-2ep
```
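
For reference, the LoRA flags above (`--lora_r 128 --lora_alpha 256`) correspond roughly to the `peft` configuration below. This is a minimal sketch only: the real config is constructed inside `moellava/train/train_mem.py`, and the `lora_dropout` value and `target_modules` list here are assumptions, not values taken from this repo.

```python
from peft import LoraConfig

# Sketch of the adapter config implied by the training flags above.
lora_config = LoraConfig(
    r=128,              # --lora_r: rank of the low-rank update matrices
    lora_alpha=256,     # --lora_alpha: scaling factor (alpha / r = 2.0 here)
    lora_dropout=0.05,  # assumption: common default in LLaVA-style trainers
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # assumption
    task_type="CAUSAL_LM",
)
```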
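
After training, a LoRA adapter is merged by folding its low-rank updates into the base weights. Below is a minimal sketch of that step using `peft`'s `merge_and_unload`; the adapter path comes from `--output_dir` above, while the merged output path is illustrative. Note that this plain-`peft` sketch only covers the language model: for a LLaVA-style checkpoint the vision tower and projector also have to be carried over, so the actual merge for this repo likely went through the moellava tooling.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = "Qwen/Qwen1.5-0.5B"
adapter = "checkpoints/ft-llava-qwen1.5-0.5b-vista_llava-lora-2ep"  # --output_dir above

# Load the base LLM, attach the LoRA adapter, then fold the low-rank
# updates into the base weights and drop the adapter wrappers.
model = AutoModelForCausalLM.from_pretrained(base)
model = PeftModel.from_pretrained(model, adapter)
merged = model.merge_and_unload()

# Save a standalone merged checkpoint (output path is illustrative).
merged.save_pretrained("merged-llava-qwen1.5-0.5b-vista")
AutoTokenizer.from_pretrained(base).save_pretrained("merged-llava-qwen1.5-0.5b-vista")
```

Because the weights in this repository are already merged, they can be loaded directly without `peft`.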