ZebangCheng committed on
Commit 6a13b6a
1 Parent(s): 7e745fb

train config

train_configs/minigptv2_finetune_featureface.yaml ADDED
@@ -0,0 +1,69 @@
+ model:
+   arch: minigpt_v2
+   model_type: pretrain
+   max_txt_len: 1024
+   image_size: 448
+   end_sym: "</s>"
+   llama_model: "/home/user/project/Emotion-LLaMA/checkpoints/Llama-2-7b-chat-hf"
+
+   ckpt: "/home/user/project/Emotion-LLaMA/checkpoints/minigptv2_checkpoint.pth"
+
+   use_grad_checkpoint: True
+   chat_template: True
+   lora_r: 64
+   lora_alpha: 16
+
+
+ datasets:
+   feature_face_caption:
+     batch_size: 1
+     vis_processor:
+       train:
+         name: "blip2_image_train"
+         image_size: 448
+     text_processor:
+       train:
+         name: "blip_caption"
+     sample_ratio: 30
+
+ run:
+   task: image_text_pretrain
+   # optimizer
+   lr_sched: "linear_warmup_cosine_lr"
+   # init_lr: 1e-5
+   # min_lr: 8e-5
+   # warmup_lr: 1e-6
+
+   init_lr: 1e-5
+   min_lr: 1e-6
+   warmup_lr: 1e-6
+
+   # init_lr: 1e-6
+   # min_lr: 1e-6
+   # warmup_lr: 1e-6
+
+   weight_decay: 0.05
+   max_epoch: 30
+   num_workers: 6
+   # iters_per_epoch: 5000
+   iters_per_epoch: 1000
+
+
+   warmup_steps: 1000
+
+   seed: 42
+   output_dir: "/home/user/project/Emotion-LLaMA/checkpoints/save_checkpoint"
+
+   amp: True
+   resume_ckpt_path: null
+
+   evaluate: False
+   train_splits: ["train"]
+
+   device: "cuda"
+   world_size: 2
+   dist_url: "env://"
+   distributed: True
+
+   wandb_log: False
+   job_name: minigptv2_finetune
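
For anyone adapting this config: below is a minimal sketch of loading and inspecting it with OmegaConf, the config library used by the MiniGPT-4/LAVIS code this repository builds on. The relative path assumes you run from the repository root; the absolute checkpoint paths (`llama_model`, `ckpt`, `output_dir`) will need to be changed to match your own machine.

```python
from omegaconf import OmegaConf

# Load the training config added in this commit.
cfg = OmegaConf.load("train_configs/minigptv2_finetune_featureface.yaml")

# Spot-check a few values the trainer will read.
print(cfg.model.arch)                          # minigpt_v2
print(cfg.model.lora_r, cfg.model.lora_alpha)  # LoRA rank / scaling
print(cfg.run.lr_sched)                        # linear_warmup_cosine_lr
print(cfg.run.world_size)                      # 2 processes expected
```

Since `world_size: 2` and `distributed: True`, training is presumably launched with two processes, e.g. `torchrun --nproc_per_node 2 train.py --cfg-path train_configs/minigptv2_finetune_featureface.yaml` (the MiniGPT-4-style entry point; check the repository README for the exact command).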
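The `lr_sched: "linear_warmup_cosine_lr"` setting, together with `warmup_lr`, `init_lr`, `min_lr`, `warmup_steps`, and `max_epoch`, describes a linear warmup followed by cosine decay. A sketch of that curve follows, assuming the LAVIS-style scheduler that MiniGPT-v2 inherits; the function is illustrative, not the repository's actual implementation.

```python
import math

def linear_warmup_cosine_lr(step: int, epoch: int,
                            warmup_steps: int = 1000,  # run.warmup_steps
                            warmup_lr: float = 1e-6,   # run.warmup_lr
                            init_lr: float = 1e-5,     # run.init_lr
                            min_lr: float = 1e-6,      # run.min_lr
                            max_epoch: int = 30) -> float:
    """Illustrative schedule; defaults mirror this config's values."""
    if epoch == 0 and step < warmup_steps:
        # Linear ramp from warmup_lr up to init_lr over the warmup steps.
        return warmup_lr + (init_lr - warmup_lr) * step / max(1, warmup_steps)
    # Cosine decay from init_lr down to min_lr across epochs.
    return min_lr + 0.5 * (init_lr - min_lr) * (1 + math.cos(math.pi * epoch / max_epoch))
```

With `iters_per_epoch: 1000` and `warmup_steps: 1000`, the warmup spans exactly the first epoch, after which the cosine decay takes over.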