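# proteinchat stage-2 fine-tuning config: a MiniGPT-4-style model backed by
# a Llama-2-7b-chat language model (see model.ckpt and run.stage below).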
model:
  arch: mini_gpt4
  model_type: pretrain_vicuna
  freeze_vit: True
  freeze_qformer: True
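  # With the ViT and Q-Former both frozen, gradients flow only into the
  # projection layer (plus any trainable language-model parameters).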
  # low_resource: True   # uncomment to cut GPU memory (8-bit LM loading in MiniGPT-4-style code)
  max_txt_len: 256
  end_sym: "###"
  prompt_template: '###Human: {} ###Assistant: '
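  # end_sym ("###") marks the end of a generated answer, matching the
  # Human/Assistant prompt template above.
  # ckpt selects the checkpoint to initialize from; keep it in sync with the
  # language model being fine-tuned (Llama-3 vs. Llama-2 lines below).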
  # ckpt: '/home/ubuntu/proteinchat/minigpt4/output/Meta-Llama-3-8B-Instruct-hf/20240606190/checkpoint_2.pth'
  ckpt: '/home/ubuntu/proteinchat/minigpt4/output/Llama-2-7b-chat-hf/20240606005/checkpoint_2.pth'


datasets:
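  # cc_sbu_align loader with BLIP-2 image preprocessing (224x224) and BLIP
  # caption text processing, as in the MiniGPT-4 alignment stage.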
  cc_sbu_align:
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"

run:
  task: image_text_pretrain
  # optimizer
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 1e-5
  min_lr: 1e-6
  warmup_lr: 1e-6
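  # linear_warmup_cosine_lr: ramp from warmup_lr (1e-6) to init_lr (1e-5)
  # over warmup_steps, then cosine-decay toward min_lr (1e-6).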

  weight_decay: 0.05
  max_epoch: 10
  # iters_per_epoch: 762
  batch_size_train: 1
  batch_size_eval: 1
  num_workers: 12
  warmup_steps: 5000
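  # With the commented iters_per_epoch of 762, 10 epochs give ~7,620 steps,
  # so a 5,000-step warmup spans most of training.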

  seed: 42
  # output_dir: "ft/Meta-Llama-3-8B-Instruct-hf/"
  output_dir: "ft/Llama-2-7b-chat-hf/"

  amp: True               # automatic mixed-precision training
  resume_ckpt_path: null  # set to a checkpoint path to resume an interrupted run

  evaluate: False         # training-only run; no eval splits are defined
  train_splits: ["train"]

  device: "cuda"
  world_size: 1           # single-process run; init still goes through env://
  dist_url: "env://"
  distributed: True
  stage: 2                # second training stage (alignment fine-tuning in MiniGPT-4-style pipelines)