model:
  arch: mini_gpt4
  model_type: pretrain_vicuna
  freeze_vit: True
  freeze_qformer: True
  max_txt_len: 256
  end_sym: "###"
  low_resource: False
  prompt_template: '###Human: {} ###Assistant: '
  # ckpt: '/home/ubuntu/proteinchat/minigpt4/ft/Llama-2-7b-chat-hf/20240610191/checkpoint_5.pth'
  ckpt: 'minigpt4/ft/Meta-Llama-3-8B-Instruct-hf/20240609203/checkpoint_5.pth'

datasets:
  cc_sbu_align:
    vis_processor:
      train:
        name: "blip2_image_eval"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"

run:
  task: image_text_pretrain