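# Evaluation config for the BLIP-2 protein captioning model: a protein sequence
# encoder (ESM, "3b" variant per esm_size) bridged through 32 query tokens to a
# Mistral-7B language model, loaded from the stage-2 checkpoint below.
# Assuming the standard LAVIS entry point, a single-GPU run would look roughly
# like the following (the config path is a placeholder for wherever this file lives):
#   python -m torch.distributed.run --nproc_per_node=1 evaluate.py --cfg-path path/to/this_config.yaml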
model:
  arch: blip2_protein_mistral
  model_type: pretrain_protein_mistral7b
  load_pretrained: True
  pretrained: "lavis/output/BLIP2/Pretrain_stage2/20240408161/checkpoint_2.pth"
  freeze_vit: True
  max_protein_len: 1000
  max_txt_len: 150
  num_query_token: 32
  get_eval: True
  esm_size: "3b"

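# Protein-function caption data (GO presumably refers to Gene Ontology annotations),
# processed with the standard blip_caption text processor.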
datasets:
  GO_protein_function:
    text_processor:
      train:
        name: "blip_caption"

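# Runner settings for the captioning task (evaluation only; see evaluate: True below).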
run:
  task: captioning

  batch_size_train: 1
  batch_size_eval: 1
  num_workers: 4

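  # Beam-search decoding parameters used when generating captions.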
  max_len: 32
  min_len: 1
  num_beams: 5

  seed: 42
  output_dir: "output/BLIP2/protein_eval"

  evaluate: True
  test_splits: ["test"]
  report_metric: False

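  # Device / distributed setup: a single process (world_size 1) on CUDA.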
  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True