File size: 774 Bytes
18131bb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# Copyright (c) OpenMMLab. All rights reserved.
from transformers import PretrainedConfig


class HformerConfig(PretrainedConfig):
    """Configuration for an Hformer (Q-Former-style) projector module.

    Holds the hyper-parameters used to build a module that appears to
    bridge a visual encoder and an LLM (inferred from the parameter
    names; the model class itself is defined elsewhere).

    Args:
        num_query_token (int): Number of learnable query tokens.
            Defaults to 32.
        visual_hidden_size (int): Hidden size of the visual encoder
            features. Defaults to 4096.
        llm_hidden_size (int): Hidden size expected by the target LLM.
            Defaults to 768.
        cross_attention_freq (int): Insert cross-attention every this
            many layers. Defaults to 2.
        bert (str): Name or path of the underlying BERT model.
            Defaults to 'bert-base-uncased'.
        bias (bool): Whether projection layers use a bias term.
            Defaults to True.
        qformer_pth (str | None): Optional path to pretrained Q-Former
            weights. Defaults to None.
        **kwargs: Forwarded to ``PretrainedConfig.__init__``.
    """

    model_type = 'hformer'
    _auto_class = 'AutoConfig'

    def __init__(
        self,
        num_query_token=32,
        visual_hidden_size=4096,
        llm_hidden_size=768,
        cross_attention_freq=2,
        bert='bert-base-uncased',
        bias=True,
        qformer_pth=None,
        **kwargs,
    ):
        self.num_query_token = num_query_token
        self.visual_hidden_size = visual_hidden_size
        self.llm_hidden_size = llm_hidden_size
        self.bias = bias
        self.cross_attention_freq = cross_attention_freq
        self.bert = bert
        self.qformer_pth = qformer_pth
        # Set our attributes first, then let the HF base class consume
        # the remaining kwargs (mirrors the original call order).
        super().__init__(**kwargs)