anicolson committed
Commit 0e5254f · verified · 1 Parent(s): 6e98d03

Upload model

config.json CHANGED
@@ -1,9 +1,9 @@
 {
   "architectures": [
-    "MIMICIVEDCXRMultimodalModel"
+    "CXRMateEDModel"
   ],
   "auto_map": {
-    "AutoModel": "modelling_cxrmate_ed.MIMICIVEDCXRMultimodalModel"
+    "AutoModel": "modelling_cxrmate_ed.CXRMateEDModel"
   },
   "decoder": {
     "_name_or_path": "",
configuration_cxrmate_ed.py CHANGED
@@ -4,9 +4,9 @@ from transformers.utils import logging
 logger = logging.get_logger(__name__)
 
 
-class EncoderDecoderConfig(PretrainedConfig):
+class CXRMateEDConfig(PretrainedConfig):
 
-    model_type = "encoder-decoder"
+    model_type = "cxrmate-ed"
     is_composition = True
 
     def __init__(self, **kwargs):
@@ -25,13 +25,7 @@ class EncoderDecoderConfig(PretrainedConfig):
     def from_encoder_decoder_configs(
         cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
     ) -> PretrainedConfig:
-        r"""
-        Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
-        decoder model configuration.
 
-        Returns:
-            [`EncoderDecoderConfig`]: An instance of a configuration object
-        """
         logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
         decoder_config.is_decoder = True
         decoder_config.add_cross_attention = True
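
Beyond the rename to CXRMateEDConfig and the new model_type "cxrmate-ed", the composed-config behaviour is unchanged: from_encoder_decoder_configs still marks the decoder config as a cross-attending decoder. A short sketch, assuming the module is importable on its own and using placeholder checkpoint paths:

from transformers import AutoConfig

from configuration_cxrmate_ed import CXRMateEDConfig  # assumes a local, standalone import

# Placeholder checkpoints (assumption).
encoder_config = AutoConfig.from_pretrained("path/to/encoder")
decoder_config = AutoConfig.from_pretrained("path/to/decoder")

config = CXRMateEDConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.model_type == "cxrmate-ed"
assert decoder_config.is_decoder and decoder_config.add_cross_attention
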
modelling_cxrmate_ed.py CHANGED
@@ -16,7 +16,7 @@ from transformers.modeling_outputs import Seq2SeqLMOutput
 from transformers.modeling_utils import PreTrainedModel
 from transformers.utils import logging
 
-from .configuration_cxrmate_ed import EncoderDecoderConfig
+from .configuration_cxrmate_ed import CXRMateEDConfig
 from .dataset import PriorsDataset
 from .modelling_uniformer import MultiUniFormerWithProjectionHead
 from .prepare_dataset import prepare_dataset
@@ -46,9 +46,9 @@ class FNNEncoder(torch.nn.Module):
         return self.down_proj(self.act_fn(self.up_proj(x)))
 
 
-class MIMICIVEDCXRMultimodalModel(VisionEncoderDecoderModel):
+class CXRMateEDModel(VisionEncoderDecoderModel):
 
-    config_class = EncoderDecoderConfig
+    config_class = CXRMateEDConfig
     base_model_prefix = "vision_encoder_decoder"
     main_input_name = "input_ids"
     supports_gradient_checkpointing = True
@@ -69,7 +69,7 @@ class MIMICIVEDCXRMultimodalModel(VisionEncoderDecoderModel):
         if config is None and (encoder is None or decoder is None):
             raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
         if config is None:
-            config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
+            config = CXRMateEDConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
         else:
             if not isinstance(config, self.config_class):
                 raise ValueError(f"Config: {config} has to be of type {self.config_class}")
@@ -301,7 +301,7 @@ class MIMICIVEDCXRMultimodalModel(VisionEncoderDecoderModel):
         decoder = transformers.AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
 
         # instantiate config with corresponding kwargs
-        config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
+        config = CXRMateEDConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
 
         # make sure input & output embeddings is not tied
         config.tie_word_embeddings = False
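
The @@ -301 hunk appears to sit inside the from_encoder_decoder_pretrained classmethod inherited from VisionEncoderDecoderModel (it loads the decoder via AutoModelForCausalLM and then composes the config). Assuming that method keeps its usual signature, composing the model from separately pretrained parts would look roughly like this, with placeholder checkpoint paths:

from modelling_cxrmate_ed import CXRMateEDModel  # assumes a local, standalone import

# Placeholder checkpoints (assumption).
model = CXRMateEDModel.from_encoder_decoder_pretrained(
    "path/to/pretrained/encoder",
    "path/to/pretrained/decoder",
)

# Per the class attributes and config logic above, the composed config is a
# CXRMateEDConfig with tie_word_embeddings set to False.
print(type(model.config).__name__, model.config.tie_word_embeddings)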