anicolson committed
Commit 0fad322
1 parent: 9ee0cef

Upload model

config.json CHANGED
@@ -78,7 +78,7 @@
   "top_p": 1.0,
   "torch_dtype": null,
   "torchscript": false,
-  "transformers_version": "4.29.2",
+  "transformers_version": "4.28.1",
   "type_vocab_size": 2,
   "typical_p": 1.0,
   "use_bfloat16": false,
@@ -2243,7 +2243,7 @@
   "top_p": 1.0,
   "torch_dtype": "float32",
   "torchscript": false,
-  "transformers_version": "4.29.2",
+  "transformers_version": "4.28.1",
   "typical_p": 1.0,
   "use_bfloat16": false
   },
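The only substantive change in this file is the `transformers_version` stamp (4.29.2 → 4.28.1), which appears in both nested sub-configurations. `save_pretrained` writes this field automatically, so the bump down presumably just reflects the library version installed when the checkpoint was re-saved. A minimal, self-contained sketch of where the field comes from (the scratch directory path is an arbitrary choice, not part of this repo):

```python
import json
import transformers

# Saving any config stamps the version of the transformers library in use;
# re-saving the checkpoint under 4.28.1 is what produces the change in this commit.
config = transformers.CvtConfig()
config.save_pretrained("/tmp/cvt_config_demo")

with open("/tmp/cvt_config_demo/config.json") as f:
    saved = json.load(f)

print(saved["transformers_version"])  # matches transformers.__version__ at save time
```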
generation_config.json CHANGED
@@ -1,5 +1,5 @@
 {
   "_from_model_config": true,
   "pad_token_id": 0,
-  "transformers_version": "4.29.2"
+  "transformers_version": "4.28.1"
 }
modelling_single.py CHANGED
@@ -22,7 +22,7 @@ class CvtWithProjectionHeadConfig(transformers.CvtConfig):
 
 
 class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
-    projected_last_hidden_state: torch.FloatTensor
+    last_hidden_state: torch.FloatTensor
 
 
 class CvtProjectionHead(torch.nn.Module):
@@ -76,7 +76,7 @@ class CvtWithProjectionHead(transformers.CvtPreTrainedModel):
             return projection
 
         return ModelOutputWithProjectionEmbedding(
-            projected_last_hidden_state=projection,
+            last_hidden_state=projection,
         )
 
 
@@ -210,7 +210,7 @@ class SingleCXREncoderDecoderModel(VisionEncoderDecoderModel):
             decoder_hidden_states=decoder_outputs.hidden_states,
             decoder_attentions=decoder_outputs.attentions,
             cross_attentions=decoder_outputs.cross_attentions,
-            encoder_last_hidden_state=encoder_outputs.projected_last_hidden_state,
+            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
             # encoder_hidden_states=encoder_outputs.hidden_states,
             # encoder_attentions=encoder_outputs.attentions,
         )
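The renames above change nothing functional inside the encoder; they align the custom output class with the standard `ModelOutput` field-naming convention, so the projection is exposed as `last_hidden_state` and the encoder-decoder wrapper can read `encoder_outputs.last_hidden_state` like any stock encoder output (the name transformers' encoder-decoder machinery generally expects). A minimal sketch of the pattern, with an illustrative tensor shape that is not taken from this model:

```python
from dataclasses import dataclass

import torch
import transformers


@dataclass
class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
    # Named last_hidden_state so downstream code treats the projection like a
    # standard encoder output (attribute access and positional access both work).
    last_hidden_state: torch.FloatTensor = None


projection = torch.randn(2, 197, 768)  # batch x tokens x dim, illustrative only
outputs = ModelOutputWithProjectionEmbedding(last_hidden_state=projection)

assert outputs.last_hidden_state is outputs[0]
```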
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d111366fdd206e51232d2f206f51de7e0707da9658a5237c694450b94935698
+oid sha256:e1b86278e3c7a3dcf77f4a5804f25eaaba4de4ccd85f09a87813843a6d54a87e
 size 449713809
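`pytorch_model.bin` is tracked with Git LFS, so only the pointer file changes here: the sha256 oid is new while the size is unchanged, consistent with the weights having been re-serialized. A short sketch for verifying a downloaded checkpoint against the pointer's oid (the local path is a placeholder):

```python
import hashlib


def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so a ~450 MB checkpoint never has to fit in memory at once."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


# Expected oid taken from the new LFS pointer above; the file path is a placeholder.
EXPECTED = "e1b86278e3c7a3dcf77f4a5804f25eaaba4de4ccd85f09a87813843a6d54a87e"
print(sha256_of("pytorch_model.bin") == EXPECTED)
```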