Upload model
Browse files- config.json +1 -2
- dataset.py +5 -2
- generation_config.json +1 -1
- modelling_cxrmate_ed.py +2 -1
config.json
CHANGED
@@ -61,7 +61,6 @@
|
|
61 |
"history"
|
62 |
],
|
63 |
"min_length": 0,
|
64 |
-
"mlp_bias": false,
|
65 |
"model_type": "llama",
|
66 |
"no_repeat_ngram_size": 0,
|
67 |
"num_attention_heads": 12,
|
@@ -238,5 +237,5 @@
|
|
238 |
"model_type": "vision-encoder-decoder",
|
239 |
"tie_word_embeddings": false,
|
240 |
"torch_dtype": "float32",
|
241 |
-
"transformers_version": "4.
|
242 |
}
|
|
|
61 |
"history"
|
62 |
],
|
63 |
"min_length": 0,
|
|
|
64 |
"model_type": "llama",
|
65 |
"no_repeat_ngram_size": 0,
|
66 |
"num_attention_heads": 12,
|
|
|
237 |
"model_type": "vision-encoder-decoder",
|
238 |
"tie_word_embeddings": false,
|
239 |
"torch_dtype": "float32",
|
240 |
+
"transformers_version": "4.40.2"
|
241 |
}
|
dataset.py
CHANGED
@@ -5,12 +5,15 @@ import torch
|
|
5 |
from torch.utils.data import Dataset
|
6 |
from torchvision.io import read_image
|
7 |
|
8 |
-
from tools.utils import mimic_cxr_image_path
|
9 |
-
|
10 |
# Ordered by oblique, lateral, AP, and then PA views so that PA views are closest in position to the generated tokens (and oblique is furthest).
|
11 |
VIEW_ORDER = ['LPO', 'RAO', 'LAO', 'SWIMMERS', 'XTABLE LATERAL', 'LL', 'LATERAL', 'AP AXIAL', 'AP RLD', 'AP LLD', 'AP', 'PA RLD', 'PA LLD', 'PA']
|
12 |
|
13 |
|
|
|
|
|
|
|
|
|
|
|
14 |
class StudyIDEDStayIDSubset(Dataset):
|
15 |
"""
|
16 |
Study ID & ED stay ID subset. Examples are indexed by the study identifier.
|
|
|
5 |
from torch.utils.data import Dataset
|
6 |
from torchvision.io import read_image
|
7 |
|
|
|
|
|
8 |
# Ordered by oblique, lateral, AP, and then PA views so that PA views are closest in position to the generated tokens (and oblique is furthest).
|
9 |
VIEW_ORDER = ['LPO', 'RAO', 'LAO', 'SWIMMERS', 'XTABLE LATERAL', 'LL', 'LATERAL', 'AP AXIAL', 'AP RLD', 'AP LLD', 'AP', 'PA RLD', 'PA LLD', 'PA']
|
10 |
|
11 |
|
12 |
+
def mimic_cxr_image_path(dir, subject_id, study_id, dicom_id, ext='dcm'):
    """Build the on-disk path of a MIMIC-CXR image.

    Layout: <dir>/p<first two digits of subject_id>/p<subject_id>/s<study_id>/<dicom_id>.<ext>

    Note: `dir` shadows the builtin, but it is a public keyword parameter,
    so it is kept for backward compatibility.
    """
    subject = str(subject_id)
    return os.path.join(dir, f'p{subject[:2]}', f'p{subject}',
                        f's{study_id}', f'{dicom_id}.{ext}')
|
15 |
+
|
16 |
+
|
17 |
class StudyIDEDStayIDSubset(Dataset):
|
18 |
"""
|
19 |
Study ID & ED stay ID subset. Examples are indexed by the study identifier.
|
generation_config.json
CHANGED
@@ -3,5 +3,5 @@
|
|
3 |
"bos_token_id": 1,
|
4 |
"eos_token_id": 2,
|
5 |
"pad_token_id": 4,
|
6 |
-
"transformers_version": "4.
|
7 |
}
|
|
|
3 |
"bos_token_id": 1,
|
4 |
"eos_token_id": 2,
|
5 |
"pad_token_id": 4,
|
6 |
+
"transformers_version": "4.40.2"
|
7 |
}
|
modelling_cxrmate_ed.py
CHANGED
@@ -25,7 +25,7 @@ from .modelling_uniformer import MultiUniFormerWithProjectionHead
|
|
25 |
from .records import EDCXRSubjectRecords
|
26 |
from .tables import ed_module_tables, mimic_cxr_tables
|
27 |
|
28 |
-
logger = logging.get_logger(__name__)
|
29 |
|
30 |
|
31 |
def create_lookup_table(df, columns, start_idx):
|
@@ -892,6 +892,7 @@ class MIMICIVEDCXRMultimodalModel(VisionEncoderDecoderModel):
|
|
892 |
right = torch.cat((upper_right, lower_right), dim=2)
|
893 |
|
894 |
mixed_causality_4d_attention_mask = torch.cat((left, right), dim=-1)
|
|
|
895 |
return mixed_causality_4d_attention_mask
|
896 |
|
897 |
@staticmethod
|
|
|
25 |
from .records import EDCXRSubjectRecords
|
26 |
from .tables import ed_module_tables, mimic_cxr_tables
|
27 |
|
28 |
+
logger = logging.get_logger(__name__)
|
29 |
|
30 |
|
31 |
def create_lookup_table(df, columns, start_idx):
|
|
|
892 |
right = torch.cat((upper_right, lower_right), dim=2)
|
893 |
|
894 |
mixed_causality_4d_attention_mask = torch.cat((left, right), dim=-1)
|
895 |
+
|
896 |
return mixed_causality_4d_attention_mask
|
897 |
|
898 |
@staticmethod
|