sal4ahm committed
Commit 8596689 · verified · 1 Parent(s): 41a184e

Update dataset.py


Updates for the split folders; removed the hard-coded local directories.
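
The paths referenced in the updated _load_file_names and _create_questions suggest the loader now expects a repository layout roughly like the sketch below. Only the directory and file names appear in the diff, so the exact pairing of image and JSON folders is an assumption.

    <data_dir>/
        images/  images2/  images3/      # chart images named <PMC_ID>.jpg
        jsons/   jsons2/   jsons3/       # QA annotation files named <file_name>.json
        test_filenames.txt               # file list consulted when split='test'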

Files changed (1)
  1. dataset.py +37 -64
dataset.py CHANGED
@@ -1,86 +1,88 @@
-
 import os
 import json
-import time
 from PIL import Image
 from torch.utils.data import Dataset, DataLoader
 from torchvision import transforms
 
 class RQADataset(Dataset):
-    def __init__(self, data_config, transform=None):
+    def __init__(self, data_dir, split='train', transform=None):
         """
         Initializes the dataset.
 
         Args:
-            data_config: Configuration object containing paths and settings.
+            data_dir: Base directory of the dataset on the Hugging Face Hub.
+            split: Split of the dataset ('train' or 'test').
             transform: Optional transform to be applied on a sample.
         """
-        self.img_dir = data_config.img_dir
-        self.json_dir = data_config.json_dir
-        self.filter_list_file = data_config.filter_list
-        self.train = data_config.train
+        self.data_dir = data_dir
+        self.split = split
         self.transform = transform or transforms.Compose([
-            transforms.Resize((512, 512))
+            transforms.Resize((512, 512)),
+            transforms.ToTensor()
         ])
 
+        # Initialize lists to hold image and question data
         self.questions = []
-
-        # Load file names for testing or use all files for training
         self.file_names = self._load_file_names()
         self._create_questions()
         print(f"Total Questions Loaded: {len(self.questions)}")
 
     def _load_file_names(self):
         """
-        Loads the list of file names to be processed.
+        Loads the list of file names to be processed based on the split.
 
         Returns:
             A list of file names without extensions.
         """
-        if not self.train and self.filter_list_file:
-            with open(self.filter_list_file, 'r') as f:
+        if self.split == 'test':
+            # Load test file names from the list provided on Hugging Face
+            filter_list_file = os.path.join(self.data_dir, 'test_filenames.txt')
+            with open(filter_list_file, 'r') as f:
                 file_names = [line.strip() for line in f]
-            print(f"Loaded {len(file_names)} test files from {self.filter_list_file}")
-            return file_names
+            print(f"Loaded {len(file_names)} test files from {filter_list_file}")
         else:
-            # Use all files for training
-            return [os.path.splitext(file)[0] for file in os.listdir(self.json_dir) if file.endswith('.json')]
+            # For training, use all JSON files from all directories
+            file_names = []
+            for json_dir in ['jsons', 'jsons2', 'jsons3']:
+                json_dir_path = os.path.join(self.data_dir, json_dir)
+                json_files = [os.path.splitext(file)[0] for file in os.listdir(json_dir_path) if file.endswith('.json')]
+                file_names.extend(json_files)
+        return file_names
 
     def _create_questions(self):
         """
         Creates the list of questions from JSON files.
         """
-        start_time = time.time()
         unused_count = 0
-
         for file_name in self.file_names:
-            json_path = os.path.join(self.json_dir, file_name + '.json')
-            if not os.path.exists(json_path):
-                unused_count += 1
-                continue
-
+            # Determine which folder contains the current JSON file
+            if file_name in os.listdir(os.path.join(self.data_dir, 'jsons')):
+                json_path = os.path.join(self.data_dir, 'jsons', f"{file_name}.json")
+                img_dir = 'images'
+            elif file_name in os.listdir(os.path.join(self.data_dir, 'jsons2')):
+                json_path = os.path.join(self.data_dir, 'jsons2', f"{file_name}.json")
+                img_dir = 'images2'
+            else:
+                json_path = os.path.join(self.data_dir, 'jsons3', f"{file_name}.json")
+                img_dir = 'images3'
+
+            # Load questions from the JSON file
             with open(json_path, 'r') as f:
                 json_data = json.load(f)
             for item in json_data:
                 if 'PMC_ID' not in item or 'qa_id' not in item:
                     continue # Ensure all necessary fields are present
-                item['image_path'] = os.path.join(self.img_dir, item['PMC_ID'] + '.jpg')
+                item['image_path'] = os.path.join(self.data_dir, img_dir, f"{item['PMC_ID']}.jpg")
                 if os.path.exists(item['image_path']):
                     self.questions.append(item)
                 else:
                     unused_count += 1
-
-        elapsed_time = time.time() - start_time
-        print(f"Elapsed time to create questions: {elapsed_time:.2f} seconds = {elapsed_time/60:.2f} minutes")
-        print(f'Total unused/used images: {unused_count} / {len(self.file_names) - unused_count}')
+        print(f"Total unused/used images: {unused_count} / {len(self.file_names) - unused_count}")
 
     def __len__(self):
         return len(self.questions)
 
     def __getitem__(self, idx):
-        return self._load_data(idx)
-
-    def _load_data(self, idx):
         """
         Loads a single data point.
 
@@ -132,37 +134,8 @@ class RQADataset(Dataset):
         }
 
 if __name__ == "__main__":
-    # Define a simple data structure to hold the paths
-    class DataConfig:
-        img_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/images'
-        json_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/qa'
-        filter_list = '/home/jupyter/RealCQA/code/data/RQA_V0/test_filenames.txt'
-        train = False # Set to False to prepare the test files
-
-    # Initialize dataset
-    dataset = RQADataset(DataConfig)
-
-    # Test loading a single item
-    print(f"Number of samples in dataset: {len(dataset)}")
-    sample = dataset[0]
-    print("Sample data:", sample)
-
-    # Initialize DataLoader
-    dataloader = DataLoader(dataset, batch_size=4, collate_fn=RQADataset.custom_collate)
-
-    # Test DataLoader
-    for batch in dataloader:
-        print("Batch data:", batch)
-        break # Load only one batch for testing
-
-    class DataConfig:
-        img_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/images'
-        json_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/qa'
-        filter_list = '/home/jupyter/RealCQA/code/data/RQA_V0/test_filenames.txt'
-        train = True # Set to False to prepare the test files
-
-    # Initialize dataset
-    dataset = RQADataset(DataConfig)
+    # Initialize dataset for training
+    dataset = RQADataset(data_dir='.', split='train')
 
     # Test loading a single item
     print(f"Number of samples in dataset: {len(dataset)}")
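
As a usage sketch, the updated class could be exercised for the test split along the lines below. The local path and batch size are illustrative only, and the call to RQADataset.custom_collate assumes that helper is still defined in the unchanged portion of dataset.py, as it was used by the previous __main__ block.

    from torch.utils.data import DataLoader
    from dataset import RQADataset  # assumes dataset.py is importable from the working directory

    # Point data_dir at a local copy of the dataset (path is illustrative)
    dataset = RQADataset(data_dir='/path/to/RealCQA', split='test')
    print(f"Number of samples in dataset: {len(dataset)}")

    # Batch a few samples; custom_collate is assumed to exist elsewhere in the file
    dataloader = DataLoader(dataset, batch_size=4, collate_fn=RQADataset.custom_collate)
    for batch in dataloader:
        print("Batch data:", batch)
        break  # inspect a single batch only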