import nibabel
import pydicom
import numpy as np
import cv2
import os


def merge_dicom_images(folder_path):
    """Read all DICOM slices in a folder and stack them into a 3D volume."""
    dicom_files = [f for f in os.listdir(folder_path) if f.endswith('.dcm')]
    if len(dicom_files) == 0:
        print("No DICOM files found in the specified folder.")
        return None

    # Read the first DICOM file to get the necessary metadata.
    first_file_path = os.path.join(folder_path, dicom_files[0])
    first_dicom = pydicom.dcmread(first_file_path)
    pixel_spacing = first_dicom.PixelSpacing
    slice_thickness = first_dicom.SliceThickness

    # Sort the DICOM files by their instance numbers.
    sorted_files = sorted(
        dicom_files,
        key=lambda x: pydicom.dcmread(os.path.join(folder_path, x)).InstanceNumber)

    # Read the DICOM images slice by slice.
    slices = []
    for file in sorted_files:
        dicom = pydicom.dcmread(os.path.join(folder_path, file))
        slices.append(dicom.pixel_array)

    # Stack the list of slices into a 3D NumPy array.
    volume = np.stack(slices)

    return volume, pixel_spacing, slice_thickness


def get_dynamic_image(frames, normalized=True):
    """Take a list of frames and return either a raw or normalized dynamic image.

    Adapted from https://github.com/tcvrick/Python-Dynamic-Images-for-Action-Recognition
    """
    def _get_channel_frames(iter_frames, num_channels):
        """Take a list of frames and return a list of frame arrays split by channel."""
        frames = [[] for channel in range(num_channels)]
        for frame in iter_frames:
            for channel_frames, channel in zip(frames, cv2.split(frame)):
                channel_frames.append(channel.reshape((*channel.shape[0:2], 1)))
        for i in range(len(frames)):
            frames[i] = np.array(frames[i])
        return frames

    def _compute_dynamic_image(frames):
        """Adapted from https://github.com/hbilen/dynamic-image-nets"""
        num_frames, h, w, depth = frames.shape

        # Compute the rank-pooling coefficient for each frame.
        coefficients = np.zeros(num_frames)
        for n in range(num_frames):
            cumulative_indices = np.array(range(n, num_frames)) + 1
            coefficients[n] = np.sum(((2 * cumulative_indices) - num_frames) / cumulative_indices)

        # Multiply the frames by the coefficients and sum the result.
        x1 = np.expand_dims(frames, axis=0)
        x2 = np.reshape(coefficients, (num_frames, 1, 1, 1))
        result = x1 * x2
        return np.sum(result[0], axis=0).squeeze()

    num_channels = frames[0].shape[2]
    channel_frames = _get_channel_frames(frames, num_channels)
    channel_dynamic_images = [_compute_dynamic_image(channel) for channel in channel_frames]

    dynamic_image = cv2.merge(tuple(channel_dynamic_images))
    if normalized:
        dynamic_image = cv2.normalize(dynamic_image, None, 0, 255, norm_type=cv2.NORM_MINMAX)
        dynamic_image = dynamic_image.astype('uint8')

    return dynamic_image


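# A minimal usage sketch (not part of the original script): chain merge_dicom_images and
# get_dynamic_image to summarize a DICOM series as a single dynamic image. The helper name
# is hypothetical, and it assumes the series stacks to (num_slices, h, w).
def dicom_folder_to_dynamic_image(folder_path):
    result = merge_dicom_images(folder_path)
    if result is None:
        return None
    volume, pixel_spacing, slice_thickness = result
    # get_dynamic_image expects frames shaped (num_frames, h, w, channels), so add a channel axis.
    frames = np.expand_dims(volume, 3)
    return get_dynamic_image(frames)

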
def get_video_frames(video_path):
    # Open the video and create an empty frame list.
    video = cv2.VideoCapture(video_path)
    frame_list = []

    # Loop until there are no frames left.
    try:
        while True:
            more_frames, frame = video.read()
            if not more_frames:
                break
            frame_list.append(frame)
    finally:
        video.release()

    return frame_list


if __name__ == "__main__":
    # Load the preprocessed NIfTI volume (the raw-DICOM path via merge_dicom_images is left commented out).
    #volume, _, _ = merge_dicom_images("ADNI/002_S_0413/MPR__GradWarp__B1_Correction__N3__Scaled/2006-05-02_12_31_52.0/I45117/processed.nii.gz")
    volume = nibabel.load(
        "../../ADNI/002_S_0413/MPR__GradWarp__B1_Correction__N3__Scaled/2006-05-02_12_31_52.0/I45117/processed.nii.gz")
    volume = volume.get_fdata()

    # Reshape to (frames, h, w), then add a trailing channel axis so get_dynamic_image can split channels.
    volume = np.reshape(volume, (109, 91, 91))
    print(volume.shape)
    volume = np.expand_dims(volume, 3)

    # Collapse the slice dimension into a single dynamic image and replicate it to three channels.
    image = get_dynamic_image(volume)
    image = np.expand_dims(image, 0)
    image = np.concatenate([image, image, image], 0)
    print(image.shape)
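
    # Hypothetical extra demo (not in the original run): the same rank-pooling routine also
    # summarizes ordinary video via get_video_frames. "example_video.mp4" is a placeholder path.
    example_video = "example_video.mp4"
    if os.path.exists(example_video):
        video_frames = get_video_frames(example_video)
        if video_frames:
            video_dynamic_image = get_dynamic_image(video_frames)
            print(video_dynamic_image.shape)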