body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---|
def apply_transform(x, transform_matrix, channel_axis=0, fill_mode='nearest', cval=0.0):
"Applies the image transformation specified by a matrix.\n\n # Arguments\n x: 2D numpy array, single image.\n transform_matrix: Numpy array specifying the geometric transformation.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n The transformed version of the input.\n "
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix, final_offset, order=1, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, (channel_axis + 1))
return x | -4,162,282,333,979,550,000 | Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input. | keras/preprocessing/image.py | apply_transform | HangJie720/keras | python | def apply_transform(x, transform_matrix, channel_axis=0, fill_mode='nearest', cval=0.0):
"Applies the image transformation specified by a matrix.\n\n # Arguments\n x: 2D numpy array, single image.\n transform_matrix: Numpy array specifying the geometric transformation.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n The transformed version of the input.\n "
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix, final_offset, order=1, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, (channel_axis + 1))
return x |
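
A minimal sketch of how a transform matrix like the one consumed above can be built and applied, assuming only NumPy and SciPy are installed; it repeats the same per-channel `scipy.ndimage.affine_transform` call rather than importing the dataset function itself.

```python
import numpy as np
from scipy import ndimage as ndi

# 3x3 homogeneous rotation matrix (15 degrees), in the form random_transform builds.
theta = np.deg2rad(15.0)
transform_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                             [np.sin(theta),  np.cos(theta), 0],
                             [0,              0,             1]])

x = np.random.rand(3, 64, 64)  # channels-first single image: (channels, rows, cols)
channel_images = [ndi.affine_transform(channel,
                                       transform_matrix[:2, :2],
                                       transform_matrix[:2, 2],
                                       order=1, mode='nearest', cval=0.0)
                  for channel in x]
rotated = np.stack(channel_images, axis=0)
print(rotated.shape)  # (3, 64, 64)
```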
def array_to_img(x, data_format=None, scale=True):
'Converts a 3D Numpy array to a PIL Image instance.\n\n # Arguments\n x: Input Numpy array.\n data_format: Image data format.\n either "channels_first" or "channels_last".\n scale: Whether to rescale image values\n to be within `[0, 255]`.\n\n # Returns\n A PIL Image instance.\n\n # Raises\n ImportError: if PIL is not available.\n ValueError: if invalid `x` or `data_format` is passed.\n '
if (pil_image is None):
raise ImportError('Could not import PIL.Image. The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if (x.ndim != 3):
raise ValueError('Expected image array to have rank 3 (single image). Got array with shape:', x.shape)
if (data_format is None):
data_format = K.image_data_format()
if (data_format not in {'channels_first', 'channels_last'}):
raise ValueError('Invalid data_format:', data_format)
if (data_format == 'channels_first'):
x = x.transpose(1, 2, 0)
if scale:
x = (x + max((- np.min(x)), 0))
x_max = np.max(x)
if (x_max != 0):
x /= x_max
x *= 255
if (x.shape[2] == 3):
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif (x.shape[2] == 1):
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2]) | 3,415,023,904,854,424,000 | Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed. | keras/preprocessing/image.py | array_to_img | HangJie720/keras | python | def array_to_img(x, data_format=None, scale=True):
'Converts a 3D Numpy array to a PIL Image instance.\n\n # Arguments\n x: Input Numpy array.\n data_format: Image data format.\n either "channels_first" or "channels_last".\n scale: Whether to rescale image values\n to be within `[0, 255]`.\n\n # Returns\n A PIL Image instance.\n\n # Raises\n ImportError: if PIL is not available.\n ValueError: if invalid `x` or `data_format` is passed.\n '
if (pil_image is None):
raise ImportError('Could not import PIL.Image. The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if (x.ndim != 3):
raise ValueError('Expected image array to have rank 3 (single image). Got array with shape:', x.shape)
if (data_format is None):
data_format = K.image_data_format()
if (data_format not in {'channels_first', 'channels_last'}):
raise ValueError('Invalid data_format:', data_format)
if (data_format == 'channels_first'):
x = x.transpose(1, 2, 0)
if scale:
x = (x + max((- np.min(x)), 0))
x_max = np.max(x)
if (x_max != 0):
x /= x_max
x *= 255
if (x.shape[2] == 3):
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif (x.shape[2] == 1):
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2]) |
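
The `scale` branch above is plain NumPy; a standalone sketch of that rescaling step (no Keras or PIL needed):

```python
import numpy as np

x = np.random.randn(32, 32, 3).astype('float32')  # arbitrary-range image
x = x + max(-np.min(x), 0)   # shift so the minimum becomes 0
x_max = np.max(x)
if x_max != 0:
    x /= x_max               # squeeze into [0, 1]
x *= 255                     # expand into [0, 255]
print(x.min(), x.max())      # 0.0 255.0
```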
def img_to_array(img, data_format=None):
'Converts a PIL Image instance to a Numpy array.\n\n # Arguments\n img: PIL Image instance.\n data_format: Image data format,\n either "channels_first" or "channels_last".\n\n # Returns\n A 3D Numpy array.\n\n # Raises\n ValueError: if invalid `img` or `data_format` is passed.\n '
if (data_format is None):
data_format = K.image_data_format()
if (data_format not in {'channels_first', 'channels_last'}):
raise ValueError('Unknown data_format: ', data_format)
x = np.asarray(img, dtype=K.floatx())
if (len(x.shape) == 3):
if (data_format == 'channels_first'):
x = x.transpose(2, 0, 1)
elif (len(x.shape) == 2):
if (data_format == 'channels_first'):
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x | 553,028,858,712,164,860 | Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed. | keras/preprocessing/image.py | img_to_array | HangJie720/keras | python | def img_to_array(img, data_format=None):
'Converts a PIL Image instance to a Numpy array.\n\n # Arguments\n img: PIL Image instance.\n data_format: Image data format,\n either "channels_first" or "channels_last".\n\n # Returns\n A 3D Numpy array.\n\n # Raises\n ValueError: if invalid `img` or `data_format` is passed.\n '
if (data_format is None):
data_format = K.image_data_format()
if (data_format not in {'channels_first', 'channels_last'}):
raise ValueError('Unknown data_format: ', data_format)
x = np.asarray(img, dtype=K.floatx())
if (len(x.shape) == 3):
if (data_format == 'channels_first'):
x = x.transpose(2, 0, 1)
elif (len(x.shape) == 2):
if (data_format == 'channels_first'):
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x |
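
A round-trip sketch of the two conversions in this and the previous row, using Pillow and NumPy directly (both assumed installed):

```python
import numpy as np
from PIL import Image

arr = (np.random.rand(32, 32, 3) * 255).astype('uint8')  # channels_last RGB array
img = Image.fromarray(arr, 'RGB')                         # array -> PIL image
back = np.asarray(img, dtype='float32')                   # PIL image -> array
print(back.shape)                                         # (32, 32, 3)
```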
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
'Saves an image stored as a Numpy array to a path or file object.\n\n # Arguments\n path: Path or file object.\n x: Numpy array.\n data_format: Image data format,\n either "channels_first" or "channels_last".\n file_format: Optional file format override. If omitted, the\n format to use is determined from the filename extension.\n If a file object was used instead of a filename, this\n parameter should always be used.\n scale: Whether to rescale image values to be within `[0, 255]`.\n **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.\n '
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs) | -4,915,116,415,993,890,000 | Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`. | keras/preprocessing/image.py | save_img | HangJie720/keras | python | def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
'Saves an image stored as a Numpy array to a path or file object.\n\n # Arguments\n path: Path or file object.\n x: Numpy array.\n data_format: Image data format,\n either "channels_first" or "channels_last".\n file_format: Optional file format override. If omitted, the\n format to use is determined from the filename extension.\n If a file object was used instead of a filename, this\n parameter should always be used.\n scale: Whether to rescale image values to be within `[0, 255]`.\n **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.\n '
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs) |
def load_img(path, grayscale=False, target_size=None, interpolation='nearest'):
'Loads an image into PIL format.\n\n # Arguments\n path: Path to image file.\n grayscale: Boolean, whether to load the image as grayscale.\n target_size: Either `None` (default to original size)\n or tuple of ints `(img_height, img_width)`.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image.\n Supported methods are "nearest", "bilinear", and "bicubic".\n If PIL version 1.1.3 or newer is installed, "lanczos" is also\n supported. If PIL version 3.4.0 or newer is installed, "box" and\n "hamming" are also supported. By default, "nearest" is used.\n\n # Returns\n A PIL Image instance.\n\n # Raises\n ImportError: if PIL is not available.\n ValueError: if interpolation method is not supported.\n '
if (pil_image is None):
raise ImportError('Could not import PIL.Image. The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if (img.mode != 'L'):
img = img.convert('L')
elif (img.mode != 'RGB'):
img = img.convert('RGB')
if (target_size is not None):
width_height_tuple = (target_size[1], target_size[0])
if (img.size != width_height_tuple):
if (interpolation not in _PIL_INTERPOLATION_METHODS):
raise ValueError('Invalid interpolation method {} specified. Supported methods are {}'.format(interpolation, ', '.join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img | -3,354,841,818,060,038,700 | Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported. | keras/preprocessing/image.py | load_img | HangJie720/keras | python | def load_img(path, grayscale=False, target_size=None, interpolation='nearest'):
'Loads an image into PIL format.\n\n # Arguments\n path: Path to image file.\n grayscale: Boolean, whether to load the image as grayscale.\n target_size: Either `None` (default to original size)\n or tuple of ints `(img_height, img_width)`.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image.\n Supported methods are "nearest", "bilinear", and "bicubic".\n If PIL version 1.1.3 or newer is installed, "lanczos" is also\n supported. If PIL version 3.4.0 or newer is installed, "box" and\n "hamming" are also supported. By default, "nearest" is used.\n\n # Returns\n A PIL Image instance.\n\n # Raises\n ImportError: if PIL is not available.\n ValueError: if interpolation method is not supported.\n '
if (pil_image is None):
raise ImportError('Could not import PIL.Image. The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if (img.mode != 'L'):
img = img.convert('L')
elif (img.mode != 'RGB'):
img = img.convert('RGB')
if (target_size is not None):
width_height_tuple = (target_size[1], target_size[0])
if (img.size != width_height_tuple):
if (interpolation not in _PIL_INTERPOLATION_METHODS):
raise ValueError('Invalid interpolation method {} specified. Supported methods are {}'.format(interpolation, ', '.join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img |
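
One detail worth illustrating is that `target_size` is `(height, width)` while `PIL.Image.resize` expects `(width, height)`; a small sketch with Pillow (assumed installed), using `Image.new` as a stand-in for opening a file:

```python
from PIL import Image

img = Image.new('RGB', (640, 480))            # stand-in for pil_image.open(path)
target_size = (256, 128)                      # (img_height, img_width)
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
    img = img.resize(width_height_tuple, Image.NEAREST)
print(img.size)                               # (128, 256), i.e. (width, height)
```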
def _iter_valid_files(directory, white_list_formats, follow_links):
'Iterates on files with extension in `white_list_formats` contained in `directory`.\n\n # Arguments\n directory: Absolute path to the directory\n containing files to be counted\n white_list_formats: Set of strings containing allowed extensions for\n the files to be counted.\n follow_links: Boolean.\n\n # Yields\n Tuple of (root, filename) with extension in `white_list_formats`.\n '
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links), key=(lambda x: x[0]))
for (root, _, files) in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn("Using '.tiff' files with multiple bands will cause distortion. Please verify your output.")
if fname.lower().endswith(('.' + extension)):
(yield (root, fname)) | 2,464,239,369,516,098,000 | Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`. | keras/preprocessing/image.py | _iter_valid_files | HangJie720/keras | python | def _iter_valid_files(directory, white_list_formats, follow_links):
'Iterates on files with extension in `white_list_formats` contained in `directory`.\n\n # Arguments\n directory: Absolute path to the directory\n containing files to be counted\n white_list_formats: Set of strings containing allowed extensions for\n the files to be counted.\n follow_links: Boolean.\n\n # Yields\n Tuple of (root, filename) with extension in `white_list_formats`.\n '
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links), key=(lambda x: x[0]))
for (root, _, files) in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn("Using '.tiff' files with multiple bands will cause distortion. Please verify your output.")
if fname.lower().endswith(('.' + extension)):
(yield (root, fname)) |
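
A standalone sketch of the traversal logic above: deterministic `os.walk` ordering plus an extension whitelist (the directory name here is hypothetical).

```python
import os

white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff'}
directory = 'data/train'   # hypothetical image directory

for root, _, files in sorted(os.walk(directory, followlinks=False), key=lambda x: x[0]):
    for fname in sorted(files):
        if fname.lower().endswith(tuple('.' + ext for ext in white_list_formats)):
            print(os.path.join(root, fname))
```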
def _count_valid_files_in_directory(directory, white_list_formats, split, follow_links):
'Counts files with extension in `white_list_formats` contained in `directory`.\n\n # Arguments\n directory: absolute path to the directory\n containing files to be counted\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into\n account a certain fraction of files in each directory.\n E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent\n of images in each directory.\n follow_links: boolean.\n\n # Returns\n the count of files with extension in `white_list_formats` contained in\n the directory.\n '
num_files = len(list(_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
(start, stop) = (int((split[0] * num_files)), int((split[1] * num_files)))
else:
(start, stop) = (0, num_files)
return (stop - start) | 6,417,385,685,608,633,000 | Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory. | keras/preprocessing/image.py | _count_valid_files_in_directory | HangJie720/keras | python | def _count_valid_files_in_directory(directory, white_list_formats, split, follow_links):
'Counts files with extension in `white_list_formats` contained in `directory`.\n\n # Arguments\n directory: absolute path to the directory\n containing files to be counted\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into\n account a certain fraction of files in each directory.\n E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent\n of images in each directory.\n follow_links: boolean.\n\n # Returns\n the count of files with extension in `white_list_formats` contained in\n the directory.\n '
num_files = len(list(_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
(start, stop) = (int((split[0] * num_files)), int((split[1] * num_files)))
else:
(start, stop) = (0, num_files)
return (stop - start) |
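
The effect of the optional `split` tuple is just index arithmetic; a pure-Python sketch:

```python
num_files = 250
split = (0.2, 0.6)                        # keep the middle 40 percent of files
start, stop = int(split[0] * num_files), int(split[1] * num_files)
print(start, stop, stop - start)          # 50 150 100
```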
def _list_valid_filenames_in_directory(directory, white_list_formats, split, class_indices, follow_links):
'Lists paths of files in `subdir` with extensions in `white_list_formats`.\n\n # Arguments\n directory: absolute path to a directory containing the files to list.\n The directory name is used as class label\n and must be a key of `class_indices`.\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into\n account a certain fraction of files in each directory.\n E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent\n of images in each directory.\n class_indices: dictionary mapping a class name to its index.\n follow_links: boolean.\n\n # Returns\n classes: a list of class indices\n filenames: the path of valid files in `directory`, relative from\n `directory`\'s parent (e.g., if `directory` is "dataset/class1",\n the filenames will be\n `["class1/file1.jpg", "class1/file2.jpg", ...]`).\n '
dirname = os.path.basename(directory)
if split:
num_files = len(list(_iter_valid_files(directory, white_list_formats, follow_links)))
(start, stop) = (int((split[0] * num_files)), int((split[1] * num_files)))
valid_files = list(_iter_valid_files(directory, white_list_formats, follow_links))[start:stop]
else:
valid_files = _iter_valid_files(directory, white_list_formats, follow_links)
classes = []
filenames = []
for (root, fname) in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return (classes, filenames) | -1,473,300,423,495,430,400 | Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`). | keras/preprocessing/image.py | _list_valid_filenames_in_directory | HangJie720/keras | python | def _list_valid_filenames_in_directory(directory, white_list_formats, split, class_indices, follow_links):
'Lists paths of files in `subdir` with extensions in `white_list_formats`.\n\n # Arguments\n directory: absolute path to a directory containing the files to list.\n The directory name is used as class label\n and must be a key of `class_indices`.\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into\n account a certain fraction of files in each directory.\n E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent\n of images in each directory.\n class_indices: dictionary mapping a class name to its index.\n follow_links: boolean.\n\n # Returns\n classes: a list of class indices\n filenames: the path of valid files in `directory`, relative from\n `directory`\'s parent (e.g., if `directory` is "dataset/class1",\n the filenames will be\n `["class1/file1.jpg", "class1/file2.jpg", ...]`).\n '
dirname = os.path.basename(directory)
if split:
num_files = len(list(_iter_valid_files(directory, white_list_formats, follow_links)))
(start, stop) = (int((split[0] * num_files)), int((split[1] * num_files)))
valid_files = list(_iter_valid_files(directory, white_list_formats, follow_links))[start:stop]
else:
valid_files = _iter_valid_files(directory, white_list_formats, follow_links)
classes = []
filenames = []
for (root, fname) in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return (classes, filenames) |
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None):
'Takes numpy data & label arrays, and generates batches of augmented data.\n\n # Arguments\n x: Input data. Numpy array of rank 4 or a tuple.\n If tuple, the first element\n should contain the images and the second element\n another numpy array or a list of numpy arrays\n that gets passed to the output\n without any modifications.\n Can be used to feed the model miscellaneous data\n along with the images.\n In case of grayscale data, the channels axis of the image array\n should have value 1, and in case\n of RGB data, it should have value 3.\n y: Labels.\n batch_size: Int (default: 32).\n shuffle: Boolean (default: True).\n sample_weight: Sample weights.\n seed: Int (default: None).\n save_to_dir: None or str (default: None).\n This allows you to optionally specify a directory\n to which to save the augmented pictures being generated\n (useful for visualizing what you are doing).\n save_prefix: Str (default: `\'\'`).\n Prefix to use for filenames of saved pictures\n (only relevant if `save_to_dir` is set).\n save_format: one of "png", "jpeg"\n (only relevant if `save_to_dir` is set). Default: "png".\n subset: Subset of data (`"training"` or `"validation"`) if\n `validation_split` is set in `ImageDataGenerator`.\n\n # Returns\n An `Iterator` yielding tuples of `(x, y)`\n where `x` is a numpy array of image data\n (in the case of a single image input) or a list\n of numpy arrays (in the case with\n additional inputs) and `y` is a numpy array\n of corresponding labels. If \'sample_weight\' is not None,\n the yielded tuples are of the form `(x, y, sample_weight)`.\n If `y` is None, only the numpy array `x` is returned.\n '
return NumpyArrayIterator(x, y, self, batch_size=batch_size, shuffle=shuffle, sample_weight=sample_weight, seed=seed, data_format=self.data_format, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, subset=subset) | -2,937,696,480,903,132,700 | Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned. | keras/preprocessing/image.py | flow | HangJie720/keras | python | def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None, save_to_dir=None, save_prefix=, save_format='png', subset=None):
'Takes numpy data & label arrays, and generates batches of augmented data.\n\n # Arguments\n x: Input data. Numpy array of rank 4 or a tuple.\n If tuple, the first element\n should contain the images and the second element\n another numpy array or a list of numpy arrays\n that gets passed to the output\n without any modifications.\n Can be used to feed the model miscellaneous data\n along with the images.\n In case of grayscale data, the channels axis of the image array\n should have value 1, and in case\n of RGB data, it should have value 3.\n y: Labels.\n batch_size: Int (default: 32).\n shuffle: Boolean (default: True).\n sample_weight: Sample weights.\n seed: Int (default: None).\n save_to_dir: None or str (default: None).\n This allows you to optionally specify a directory\n to which to save the augmented pictures being generated\n (useful for visualizing what you are doing).\n save_prefix: Str (default: `\'\'`).\n Prefix to use for filenames of saved pictures\n (only relevant if `save_to_dir` is set).\n save_format: one of "png", "jpeg"\n (only relevant if `save_to_dir` is set). Default: "png".\n subset: Subset of data (`"training"` or `"validation"`) if\n `validation_split` is set in `ImageDataGenerator`.\n\n # Returns\n An `Iterator` yielding tuples of `(x, y)`\n where `x` is a numpy array of image data\n (in the case of a single image input) or a list\n of numpy arrays (in the case with\n additional inputs) and `y` is a numpy array\n of corresponding labels. If \'sample_weight\' is not None,\n the yielded tuples are of the form `(x, y, sample_weight)`.\n If `y` is None, only the numpy array `x` is returned.\n '
return NumpyArrayIterator(x, y, self, batch_size=batch_size, shuffle=shuffle, sample_weight=sample_weight, seed=seed, data_format=self.data_format, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, subset=subset) |
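
A typical call pattern for `flow()`, assuming a Keras 2.x installation that ships the `ImageDataGenerator` this method belongs to:

```python
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

x = np.random.rand(100, 32, 32, 3).astype('float32')   # channels_last RGB batch
y = np.random.randint(0, 10, size=100)
datagen = ImageDataGenerator(rotation_range=15, horizontal_flip=True)
batches = datagen.flow(x, y, batch_size=32, shuffle=True, seed=0)
x_batch, y_batch = next(batches)
print(x_batch.shape, y_batch.shape)                     # (32, 32, 32, 3) (32,)
```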
def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest'):
'Takes the path to a directory & generates batches of augmented data.\n\n # Arguments\n directory: Path to the target directory.\n It should contain one subdirectory per class.\n Any PNG, JPG, BMP, PPM or TIF images\n inside each of the subdirectories directory tree\n will be included in the generator.\n See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)\n for more details.\n target_size: Tuple of integers `(height, width)`,\n default: `(256, 256)`.\n The dimensions to which all images found will be resized.\n color_mode: One of "grayscale", "rbg". Default: "rgb".\n Whether the images will be converted to\n have 1 or 3 color channels.\n classes: Optional list of class subdirectories\n (e.g. `[\'dogs\', \'cats\']`). Default: None.\n If not provided, the list of classes will be automatically\n inferred from the subdirectory names/structure\n under `directory`, where each subdirectory will\n be treated as a different class\n (and the order of the classes, which will map to the label\n indices, will be alphanumeric).\n The dictionary containing the mapping from class names to class\n indices can be obtained via the attribute `class_indices`.\n class_mode: One of "categorical", "binary", "sparse",\n "input", or None. Default: "categorical".\n Determines the type of label arrays that are returned:\n - "categorical" will be 2D one-hot encoded labels,\n - "binary" will be 1D binary labels,\n "sparse" will be 1D integer labels,\n - "input" will be images identical\n to input images (mainly used to work with autoencoders).\n - If None, no labels are returned\n (the generator will only yield batches of image data,\n which is useful to use with `model.predict_generator()`,\n `model.evaluate_generator()`, etc.).\n Please note that in case of class_mode None,\n the data still needs to reside in a subdirectory\n of `directory` for it to work correctly.\n batch_size: Size of the batches of data (default: 32).\n shuffle: Whether to shuffle the data (default: True)\n seed: Optional random seed for shuffling and transformations.\n save_to_dir: None or str (default: None).\n This allows you to optionally specify\n a directory to which to save\n the augmented pictures being generated\n (useful for visualizing what you are doing).\n save_prefix: Str. Prefix to use for filenames of saved pictures\n (only relevant if `save_to_dir` is set).\n save_format: One of "png", "jpeg"\n (only relevant if `save_to_dir` is set). Default: "png".\n follow_links: Whether to follow symlinks inside\n class subdirectories (default: False).\n subset: Subset of data (`"training"` or `"validation"`) if\n `validation_split` is set in `ImageDataGenerator`.\n interpolation: Interpolation method used to\n resample the image if the\n target size is different from that of the loaded image.\n Supported methods are `"nearest"`, `"bilinear"`,\n and `"bicubic"`.\n If PIL version 1.1.3 or newer is installed, `"lanczos"` is also\n supported. If PIL version 3.4.0 or newer is installed,\n `"box"` and `"hamming"` are also supported.\n By default, `"nearest"` is used.\n\n # Returns\n A `DirectoryIterator` yielding tuples of `(x, y)`\n where `x` is a numpy array containing a batch\n of images with shape `(batch_size, *target_size, channels)`\n and `y` is a numpy array of corresponding labels.\n '
return DirectoryIterator(directory, self, target_size=target_size, color_mode=color_mode, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, follow_links=follow_links, subset=subset, interpolation=interpolation) | -610,350,686,517,741,700 | Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels. | keras/preprocessing/image.py | flow_from_directory | HangJie720/keras | python | def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix=, save_format='png', follow_links=False, subset=None, interpolation='nearest'):
'Takes the path to a directory & generates batches of augmented data.\n\n # Arguments\n directory: Path to the target directory.\n It should contain one subdirectory per class.\n Any PNG, JPG, BMP, PPM or TIF images\n inside each of the subdirectories directory tree\n will be included in the generator.\n See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)\n for more details.\n target_size: Tuple of integers `(height, width)`,\n default: `(256, 256)`.\n The dimensions to which all images found will be resized.\n color_mode: One of "grayscale", "rbg". Default: "rgb".\n Whether the images will be converted to\n have 1 or 3 color channels.\n classes: Optional list of class subdirectories\n (e.g. `[\'dogs\', \'cats\']`). Default: None.\n If not provided, the list of classes will be automatically\n inferred from the subdirectory names/structure\n under `directory`, where each subdirectory will\n be treated as a different class\n (and the order of the classes, which will map to the label\n indices, will be alphanumeric).\n The dictionary containing the mapping from class names to class\n indices can be obtained via the attribute `class_indices`.\n class_mode: One of "categorical", "binary", "sparse",\n "input", or None. Default: "categorical".\n Determines the type of label arrays that are returned:\n - "categorical" will be 2D one-hot encoded labels,\n - "binary" will be 1D binary labels,\n "sparse" will be 1D integer labels,\n - "input" will be images identical\n to input images (mainly used to work with autoencoders).\n - If None, no labels are returned\n (the generator will only yield batches of image data,\n which is useful to use with `model.predict_generator()`,\n `model.evaluate_generator()`, etc.).\n Please note that in case of class_mode None,\n the data still needs to reside in a subdirectory\n of `directory` for it to work correctly.\n batch_size: Size of the batches of data (default: 32).\n shuffle: Whether to shuffle the data (default: True)\n seed: Optional random seed for shuffling and transformations.\n save_to_dir: None or str (default: None).\n This allows you to optionally specify\n a directory to which to save\n the augmented pictures being generated\n (useful for visualizing what you are doing).\n save_prefix: Str. Prefix to use for filenames of saved pictures\n (only relevant if `save_to_dir` is set).\n save_format: One of "png", "jpeg"\n (only relevant if `save_to_dir` is set). Default: "png".\n follow_links: Whether to follow symlinks inside\n class subdirectories (default: False).\n subset: Subset of data (`"training"` or `"validation"`) if\n `validation_split` is set in `ImageDataGenerator`.\n interpolation: Interpolation method used to\n resample the image if the\n target size is different from that of the loaded image.\n Supported methods are `"nearest"`, `"bilinear"`,\n and `"bicubic"`.\n If PIL version 1.1.3 or newer is installed, `"lanczos"` is also\n supported. If PIL version 3.4.0 or newer is installed,\n `"box"` and `"hamming"` are also supported.\n By default, `"nearest"` is used.\n\n # Returns\n A `DirectoryIterator` yielding tuples of `(x, y)`\n where `x` is a numpy array containing a batch\n of images with shape `(batch_size, *target_size, channels)`\n and `y` is a numpy array of corresponding labels.\n '
return DirectoryIterator(directory, self, target_size=target_size, color_mode=color_mode, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, follow_links=follow_links, subset=subset, interpolation=interpolation) |
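
A typical `flow_from_directory()` call (Keras 2.x assumed; `data/train` is a hypothetical directory with one subdirectory per class):

```python
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)
train_gen = datagen.flow_from_directory('data/train',
                                        target_size=(256, 256),
                                        color_mode='rgb',
                                        class_mode='categorical',
                                        batch_size=32,
                                        subset='training')
print(train_gen.class_indices)   # e.g. {'cats': 0, 'dogs': 1}
```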
def standardize(self, x):
'Applies the normalization configuration to a batch of inputs.\n\n # Arguments\n x: Batch of inputs to be normalized.\n\n # Returns\n The inputs, normalized.\n '
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if (self.mean is not None):
x -= self.mean
else:
warnings.warn("This ImageDataGenerator specifies `featurewise_center`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
if self.featurewise_std_normalization:
if (self.std is not None):
x /= (self.std + K.epsilon())
else:
warnings.warn("This ImageDataGenerator specifies `featurewise_std_normalization`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
if self.zca_whitening:
if (self.principal_components is not None):
flatx = np.reshape(x, ((- 1), np.prod(x.shape[(- 3):])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn("This ImageDataGenerator specifies `zca_whitening`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
return x | 3,214,915,977,443,593,700 | Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized. | keras/preprocessing/image.py | standardize | HangJie720/keras | python | def standardize(self, x):
'Applies the normalization configuration to a batch of inputs.\n\n # Arguments\n x: Batch of inputs to be normalized.\n\n # Returns\n The inputs, normalized.\n '
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if (self.mean is not None):
x -= self.mean
else:
warnings.warn("This ImageDataGenerator specifies `featurewise_center`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
if self.featurewise_std_normalization:
if (self.std is not None):
x /= (self.std + K.epsilon())
else:
warnings.warn("This ImageDataGenerator specifies `featurewise_std_normalization`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
if self.zca_whitening:
if (self.principal_components is not None):
flatx = np.reshape(x, ((- 1), np.prod(x.shape[(- 3):])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn("This ImageDataGenerator specifies `zca_whitening`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.")
return x |
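
The samplewise branch of `standardize()` in isolation, as plain NumPy (the epsilon constant stands in for `K.epsilon()`):

```python
import numpy as np

epsilon = 1e-7                                   # stand-in for K.epsilon()
x = np.random.rand(32, 32, 3).astype('float32')
x -= np.mean(x, keepdims=True)                   # samplewise_center
x /= (np.std(x, keepdims=True) + epsilon)        # samplewise_std_normalization
print(round(float(x.mean()), 4), round(float(x.std()), 3))   # ~0.0 and ~1.0
```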
def random_transform(self, x, seed=None):
'Randomly augments a single image tensor.\n\n # Arguments\n x: 3D tensor, single image.\n seed: Random seed.\n\n # Returns\n A randomly transformed version of the input (same shape).\n '
img_row_axis = (self.row_axis - 1)
img_col_axis = (self.col_axis - 1)
img_channel_axis = (self.channel_axis - 1)
if (seed is not None):
np.random.seed(seed)
if self.rotation_range:
theta = np.deg2rad(np.random.uniform((- self.rotation_range), self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try:
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([(- 1), 1])
except ValueError:
tx = np.random.uniform((- self.height_shift_range), self.height_shift_range)
if (np.max(self.height_shift_range) < 1):
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try:
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([(- 1), 1])
except ValueError:
ty = np.random.uniform((- self.width_shift_range), self.width_shift_range)
if (np.max(self.width_shift_range) < 1):
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform((- self.shear_range), self.shear_range))
else:
shear = 0
if ((self.zoom_range[0] == 1) and (self.zoom_range[1] == 1)):
(zx, zy) = (1, 1)
else:
(zx, zy) = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
transform_matrix = None
if (theta != 0):
rotation_matrix = np.array([[np.cos(theta), (- np.sin(theta)), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
transform_matrix = rotation_matrix
if ((tx != 0) or (ty != 0)):
shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = (shift_matrix if (transform_matrix is None) else np.dot(transform_matrix, shift_matrix))
if (shear != 0):
shear_matrix = np.array([[1, (- np.sin(shear)), 0], [0, np.cos(shear), 0], [0, 0, 1]])
transform_matrix = (shear_matrix if (transform_matrix is None) else np.dot(transform_matrix, shear_matrix))
if ((zx != 1) or (zy != 1)):
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
transform_matrix = (zoom_matrix if (transform_matrix is None) else np.dot(transform_matrix, zoom_matrix))
if (transform_matrix is not None):
(h, w) = (x.shape[img_row_axis], x.shape[img_col_axis])
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis, fill_mode=self.fill_mode, cval=self.cval)
if (self.channel_shift_range != 0):
x = random_channel_shift(x, self.channel_shift_range, img_channel_axis)
if self.horizontal_flip:
if (np.random.random() < 0.5):
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if (np.random.random() < 0.5):
x = flip_axis(x, img_row_axis)
if (self.brightness_range is not None):
x = random_brightness(x, self.brightness_range)
return x | 6,704,900,902,420,396,000 | Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape). | keras/preprocessing/image.py | random_transform | HangJie720/keras | python | def random_transform(self, x, seed=None):
'Randomly augments a single image tensor.\n\n # Arguments\n x: 3D tensor, single image.\n seed: Random seed.\n\n # Returns\n A randomly transformed version of the input (same shape).\n '
img_row_axis = (self.row_axis - 1)
img_col_axis = (self.col_axis - 1)
img_channel_axis = (self.channel_axis - 1)
if (seed is not None):
np.random.seed(seed)
if self.rotation_range:
theta = np.deg2rad(np.random.uniform((- self.rotation_range), self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try:
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([(- 1), 1])
except ValueError:
tx = np.random.uniform((- self.height_shift_range), self.height_shift_range)
if (np.max(self.height_shift_range) < 1):
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try:
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([(- 1), 1])
except ValueError:
ty = np.random.uniform((- self.width_shift_range), self.width_shift_range)
if (np.max(self.width_shift_range) < 1):
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform((- self.shear_range), self.shear_range))
else:
shear = 0
if ((self.zoom_range[0] == 1) and (self.zoom_range[1] == 1)):
(zx, zy) = (1, 1)
else:
(zx, zy) = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
transform_matrix = None
if (theta != 0):
rotation_matrix = np.array([[np.cos(theta), (- np.sin(theta)), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
transform_matrix = rotation_matrix
if ((tx != 0) or (ty != 0)):
shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = (shift_matrix if (transform_matrix is None) else np.dot(transform_matrix, shift_matrix))
if (shear != 0):
shear_matrix = np.array([[1, (- np.sin(shear)), 0], [0, np.cos(shear), 0], [0, 0, 1]])
transform_matrix = (shear_matrix if (transform_matrix is None) else np.dot(transform_matrix, shear_matrix))
if ((zx != 1) or (zy != 1)):
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
transform_matrix = (zoom_matrix if (transform_matrix is None) else np.dot(transform_matrix, zoom_matrix))
if (transform_matrix is not None):
(h, w) = (x.shape[img_row_axis], x.shape[img_col_axis])
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis, fill_mode=self.fill_mode, cval=self.cval)
if (self.channel_shift_range != 0):
x = random_channel_shift(x, self.channel_shift_range, img_channel_axis)
if self.horizontal_flip:
if (np.random.random() < 0.5):
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if (np.random.random() < 0.5):
x = flip_axis(x, img_row_axis)
if (self.brightness_range is not None):
x = random_brightness(x, self.brightness_range)
return x |
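
How the per-transform matrices above are chained into one affine matrix, as a small NumPy sketch (parameter values are arbitrary):

```python
import numpy as np

theta = np.deg2rad(10.0)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                            [np.sin(theta),  np.cos(theta), 0],
                            [0, 0, 1]])
shift_matrix = np.array([[1, 0, 4],      # tx = 4 rows
                         [0, 1, -2],     # ty = -2 cols
                         [0, 0, 1]])
zoom_matrix = np.array([[1.2, 0, 0],
                        [0, 0.8, 0],
                        [0, 0, 1]])

# Same composition order as random_transform: rotation, then shift, then zoom.
transform_matrix = rotation_matrix.dot(shift_matrix).dot(zoom_matrix)
print(transform_matrix.round(3))
```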
def fit(self, x, augment=False, rounds=1, seed=None):
'Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.\n\n Only required if `featurewise_center` or\n `featurewise_std_normalization` or `zca_whitening` are set to True.\n\n # Arguments\n x: Sample data. Should have rank 4.\n In case of grayscale data,\n the channels axis should have value 1, and in case\n of RGB data, it should have value 3.\n augment: Boolean (default: False).\n Whether to fit on randomly augmented samples.\n rounds: Int (default: 1).\n If using data augmentation (`augment=True`),\n this is how many augmentation passes over the data to use.\n seed: Int (default: None). Random seed.\n '
x = np.asarray(x, dtype=K.floatx())
if (x.ndim != 4):
raise ValueError(('Input to `.fit()` should have rank 4. Got array with shape: ' + str(x.shape)))
if (x.shape[self.channel_axis] not in {1, 3, 4}):
warnings.warn((((((((((('Expected input to be images (as Numpy array) following the data format convention "' + self.data_format) + '" (channels on axis ') + str(self.channel_axis)) + '), i.e. expected either 1, 3 or 4 channels on axis ') + str(self.channel_axis)) + '. However, it was passed an array with shape ') + str(x.shape)) + ' (') + str(x.shape[self.channel_axis])) + ' channels).'))
if (seed is not None):
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(tuple(([(rounds * x.shape[0])] + list(x.shape)[1:])), dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[(i + (r * x.shape[0]))] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[(self.channel_axis - 1)] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[(self.channel_axis - 1)] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(x, (x.shape[0], ((x.shape[1] * x.shape[2]) * x.shape[3])))
sigma = (np.dot(flat_x.T, flat_x) / flat_x.shape[0])
(u, s, _) = linalg.svd(sigma)
s_inv = (1.0 / np.sqrt((s[np.newaxis] + self.zca_epsilon)))
self.principal_components = (u * s_inv).dot(u.T) | 9,041,075,605,165,846,000 | Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed. | keras/preprocessing/image.py | fit | HangJie720/keras | python | def fit(self, x, augment=False, rounds=1, seed=None):
'Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.\n\n Only required if `featurewise_center` or\n `featurewise_std_normalization` or `zca_whitening` are set to True.\n\n # Arguments\n x: Sample data. Should have rank 4.\n In case of grayscale data,\n the channels axis should have value 1, and in case\n of RGB data, it should have value 3.\n augment: Boolean (default: False).\n Whether to fit on randomly augmented samples.\n rounds: Int (default: 1).\n If using data augmentation (`augment=True`),\n this is how many augmentation passes over the data to use.\n seed: Int (default: None). Random seed.\n '
x = np.asarray(x, dtype=K.floatx())
if (x.ndim != 4):
raise ValueError(('Input to `.fit()` should have rank 4. Got array with shape: ' + str(x.shape)))
if (x.shape[self.channel_axis] not in {1, 3, 4}):
warnings.warn((((((((((('Expected input to be images (as Numpy array) following the data format convention "' + self.data_format) + '" (channels on axis ') + str(self.channel_axis)) + '), i.e. expected either 1, 3 or 4 channels on axis ') + str(self.channel_axis)) + '. However, it was passed an array with shape ') + str(x.shape)) + ' (') + str(x.shape[self.channel_axis])) + ' channels).'))
if (seed is not None):
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(tuple(([(rounds * x.shape[0])] + list(x.shape)[1:])), dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[(i + (r * x.shape[0]))] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[(self.channel_axis - 1)] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[(self.channel_axis - 1)] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(x, (x.shape[0], ((x.shape[1] * x.shape[2]) * x.shape[3])))
sigma = (np.dot(flat_x.T, flat_x) / flat_x.shape[0])
(u, s, _) = linalg.svd(sigma)
s_inv = (1.0 / np.sqrt((s[np.newaxis] + self.zca_epsilon)))
self.principal_components = (u * s_inv).dot(u.T) |
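
Featurewise statistics have to be computed with `fit()` before `standardize()` can apply them; a short sketch (Keras 2.x assumed):

```python
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

x_train = np.random.rand(64, 32, 32, 3).astype('float32')
datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True)
datagen.fit(x_train)                               # sets datagen.mean and datagen.std
x_norm = datagen.standardize(np.copy(x_train[0]))  # copy: standardize modifies in place
print(x_norm.mean().round(2), x_norm.std().round(2))   # roughly 0.0 and 1.0
```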
def _get_batches_of_transformed_samples(self, index_array):
'Gets a batch of transformed samples.\n\n # Arguments\n index_array: Array of sample indices to include in batch.\n\n # Returns\n A batch of transformed samples.\n '
raise NotImplementedError | 1,617,728,283,657,301,500 | Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples. | keras/preprocessing/image.py | _get_batches_of_transformed_samples | HangJie720/keras | python | def _get_batches_of_transformed_samples(self, index_array):
'Gets a batch of transformed samples.\n\n # Arguments\n index_array: Array of sample indices to include in batch.\n\n # Returns\n A batch of transformed samples.\n '
raise NotImplementedError |
def next(self):
'For python 2.x.\n\n # Returns\n The next batch.\n '
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array) | -782,806,053,129,658,100 | For python 2.x.
# Returns
The next batch. | keras/preprocessing/image.py | next | HangJie720/keras | python | def next(self):
'For python 2.x.\n\n # Returns\n The next batch.\n '
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array) |
def next(self):
'For python 2.x.\n\n # Returns\n The next batch.\n '
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array) | -782,806,053,129,658,100 | For python 2.x.
# Returns
The next batch. | keras/preprocessing/image.py | next | HangJie720/keras | python | def next(self):
'For python 2.x.\n\n # Returns\n The next batch.\n '
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array) |
def reroot(root: expression.Expression, source_path: path.Path) -> expression.Expression:
'Reroot to a new path, maintaining a input proto index.\n\n Similar to root.get_descendant_or_error(source_path): however, this\n method retains the ability to get a map to the original index.\n\n Args:\n root: the original root.\n source_path: the path to the new root.\n\n Returns:\n the new root.\n '
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root | 7,610,696,560,224,901,000 | Reroot to a new path, maintaining an input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root. | struct2tensor/expression_impl/reroot.py | reroot | anukaal/struct2tensor | python | def reroot(root: expression.Expression, source_path: path.Path) -> expression.Expression:
'Reroot to a new path, maintaining a input proto index.\n\n Similar to root.get_descendant_or_error(source_path): however, this\n method retains the ability to get a map to the original index.\n\n Args:\n root: the original root.\n source_path: the path to the new root.\n\n Returns:\n the new root.\n '
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root |
def __init__(self, root: expression.Expression):
'Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n '
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root | 2,506,666,732,620,582,000 | Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor. | struct2tensor/expression_impl/reroot.py | __init__ | anukaal/struct2tensor | python | def __init__(self, root: expression.Expression):
'Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n '
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root |
@classmethod
def ensure_user(cls, email, password, is_internal=False, is_admin=False, is_staff=False, is_researcher=False, is_contributor=True, is_user_manager=False, is_exporter=False, is_active=True, in_beta=False, in_alpha=False, update=False, **kwargs):
'\n Create a new user.\n '
from app.extensions import db
user = User.find(email=email)
if (user is None):
user = User(password=password, email=email, is_internal=is_internal, is_admin=is_admin, is_staff=is_staff, is_active=is_active, is_researcher=is_researcher, is_contributor=is_contributor, is_user_manager=is_user_manager, is_exporter=is_exporter, in_beta=in_beta, in_alpha=in_alpha, **kwargs)
with db.session.begin():
db.session.add(user)
log.info(('New user created: %r' % (user,)))
elif update:
user.password = password
user.is_internal = is_internal
user.is_admin = is_admin
user.is_staff = is_staff
user.is_researcher = is_researcher
user.is_contributor = is_contributor
user.is_user_manager = is_user_manager
user.is_exporter = is_exporter
user.is_active = is_active
user.in_beta = in_beta
user.in_alpha = in_alpha
with db.session.begin():
db.session.merge(user)
log.info(('Updated user: %r' % (user,)))
db.session.refresh(user)
return user | 4,482,907,887,963,877,000 | Create a new user. | app/modules/users/models.py | ensure_user | karenc/houston | python | @classmethod
def ensure_user(cls, email, password, is_internal=False, is_admin=False, is_staff=False, is_researcher=False, is_contributor=True, is_user_manager=False, is_exporter=False, is_active=True, in_beta=False, in_alpha=False, update=False, **kwargs):
'\n \n '
from app.extensions import db
user = User.find(email=email)
if (user is None):
user = User(password=password, email=email, is_internal=is_internal, is_admin=is_admin, is_staff=is_staff, is_active=is_active, is_researcher=is_researcher, is_contributor=is_contributor, is_user_manager=is_user_manager, is_exporter=is_exporter, in_beta=in_beta, in_alpha=in_alpha, **kwargs)
with db.session.begin():
db.session.add(user)
log.info(('New user created: %r' % (user,)))
elif update:
user.password = password
user.is_internal = is_internal
user.is_admin = is_admin
user.is_staff = is_staff
user.is_researcher = is_researcher
user.is_contributor = is_contributor
user.is_user_manager = is_user_manager
user.is_exporter = is_exporter
user.is_active = is_active
user.in_beta = in_beta
user.in_alpha = in_alpha
with db.session.begin():
db.session.merge(user)
log.info(('Updated user: %r' % (user,)))
db.session.refresh(user)
return user |
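# Editor's hedged illustration (not part of the original module): a typical bootstrap call
# for the classmethod above. Email/password are placeholders and an initialized Flask
# app/db context is assumed.
admin = User.ensure_user(
    email='[email protected]',
    password='change-me',
    is_admin=True,
    is_user_manager=True,
    update=True,   # also refreshes roles/password if the user already exists
)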
def test_pcap_input():
'test_pcap_input\n '
print('Testing PcapDataset')
pcap_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_pcap', 'http.pcap')
file_url = ('file://' + pcap_filename)
url_filenames = [file_url]
dataset = pcap_io.PcapDataset(url_filenames, batch=1)
packets_total = 0
for v in dataset:
(packet_timestamp, packet_data) = v
if (packets_total == 0):
assert (packet_timestamp.numpy()[0] == 1084443427.311224)
assert (len(packet_data.numpy()[0]) == 62)
packets_total += 1
assert (packets_total == 43) | 5,723,679,461,885,828,000 | test_pcap_input | tests/test_pcap_eager.py | test_pcap_input | HubBucket-Team/io | python | def test_pcap_input():
'\n '
print('Testing PcapDataset')
pcap_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_pcap', 'http.pcap')
file_url = ('file://' + pcap_filename)
url_filenames = [file_url]
dataset = pcap_io.PcapDataset(url_filenames, batch=1)
packets_total = 0
for v in dataset:
(packet_timestamp, packet_data) = v
if (packets_total == 0):
assert (packet_timestamp.numpy()[0] == 1084443427.311224)
assert (len(packet_data.numpy()[0]) == 62)
packets_total += 1
assert (packets_total == 43) |
def requests_error_handler(func):
"Re-raise ConnectionError with help message.\n Continue on HTTP 404 error (server is on but workflow doesn't exist).\n Otherwise, re-raise from None to hide nested tracebacks.\n "
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if (err.response.status_code == 404):
logger.error("Workflow doesn't seem to exist.")
return
message = '{err}\n\nCromwell server is on but got an HTTP error other than 404. '.format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = '{err}\n\nFailed to connect to Cromwell server. Check if Caper server is running. Also check if hostname and port are correct. method={method}, url={url}'.format(err=err, method=err.request.method, url=err.request.url)
raise ConnectionError(message) from None
return wrapper | -771,253,056,658,749,200 | Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks. | caper/cromwell_rest_api.py | requests_error_handler | ENCODE-DCC/caper | python | def requests_error_handler(func):
"Re-raise ConnectionError with help message.\n Continue on HTTP 404 error (server is on but workflow doesn't exist).\n Otherwise, re-raise from None to hide nested tracebacks.\n "
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if (err.response.status_code == 404):
logger.error("Workflow doesn't seem to exist.")
return
message = '{err}\n\nCromwell server is on but got an HTTP error other than 404. '.format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = '{err}\n\nFailed to connect to Cromwell server. Check if Caper server is running. Also check if hostname and port are correct. method={method}, url={url}'.format(err=err, method=err.request.method, url=err.request.url)
raise ConnectionError(message) from None
return wrapper |
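# Editor's illustrative use of the decorator above (the function body is a made-up example):
# any HTTPError/ConnectionError raised inside is re-raised with the help text.
import requests

@requests_error_handler
def fetch_version(url='http://localhost:8000/engine/v1/version'):
    resp = requests.get(url)
    resp.raise_for_status()
    return resp.json()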
def is_valid_uuid(workflow_id, version=4):
"To validate Cromwell's UUID (lowercase only).\n This does not allow uppercase UUIDs.\n "
if (not isinstance(workflow_id, str)):
return False
if (not workflow_id.islower()):
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True | 3,885,909,614,512,867,000 | To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs. | caper/cromwell_rest_api.py | is_valid_uuid | ENCODE-DCC/caper | python | def is_valid_uuid(workflow_id, version=4):
"To validate Cromwell's UUID (lowercase only).\n This does not allow uppercase UUIDs.\n "
if (not isinstance(workflow_id, str)):
return False
if (not workflow_id.islower()):
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True |
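# Editor's quick behaviour sketch for the validator above (UUID values are arbitrary examples).
assert is_valid_uuid('cf845e41-3204-4b33-9d6d-6186d9652c3f')
assert not is_valid_uuid('CF845E41-3204-4B33-9D6D-6186D9652C3F')  # uppercase is rejected
assert not is_valid_uuid('not-a-uuid')
assert not is_valid_uuid(12345)                                   # non-str is rejected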
def has_wildcard(workflow_id_or_label):
'Check if string or any element in list/tuple has\n a wildcard (? or *).\n\n Args:\n workflow_id_or_label:\n Workflow ID (str) or label (str).\n Or array (list, tuple) of them.\n '
if (workflow_id_or_label is None):
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return (('?' in workflow_id_or_label) or ('*' in workflow_id_or_label)) | -6,452,021,859,195,065,000 | Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them. | caper/cromwell_rest_api.py | has_wildcard | ENCODE-DCC/caper | python | def has_wildcard(workflow_id_or_label):
'Check if string or any element in list/tuple has\n a wildcard (? or *).\n\n Args:\n workflow_id_or_label:\n Workflow ID (str) or label (str).\n Or array (list, tuple) of them.\n '
if (workflow_id_or_label is None):
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return (('?' in workflow_id_or_label) or ('*' in workflow_id_or_label)) |
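# Editor's behaviour sketch for the helper above (labels/IDs are arbitrary examples).
assert has_wildcard('my-workflow-*')            # '*' is a wildcard
assert has_wildcard(['exact-id', 'run-?'])      # any element of a list may carry one
assert not has_wildcard('plain-label')
assert not has_wildcard(None)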
def submit(self, source, dependencies=None, inputs=None, options=None, labels=None, on_hold=False):
'Submit a workflow.\n\n Returns:\n JSON Response from POST request submit a workflow\n '
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r | -423,739,342,744,370,100 | Submit a workflow.
Returns:
JSON Response from POST request to submit a workflow | caper/cromwell_rest_api.py | submit | ENCODE-DCC/caper | python | def submit(self, source, dependencies=None, inputs=None, options=None, labels=None, on_hold=False):
'Submit a workflow.\n\n Returns:\n JSON Response from POST request submit a workflow\n '
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r |
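# Editor's hedged sketch of a submit call; the CromwellRestAPI constructor arguments
# (hostname/port) are inferred from the attributes used in this class and may differ,
# and the file paths are placeholders.
api = CromwellRestAPI(hostname='localhost', port=8000)
resp = api.submit(
    source='my_workflow.wdl',
    inputs='my_inputs.json',
    options='my_options.json',
    on_hold=False,
)
print(resp['id'], resp['status'])  # Cromwell replies with the new workflow UUID and status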
def abort(self, workflow_ids=None, labels=None):
'Abort workflows matching workflow IDs or labels\n\n Returns:\n List of JSON responses from POST request\n for aborting workflows\n '
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=workflow_ids, labels=labels)
if (valid_workflow_ids is None):
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id))
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result | 2,118,086,710,568,472,300 | Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows | caper/cromwell_rest_api.py | abort | ENCODE-DCC/caper | python | def abort(self, workflow_ids=None, labels=None):
'Abort workflows matching workflow IDs or labels\n\n Returns:\n List of JSON responses from POST request\n for aborting workflows\n '
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=workflow_ids, labels=labels)
if (valid_workflow_ids is None):
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id))
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result |
def release_hold(self, workflow_ids=None, labels=None):
'Release hold of workflows matching workflow IDs or labels\n\n Returns:\n List of JSON responses from POST request\n for releasing hold of workflows\n '
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=workflow_ids, labels=labels)
if (valid_workflow_ids is None):
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id))
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result | -4,205,324,820,267,993,600 | Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows | caper/cromwell_rest_api.py | release_hold | ENCODE-DCC/caper | python | def release_hold(self, workflow_ids=None, labels=None):
'Release hold of workflows matching workflow IDs or labels\n\n Returns:\n List of JSON responses from POST request\n for releasing hold of workflows\n '
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=workflow_ids, labels=labels)
if (valid_workflow_ids is None):
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id))
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result |
def get_default_backend(self):
'Retrieve default backend name\n\n Returns:\n Default backend name\n '
return self.get_backends()['defaultBackend'] | 2,825,734,983,184,767,500 | Retrieve default backend name
Returns:
Default backend name | caper/cromwell_rest_api.py | get_default_backend | ENCODE-DCC/caper | python | def get_default_backend(self):
'Retrieve default backend name\n\n Returns:\n Default backend name\n '
return self.get_backends()['defaultBackend'] |
def get_backends(self):
'Retrieve available backend names and default backend name\n\n Returns:\n JSON response with keys "defaultBackend" and "supportedBackends"\n Example: {"defaultBackend":"Local","supportedBackends":\n ["Local","aws","gcp","pbs","sge","slurm"]}\n '
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND) | 6,374,964,352,086,033,000 | Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]} | caper/cromwell_rest_api.py | get_backends | ENCODE-DCC/caper | python | def get_backends(self):
'Retrieve available backend names and default backend name\n\n Returns:\n JSON response with keys "defaultBackend" and "supportedBackends"\n Example: {"defaultBackend":"Local","supportedBackends":\n ["Local","aws","gcp","pbs","sge","slurm"]}\n '
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND) |
def find_valid_workflow_ids(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).\n If so then we don't have to send the server a query to get matching workflow IDs.\n "
if ((not labels) and workflow_ids and all((is_valid_uuid(i) for i in workflow_ids))):
return workflow_ids
else:
workflows = self.find(workflow_ids=workflow_ids, labels=labels, exclude_subworkflow=exclude_subworkflow)
if (not workflows):
return
return [w['id'] for w in workflows] | -8,487,964,851,941,723,000 | Checks if workflow IDs in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs. | caper/cromwell_rest_api.py | find_valid_workflow_ids | ENCODE-DCC/caper | python | def find_valid_workflow_ids(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).\n If so then we don't have to send the server a query to get matching workflow IDs.\n "
if ((not labels) and workflow_ids and all((is_valid_uuid(i) for i in workflow_ids))):
return workflow_ids
else:
workflows = self.find(workflow_ids=workflow_ids, labels=labels, exclude_subworkflow=exclude_subworkflow)
if (not workflows):
return
return [w['id'] for w in workflows] |
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"Retrieve metadata for workflows matching workflow IDs or labels\n\n Args:\n workflow_ids:\n List of workflows IDs to find workflows matched.\n labels:\n List of Caper's string labels to find workflows matched.\n embed_subworkflow:\n Recursively embed subworkflow's metadata in main\n workflow's metadata.\n This flag is to mimic behavior of Cromwell run mode with -m.\n Metadata JSON generated with Cromwell run mode\n includes all subworkflows embedded in main workflow's JSON file.\n "
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=workflow_ids, labels=labels)
if (valid_workflow_ids is None):
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id), params=params)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result | -703,684,439,540,065,200 | Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file. | caper/cromwell_rest_api.py | get_metadata | ENCODE-DCC/caper | python | def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"Retrieve metadata for workflows matching workflow IDs or labels\n\n Args:\n workflow_ids:\n List of workflows IDs to find workflows matched.\n labels:\n List of Caper's string labels to find workflows matched.\n embed_subworkflow:\n Recursively embed subworkflow's metadata in main\n workflow's metadata.\n This flag is to mimic behavior of Cromwell run mode with -m.\n Metadata JSON generated with Cromwell run mode\n includes all subworkflows embedded in main workflow's JSON file.\n "
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=workflow_ids, labels=labels)
if (valid_workflow_ids is None):
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id), params=params)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result |
def get_labels(self, workflow_id):
'Get labels JSON for a specified workflow\n\n Returns:\n Labels JSON for a workflow\n '
if ((workflow_id is None) or (not is_valid_uuid(workflow_id))):
return
r = self.__request_get(CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id))
if (r is None):
return
return r['labels'] | -300,777,881,764,296,450 | Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow | caper/cromwell_rest_api.py | get_labels | ENCODE-DCC/caper | python | def get_labels(self, workflow_id):
'Get labels JSON for a specified workflow\n\n Returns:\n Labels JSON for a workflow\n '
if ((workflow_id is None) or (not is_valid_uuid(workflow_id))):
return
r = self.__request_get(CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id))
if (r is None):
return
return r['labels'] |
def get_label(self, workflow_id, key):
'Get a label for a key in a specified workflow\n\n Returns:\n Value for a specified key in labels JSON for a workflow\n '
labels = self.get_labels(workflow_id)
if (labels is None):
return
if (key in labels):
return labels[key] | 8,523,240,131,071,194,000 | Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow | caper/cromwell_rest_api.py | get_label | ENCODE-DCC/caper | python | def get_label(self, workflow_id, key):
'Get a label for a key in a specified workflow\n\n Returns:\n Value for a specified key in labels JSON for a workflow\n '
labels = self.get_labels(workflow_id)
if (labels is None):
return
if (key in labels):
return labels[key] |
def update_labels(self, workflow_id, labels):
'Update labels for a specified workflow with\n a list of (key, val) tuples\n '
if ((workflow_id is None) or (labels is None)):
return
r = self.__request_patch(CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels)
logger.debug('update_labels: {r}'.format(r=r))
return r | 6,315,808,637,045,976,000 | Update labels for a specified workflow with
a list of (key, val) tuples | caper/cromwell_rest_api.py | update_labels | ENCODE-DCC/caper | python | def update_labels(self, workflow_id, labels):
'Update labels for a specified workflow with\n a list of (key, val) tuples\n '
if ((workflow_id is None) or (labels is None)):
return
r = self.__request_patch(CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels)
logger.debug('update_labels: {r}'.format(r=r))
return r |
def find_with_wildcard(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
'Retrieves all workflows from Cromwell server.\n And then find matching workflows by ID or labels.\n Wildcards (? and *) are allowed for both parameters.\n '
result = []
if ((not workflow_ids) and (not labels)):
return result
resp = self.__request_get(CromwellRestAPI.ENDPOINT_WORKFLOWS, params={'additionalQueryResultFields': 'labels', 'includeSubworkflows': (not exclude_subworkflow)})
if (resp and resp['results']):
for workflow in resp['results']:
matched = False
if ('id' not in workflow):
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if (labels and ('labels' in workflow)):
for (k, v) in labels:
v_ = workflow['labels'].get(k)
if (not v_):
continue
if (isinstance(v_, str) and isinstance(v, str)):
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif (v_ == v):
result.append(workflow)
break
logger.debug('find_with_wildcard: workflow_ids={workflow_ids}, labels={labels}, result={result}'.format(workflow_ids=workflow_ids, labels=labels, result=result))
return result | -267,213,617,896,036,400 | Retrieves all workflows from Cromwell server.
And then finds matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters. | caper/cromwell_rest_api.py | find_with_wildcard | ENCODE-DCC/caper | python | def find_with_wildcard(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
'Retrieves all workflows from Cromwell server.\n And then find matching workflows by ID or labels.\n Wildcards (? and *) are allowed for both parameters.\n '
result = []
if ((not workflow_ids) and (not labels)):
return result
resp = self.__request_get(CromwellRestAPI.ENDPOINT_WORKFLOWS, params={'additionalQueryResultFields': 'labels', 'includeSubworkflows': (not exclude_subworkflow)})
if (resp and resp['results']):
for workflow in resp['results']:
matched = False
if ('id' not in workflow):
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if (labels and ('labels' in workflow)):
for (k, v) in labels:
v_ = workflow['labels'].get(k)
if (not v_):
continue
if (isinstance(v_, str) and isinstance(v, str)):
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif (v_ == v):
result.append(workflow)
break
logger.debug('find_with_wildcard: workflow_ids={workflow_ids}, labels={labels}, result={result}'.format(workflow_ids=workflow_ids, labels=labels, result=result))
return result |
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
'Finds workflows by exactly matching workflow IDs (UUIDs).\n Does OR search for a list of workflow IDs.\n Invalid UUID in `workflows_ids` will be ignored without warning.\n Wildcards (? and *) are not allowed.\n\n Args:\n workflow_ids:\n List of workflow ID (UUID) strings.\n Lower-case only (Cromwell uses lower-case UUIDs).\n Returns:\n List of matched workflow JSONs.\n '
if has_wildcard(workflow_ids):
raise ValueError('Wildcards are not allowed in workflow_ids. ids={ids}'.format(ids=workflow_ids))
result = []
if workflow_ids:
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(CromwellRestAPI.ENDPOINT_WORKFLOWS, params={'additionalQueryResultFields': 'labels', 'includeSubworkflows': (not exclude_subworkflow), 'id': workflow_ids})
if (resp and resp['results']):
result.extend(resp['results'])
logger.debug('find_by_workflow_ids: workflow_ids={workflow_ids}, result={result}'.format(workflow_ids=workflow_ids, result=result))
return result | -2,741,076,046,262,680,600 | Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUID in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs. | caper/cromwell_rest_api.py | find_by_workflow_ids | ENCODE-DCC/caper | python | def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
'Finds workflows by exactly matching workflow IDs (UUIDs).\n Does OR search for a list of workflow IDs.\n Invalid UUID in `workflows_ids` will be ignored without warning.\n Wildcards (? and *) are not allowed.\n\n Args:\n workflow_ids:\n List of workflow ID (UUID) strings.\n Lower-case only (Cromwell uses lower-case UUIDs).\n Returns:\n List of matched workflow JSONs.\n '
if has_wildcard(workflow_ids):
raise ValueError('Wildcards are not allowed in workflow_ids. ids={ids}'.format(ids=workflow_ids))
result = []
if workflow_ids:
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(CromwellRestAPI.ENDPOINT_WORKFLOWS, params={'additionalQueryResultFields': 'labels', 'includeSubworkflows': (not exclude_subworkflow), 'id': workflow_ids})
if (resp and resp['results']):
result.extend(resp['results'])
logger.debug('find_by_workflow_ids: workflow_ids={workflow_ids}, result={result}'.format(workflow_ids=workflow_ids, result=result))
return result |
def find_by_labels(self, labels=None, exclude_subworkflow=True):
'Finds workflows by exactly matching labels (key, value) tuples.\n Does OR search for a list of label key/value pairs.\n Wildcards (? and *) are not allowed.\n\n Args:\n labels:\n List of labels (key/value pairs).\n Returns:\n List of matched workflow JSONs.\n '
if has_wildcard(labels):
raise ValueError('Wildcards are not allowed in labels. labels={labels}'.format(labels=labels))
result = []
if labels:
labels = ['{key}:{val}'.format(key=key, val=val) for (key, val) in labels if val]
resp = self.__request_get(CromwellRestAPI.ENDPOINT_WORKFLOWS, params={'additionalQueryResultFields': 'labels', 'includeSubworkflows': (not exclude_subworkflow), 'labelor': labels})
if (resp and resp['results']):
result.extend(resp['results'])
logger.debug('find_by_labels: labels={labels}, result={result}'.format(labels=labels, result=result))
return result | 2,556,947,127,859,812,400 | Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs. | caper/cromwell_rest_api.py | find_by_labels | ENCODE-DCC/caper | python | def find_by_labels(self, labels=None, exclude_subworkflow=True):
'Finds workflows by exactly matching labels (key, value) tuples.\n Does OR search for a list of label key/value pairs.\n Wildcards (? and *) are not allowed.\n\n Args:\n labels:\n List of labels (key/value pairs).\n Returns:\n List of matched workflow JSONs.\n '
if has_wildcard(labels):
raise ValueError('Wildcards are not allowed in labels. labels={labels}'.format(labels=labels))
result = []
if labels:
labels = ['{key}:{val}'.format(key=key, val=val) for (key, val) in labels if val]
resp = self.__request_get(CromwellRestAPI.ENDPOINT_WORKFLOWS, params={'additionalQueryResultFields': 'labels', 'includeSubworkflows': (not exclude_subworkflow), 'labelor': labels})
if (resp and resp['results']):
result.extend(resp['results'])
logger.debug('find_by_labels: labels={labels}, result={result}'.format(labels=labels, result=result))
return result |
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
'Wrapper for the following three find functions.\n - find_with_wildcard\n - find_by_workflow_ids\n - find_by_labels\n\n Find workflows by matching workflow IDs or label (key, value) tuples.\n Does OR search for both parameters.\n Wildcards (? and *) in both parameters are allowed but Caper will\n retrieve a list of all workflows, which can lead to HTTP 503 of\n Cromwell server if there are many subworkflows and not `exclude_subworkflow`.\n\n Args:\n workflow_ids:\n List of workflow ID (UUID) strings.\n Lower-case only.\n labels:\n List of labels (key/value pairs).\n exclude_subworkflow:\n Exclude subworkflows.\n Returns:\n List of matched workflow JSONs.\n '
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(([val for (key, val) in labels] if labels else None))
if (wildcard_found_in_workflow_ids or wildcard_found_in_labels):
return self.find_with_wildcard(workflow_ids=workflow_ids, labels=labels, exclude_subworkflow=exclude_subworkflow)
result = []
result_by_labels = self.find_by_labels(labels=labels, exclude_subworkflow=exclude_subworkflow)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend([workflow for workflow in self.find_by_workflow_ids(workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow) if (workflow['id'] not in workflow_ids_found_by_labels)])
return result | -1,235,322,776,911,959,000 | Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) in both parameters are allowed but Caper will
retrieve a list of all workflows, which can lead to HTTP 503 of
Cromwell server if there are many subworkflows and not `exclude_subworkflow`.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs. | caper/cromwell_rest_api.py | find | ENCODE-DCC/caper | python | def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
'Wrapper for the following three find functions.\n - find_with_wildcard\n - find_by_workflow_ids\n - find_by_labels\n\n Find workflows by matching workflow IDs or label (key, value) tuples.\n Does OR search for both parameters.\n Wildcards (? and *) in both parameters are allowed but Caper will\n retrieve a list of all workflows, which can lead to HTTP 503 of\n Cromwell server if there are many subworkflows and not `exclude_subworkflow`.\n\n Args:\n workflow_ids:\n List of workflow ID (UUID) strings.\n Lower-case only.\n labels:\n List of labels (key/value pairs).\n exclude_subworkflow:\n Exclude subworkflows.\n Returns:\n List of matched workflow JSONs.\n '
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(([val for (key, val) in labels] if labels else None))
if (wildcard_found_in_workflow_ids or wildcard_found_in_labels):
return self.find_with_wildcard(workflow_ids=workflow_ids, labels=labels, exclude_subworkflow=exclude_subworkflow)
result = []
result_by_labels = self.find_by_labels(labels=labels, exclude_subworkflow=exclude_subworkflow)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend([workflow for workflow in self.find_by_workflow_ids(workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow) if (workflow['id'] not in workflow_ids_found_by_labels)])
return result |
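# Editor's hedged sketch tying the query helpers above together; it reuses the `api`
# object from the submit sketch earlier and assumes a running Cromwell server.
# The label key/value are placeholders.
matched = api.find(labels=[('caper-str-label', 'my-atac-run')])
for wf in matched:
    print(wf['id'], wf.get('status'))
metadata = api.get_metadata(
    workflow_ids=[wf['id'] for wf in matched],
    embed_subworkflow=True,   # mimic Cromwell run-mode style metadata with subworkflows embedded
)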
def __init_auth(self):
'Init auth object\n '
if ((self._user is not None) and (self._password is not None)):
self._auth = (self._user, self._password)
else:
self._auth = None | 4,628,404,205,856,784,000 | Init auth object | caper/cromwell_rest_api.py | __init_auth | ENCODE-DCC/caper | python | def __init_auth(self):
'\n '
if ((self._user is not None) and (self._password is not None)):
self._auth = (self._user, self._password)
else:
self._auth = None |
@requests_error_handler
def __request_get(self, endpoint, params=None):
'GET request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.get(url, auth=self._auth, params=params, headers={'accept': 'application/json'})
resp.raise_for_status()
return resp.json() | -7,574,936,274,253,186,000 | GET request
Returns:
JSON response | caper/cromwell_rest_api.py | __request_get | ENCODE-DCC/caper | python | @requests_error_handler
def __request_get(self, endpoint, params=None):
'GET request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.get(url, auth=self._auth, params=params, headers={'accept': 'application/json'})
resp.raise_for_status()
return resp.json() |
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
'POST request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.post(url, files=manifest, auth=self._auth, headers={'accept': 'application/json'})
resp.raise_for_status()
return resp.json() | 8,571,179,937,235,917,000 | POST request
Returns:
JSON response | caper/cromwell_rest_api.py | __request_post | ENCODE-DCC/caper | python | @requests_error_handler
def __request_post(self, endpoint, manifest=None):
'POST request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.post(url, files=manifest, auth=self._auth, headers={'accept': 'application/json'})
resp.raise_for_status()
return resp.json() |
@requests_error_handler
def __request_patch(self, endpoint, data):
'POST request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.patch(url, data=data, auth=self._auth, headers={'accept': 'application/json', 'content-type': 'application/json'})
resp.raise_for_status()
return resp.json() | -8,638,686,449,579,552,000 | PATCH request
Returns:
JSON response | caper/cromwell_rest_api.py | __request_patch | ENCODE-DCC/caper | python | @requests_error_handler
def __request_patch(self, endpoint, data):
'POST request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.patch(url, data=data, auth=self._auth, headers={'accept': 'application/json', 'content-type': 'application/json'})
resp.raise_for_status()
return resp.json() |
def _print_job_folders(self, file_list, show_plaster_json=True):
'\n file_list is a list of munches [Munch(folder="folder", name="foo.txt", size=123, mtime=123456789)]\n '
if (len(file_list) == 0):
print('No files found')
return
folders = {file.folder: Munch(folder=file.folder, size_gb=0, file_count=0) for file in file_list}
gb = (1024 ** 3)
total_gb = 0
for file in file_list:
folder = file.folder
total_gb += (file.size / gb)
folders[folder].size_gb += (file.size / gb)
folders[folder].file_count += 1
df = pd.DataFrame.from_dict(folders, orient='index')
formatters = dict(size_gb='{:10.2f}'.format, folder='{:<40.40s}'.format, file_count='{:.0f}'.format)
columns = ['folder', 'size_gb', 'file_count']
df = df.append(dict(folder='TOTAL', size_gb=total_gb), ignore_index=True)
print(df.to_string(columns=columns, formatters=formatters)) | 6,601,777,888,008,007,000 | file_list is a list of munches [Munch(folder="folder", name="foo.txt", size=123, mtime=123456789)] | plaster/main.py | _print_job_folders | erisyon/plaster | python | def _print_job_folders(self, file_list, show_plaster_json=True):
'\n \n '
if (len(file_list) == 0):
print('No files found')
return
folders = {file.folder: Munch(folder=file.folder, size_gb=0, file_count=0) for file in file_list}
gb = (1024 ** 3)
total_gb = 0
for file in file_list:
folder = file.folder
total_gb += (file.size / gb)
folders[folder].size_gb += (file.size / gb)
folders[folder].file_count += 1
df = pd.DataFrame.from_dict(folders, orient='index')
formatters = dict(size_gb='{:10.2f}'.format, folder='{:<40.40s}'.format, file_count='{:.0f}'.format)
columns = ['folder', 'size_gb', 'file_count']
df = df.append(dict(folder='TOTAL', size_gb=total_gb), ignore_index=True)
print(df.to_string(columns=columns, formatters=formatters)) |
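# Editor's illustration of the `file_list` shape described in the docstring above
# (values are made up); each entry is a Munch with folder/name/size/mtime fields.
from munch import Munch
file_list = [
    Munch(folder='jobs_folder/run_00', name='sig.npy', size=12_582_912, mtime=1600000000),
    Munch(folder='jobs_folder/run_00', name='noi.npy', size=4_194_304, mtime=1600000001),
    Munch(folder='jobs_folder/run_01', name='df.csv', size=65_536, mtime=1600000002),
]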
def run_nbstripout(self):
'Strip all notebooks of output to save space in commits'
important('Stripping Notebooks...')
result = ((local['find'][('.', '-type', 'f', '-not', '-path', '*/\\.*', '-name', '*.ipynb', '-print')] | local['xargs']['nbstripout']) & TF(FG=True))
if (not result):
raise CommandError | -8,431,873,764,699,112,000 | Strip all notebooks of output to save space in commits | plaster/main.py | run_nbstripout | erisyon/plaster | python | def run_nbstripout(self):
important('Stripping Notebooks...')
result = ((local['find'][('.', '-type', 'f', '-not', '-path', '*/\\.*', '-name', '*.ipynb', '-print')] | local['xargs']['nbstripout']) & TF(FG=True))
if (not result):
raise CommandError |
def sigproc_test(self, jobs_folder):
'\n This is adapted from zest_sigproc_v2_integration\n '
profile_folder = (jobs_folder / '_profile')
profile_folder.delete()
job_folder = (profile_folder / 'sigproc_test')
source_folder = (profile_folder / '_synth_field')
job_folder.mkdir()
source_folder.mkdir()
dim = (1024, 1024)
n_channels = 1
n_cycles = 10
n_peaks = 500
psf_width = 1.5
bg_mean = 100.0
bg_std = 30.0
gain = 5000.0
def _synth_field(fl_i):
with synth.Synth(n_channels=n_channels, n_cycles=n_cycles, dim=dim) as s:
peaks = synth.PeaksModelGaussianCircular(n_peaks=n_peaks).locs_randomize().widths_uniform(psf_width).amps_constant(gain)
synth.CameraModel(bg_mean=bg_mean, bg_std=bg_std)
synth.HaloModel()
synth.IlluminationQuadraticFalloffModel()
chcy_ims = s.render_chcy(0)
for ch_i in range(chcy_ims.shape[0]):
for cy_i in range(chcy_ims.shape[1]):
np.save(str((source_folder / f'area_{fl_i:03d}_cell_000_{ch_i:03d}nm_{cy_i:03d}.npy')), chcy_ims[(ch_i, cy_i)])
n_fields = 2
for fl_i in range(n_fields):
_synth_field(fl_i)
run_p([f'gen', f'sigproc_v2', f'--job={job_folder}', f'--sigproc_source={source_folder}', f'--force', f'--self_calib'])
log_file = (local.path(local.env['PLASTER_ROOT']) / 'plaster.log')
log_file.delete()
run_p(['run', job_folder, '--no_progress', '--skip_reports'])
profile_lines = profile_from_file(log_file)
with colors.fg.DeepSkyBlue3:
print()
print(h_line('--'))
print('PROFILE RESULTS')
print(h_line('--'))
profile_dump(profile_lines) | 5,582,201,818,341,143,000 | This is adapted from zest_sigproc_v2_integration | plaster/main.py | sigproc_test | erisyon/plaster | python | def sigproc_test(self, jobs_folder):
'\n \n '
profile_folder = (jobs_folder / '_profile')
profile_folder.delete()
job_folder = (profile_folder / 'sigproc_test')
source_folder = (profile_folder / '_synth_field')
job_folder.mkdir()
source_folder.mkdir()
dim = (1024, 1024)
n_channels = 1
n_cycles = 10
n_peaks = 500
psf_width = 1.5
bg_mean = 100.0
bg_std = 30.0
gain = 5000.0
def _synth_field(fl_i):
with synth.Synth(n_channels=n_channels, n_cycles=n_cycles, dim=dim) as s:
peaks = synth.PeaksModelGaussianCircular(n_peaks=n_peaks).locs_randomize().widths_uniform(psf_width).amps_constant(gain)
synth.CameraModel(bg_mean=bg_mean, bg_std=bg_std)
synth.HaloModel()
synth.IlluminationQuadraticFalloffModel()
chcy_ims = s.render_chcy(0)
for ch_i in range(chcy_ims.shape[0]):
for cy_i in range(chcy_ims.shape[1]):
np.save(str((source_folder / f'area_{fl_i:03d}_cell_000_{ch_i:03d}nm_{cy_i:03d}.npy')), chcy_ims[(ch_i, cy_i)])
n_fields = 2
for fl_i in range(n_fields):
_synth_field(fl_i)
run_p([f'gen', f'sigproc_v2', f'--job={job_folder}', f'--sigproc_source={source_folder}', f'--force', f'--self_calib'])
log_file = (local.path(local.env['PLASTER_ROOT']) / 'plaster.log')
log_file.delete()
run_p(['run', job_folder, '--no_progress', '--skip_reports'])
profile_lines = profile_from_file(log_file)
with colors.fg.DeepSkyBlue3:
print()
print(h_line('--'))
print('PROFILE RESULTS')
print(h_line('--'))
profile_dump(profile_lines) |
def main(self, run_path, symbol):
'\n run_path: path to the run folder\n symbol: Eg: "sigproc_v2.sig"\n '
run = RunResult(run_path)
parts = symbol.split('.')
result = run[parts[0]]
sym = getattr(result, parts[1])
if callable(sym):
val = sym()
else:
val = sym
if (self.save_npy is not None):
assert isinstance(val, np.ndarray)
np.save(self.save_npy, val)
if (self.save_csv is not None):
assert isinstance(val, pd.DataFrame)
val.to_csv(self.save_csv)
if (self.save_pkl is not None):
assert isinstance(val, pd.DataFrame)
val.to_pickle(self.save_pkl) | 6,500,575,292,112,177,000 | run_path: path to the run folder
symbol: Eg: "sigproc_v2.sig" | plaster/main.py | main | erisyon/plaster | python | def main(self, run_path, symbol):
'\n run_path: path to the run folder\n symbol: Eg: "sigproc_v2.sig"\n '
run = RunResult(run_path)
parts = symbol.split('.')
result = run[parts[0]]
sym = getattr(result, parts[1])
if callable(sym):
val = sym()
else:
val = sym
if (self.save_npy is not None):
assert isinstance(val, np.ndarray)
np.save(self.save_npy, val)
if (self.save_csv is not None):
assert isinstance(val, pd.DataFrame)
val.to_csv(self.save_csv)
if (self.save_pkl is not None):
assert isinstance(val, pd.DataFrame)
val.to_pickle(self.save_pkl) |
def main(self, run_path):
"\n run_path: path to the run folder (don't forget this is a subfolder of job)\n "
run = RunResult(run_path)
name = run.run_folder.parent.name
prefix = f'{name}__'
tell(f'Prefixing saved files with {prefix}')
tell('Saving sig.npy')
np.save(f'{prefix}sig.npy', run.sigproc_v2.sig())
tell('Saving noi.npy')
np.save(f'{prefix}noi.npy', run.sigproc_v2.noi())
tell('Saving df.csv')
run.sigproc_v2.fields__n_peaks__peaks__radmat().to_csv(f'{prefix}df.csv')
ims = []
for fl_i in range(run.sigproc_v2.n_fields):
tell(f'Loading align field {fl_i} of {run.sigproc_v2.n_fields}')
ims += [run.sigproc_v2.aln_unfilt_chcy_ims(fl_i)]
tell('Saving aln_ims.npy')
np.save(f'{prefix}aln_ims.npy', np.stack(ims))
tell('Saving example.py')
utils.save(f'{prefix}example.py', (((f'''import numpy as np
''' + f'''import pandas as pd
''') + f'prefix = "{prefix}"') + utils.smart_wrap('\n sig = np.load(f"{prefix}sig.npy")\n noi = np.load(f"{prefix}noi.npy")\n df = pd.read_csv(f"{prefix}df.csv")\n ims = np.load(f"{prefix}aln_ims.npy", mmap_mode="r")\n n_peaks = sig.shape[0]\n n_fields, n_channels, n_cycles, im_mea, _ = ims.shape\n\n # Examine some peak\n peak_i = 123 # 0 <= peak_i < n_peaks\n ch_i = 0 # 0 <= ch_i < n_channels\n cy_i = 0 # 0 <= cy_i < n_cycles\n y, x, fl_i = df[df.peak_i == peak_i][["aln_y", "aln_x", "field_i"]].drop_duplicates().values.flatten().astype(int)\n peak_radius = 10\n peak_im = ims[fl_i, ch_i, cy_i, y-peak_radius:y+peak_radius, x-peak_radius:x+peak_radius]\n # Now peak_im is a centered sub-image of that peak with shape=(peak_radius, peak_radius)\n ', width=200, assert_if_exceeds_width=True)))
tell('\n\nThe following commands may be useful:')
tell(f' aws s3 cp {prefix}sig.npy s3://erisyon-public')
tell(f' aws s3 cp {prefix}noi.npy s3://erisyon-public')
tell(f' aws s3 cp {prefix}df.csv s3://erisyon-public')
tell(f' aws s3 cp {prefix}aln_ims.npy s3://erisyon-public')
tell(f' aws s3 cp {prefix}example.py s3://erisyon-public') | 1,042,366,781,578,320,500 | run_path: path to the run folder (don't forget this is a subfolder of job) | plaster/main.py | main | erisyon/plaster | python | def main(self, run_path):
"\n \n "
run = RunResult(run_path)
name = run.run_folder.parent.name
prefix = f'{name}__'
tell(f'Prefixing saved files with {prefix}')
tell('Saving sig.npy')
np.save(f'{prefix}sig.npy', run.sigproc_v2.sig())
tell('Saving noi.npy')
np.save(f'{prefix}noi.npy', run.sigproc_v2.noi())
tell('Saving df.csv')
run.sigproc_v2.fields__n_peaks__peaks__radmat().to_csv(f'{prefix}df.csv')
ims = []
for fl_i in range(run.sigproc_v2.n_fields):
tell(f'Loading align field {fl_i} of {run.sigproc_v2.n_fields}')
ims += [run.sigproc_v2.aln_unfilt_chcy_ims(fl_i)]
tell('Saving aln_ims.npy')
np.save(f'{prefix}aln_ims.npy', np.stack(ims))
tell('Saving example.py')
utils.save(f'{prefix}example.py', (((f'''import numpy as np
''' + f'''import pandas as pd
''') + f'prefix = "{prefix}"') + utils.smart_wrap('\n sig = np.load(f"{prefix}sig.npy")\n noi = np.load(f"{prefix}noi.npy")\n df = pd.read_csv(f"{prefix}df.csv")\n ims = np.load(f"{prefix}aln_ims.npy", mmap_mode="r")\n n_peaks = sig.shape[0]\n n_fields, n_channels, n_cycles, im_mea, _ = ims.shape\n\n # Examine some peak\n peak_i = 123 # 0 <= peak_i < n_peaks\n ch_i = 0 # 0 <= ch_i < n_channels\n cy_i = 0 # 0 <= cy_i < n_cycles\n y, x, fl_i = df[df.peak_i == peak_i][["aln_y", "aln_x", "field_i"]].drop_duplicates().values.flatten().astype(int)\n peak_radius = 10\n peak_im = ims[fl_i, ch_i, cy_i, y-peak_radius:y+peak_radius, x-peak_radius:x+peak_radius]\n # Now peak_im is a centered sub-image of that peak with shape=(peak_radius, peak_radius)\n ', width=200, assert_if_exceeds_width=True)))
tell('\n\nThe following commands may be useful:')
tell(f' aws s3 cp {prefix}sig.npy s3://erisyon-public')
tell(f' aws s3 cp {prefix}noi.npy s3://erisyon-public')
tell(f' aws s3 cp {prefix}df.csv s3://erisyon-public')
tell(f' aws s3 cp {prefix}aln_ims.npy s3://erisyon-public')
tell(f' aws s3 cp {prefix}example.py s3://erisyon-public') |
def solve_polynomial_join(left, right, reverse=0):
"\n Solves for a parametric cubic polynomial curve joining the right side of left\n to the left side of right. The curve matches slope and position at it's\n boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1\n being the right.\n\n method: parametric cubic matching position and slope of endpoints.\n This ends up being cheap to compute, since the matrix is\n known (interval of parameter is always 0 to 1) and so the \n inverse can be precomputed. \n minv is inverse of m, where:\n m = array( [ [ a**3, a**2, a, 1 ],\n [ b**3, b**2, b, 1 ], \n [ 3*a**2, 2*a , 1, 0 ],\n [ 3*b**2, 2*b , 1, 0 ] ] )\n is the matrix for the linear system:\n m * coeff = v,\n with v = [ x(0) x(1) dx/dt(0) dx/dt(1) ].\n Here a = 0 and b = 1 so m and it's inverse is always the same.\n \n "
minv = matrix([[2.0, (- 2.0), 1.0, 1.0], [(- 3.0), 3.0, (- 2.0), (- 1.0)], [0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
lr = len(right)
ll = len(left)
nl = (ll / 4)
nr = (lr / 4)
slope = (lambda v: (v[0] - v[(- 1)]))
length = (lambda x, y: hypot(diff(x), diff(y)).sum())
if ((nr < 2) and (nl < 2)):
lnorm = length(left.x, left.y)
rnorm = length(right.x, right.y)
dly = (diff(left.y).mean() / lnorm)
dlx = (diff(left.x).mean() / lnorm)
dry = (diff(right.y).mean() / rnorm)
drx = (diff(right.x).mean() / rnorm)
nl = 0
nr = (lr - 1)
elif (nr < 2):
lnorm = length(left.x[:nl], left.y[:nl])
rnorm = length(right.x, right.y)
dly = ((- slope(left.y[(- nl):])) / lnorm)
dlx = ((- slope(left.x[(- nl):])) / lnorm)
dry = (diff(right.y).mean() / rnorm)
drx = (diff(right.x).mean() / rnorm)
nr = (lr - 1)
elif (nl < 2):
rnorm = length(right.x[:nr], right.y[:nr])
lnorm = length(left.x, left.y)
dry = ((- slope(right.y[:nr])) / rnorm)
drx = ((- slope(right.x[:nr])) / rnorm)
dly = (diff(left.y).mean() / lnorm)
dlx = (diff(left.x).mean() / lnorm)
nl = 0
else:
rnorm = length(right.x[:nr], right.y[:nr])
lnorm = length(left.x[(- nl):], left.y[(- nl):])
dry = ((- slope(right.y[:nr])) / rnorm)
drx = ((- slope(right.x[:nr])) / rnorm)
dly = ((- slope(left.y[(- nl):])) / lnorm)
dlx = ((- slope(left.x[(- nl):])) / lnorm)
rnorm = hypot((left.x[0] - right.x[0]), (left.y[0] - right.y[0]))
lnorm = hypot((left.x[(- 1)] - right.x[0]), (left.y[(- 1)] - right.y[0]))
if (not isfinite(dlx)):
dlx = ((left.x[0] - right.x[0]) / lnorm)
if (not isfinite(dly)):
dly = ((left.y[0] - right.y[0]) / lnorm)
if (not isfinite(drx)):
drx = ((left.x[(- 1)] - right.x[0]) / rnorm)
if (not isfinite(dry)):
dry = ((left.y[(- 1)] - right.y[0]) / rnorm)
if reverse:
dlx = (- dlx)
dly = (- dly)
drx = (- drx)
dry = (- dry)
ry = right.y[0]
ly = left.y[(- 1)]
rx = right.x[0]
lx = left.x[(- 1)]
L = hypot((rx - lx), (ry - ly))
print(('L:%g' % L))
yv = matrix([[ly], [ry], [(dly * L)], [(dry * L)]])
xv = matrix([[lx], [rx], [(dlx * L)], [(drx * L)]])
cx = (minv * xv)
cy = (minv * yv)
if (not (isfinite(cx).any() and isfinite(cy).any())):
pdb.set_trace()
return [array(t).squeeze() for t in (cx, cy)] | 4,725,810,013,788,473,000 | Solves for a parametric cubic polynomial curve joining the right side of left
to the left side of right. The curve matches slope and position at its
boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1
being the right.
method: parametric cubic matching position and slope of endpoints.
This ends up being cheap to compute, since the matrix is
known (interval of parameter is always 0 to 1) and so the
inverse can be precomputed.
minv is inverse of m, where:
m = array( [ [ a**3, a**2, a, 1 ],
[ b**3, b**2, b, 1 ],
[ 3*a**2, 2*a , 1, 0 ],
[ 3*b**2, 2*b , 1, 0 ] ] )
is the matrix for the linear system:
m * coeff = v,
with v = [ x(0) x(1) dx/dt(0) dx/dt(1) ].
Here a = 0 and b = 1 so m and its inverse is always the same. | whisk/test_merge3.py | solve_polynomial_join | aiporre/whisk | python | def solve_polynomial_join(left, right, reverse=0):
"\n Solves for a parametric cubic polynomial curve joining the right side of left\n to the left side of right. The curve matches slope and position at it's\n boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1\n being the right.\n\n method: parametric cubic matching position and slope of endpoints.\n This ends up being cheap to compute, since the matrix is\n known (interval of parameter is always 0 to 1) and so the \n inverse can be precomputed. \n minv is inverse of m, where:\n m = array( [ [ a**3, a**2, a, 1 ],\n [ b**3, b**2, b, 1 ], \n [ 3*a**2, 2*a , 1, 0 ],\n [ 3*b**2, 2*b , 1, 0 ] ] )\n is the matrix for the linear system:\n m * coeff = v,\n with v = [ x(0) x(1) dx/dt(0) dx/dt(1) ].\n Here a = 0 and b = 1 so m and it's inverse is always the same.\n \n "
minv = matrix([[2.0, (- 2.0), 1.0, 1.0], [(- 3.0), 3.0, (- 2.0), (- 1.0)], [0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
lr = len(right)
ll = len(left)
nl = (ll / 4)
nr = (lr / 4)
slope = (lambda v: (v[0] - v[(- 1)]))
length = (lambda x, y: hypot(diff(x), diff(y)).sum())
if ((nr < 2) and (nl < 2)):
lnorm = length(left.x, left.y)
rnorm = length(right.x, right.y)
dly = (diff(left.y).mean() / lnorm)
dlx = (diff(left.x).mean() / lnorm)
dry = (diff(right.y).mean() / rnorm)
drx = (diff(right.x).mean() / rnorm)
nl = 0
nr = (lr - 1)
elif (nr < 2):
lnorm = length(left.x[:nl], left.y[:nl])
rnorm = length(right.x, right.y)
dly = ((- slope(left.y[(- nl):])) / lnorm)
dlx = ((- slope(left.x[(- nl):])) / lnorm)
dry = (diff(right.y).mean() / rnorm)
drx = (diff(right.x).mean() / rnorm)
nr = (lr - 1)
elif (nl < 2):
rnorm = length(right.x[:nr], right.y[:nr])
lnorm = length(left.x, left.y)
dry = ((- slope(right.y[:nr])) / rnorm)
drx = ((- slope(right.x[:nr])) / rnorm)
dly = (diff(left.y).mean() / lnorm)
dlx = (diff(left.x).mean() / lnorm)
nl = 0
else:
rnorm = length(right.x[:nr], right.y[:nr])
lnorm = length(left.x[(- nl):], left.y[(- nl):])
dry = ((- slope(right.y[:nr])) / rnorm)
drx = ((- slope(right.x[:nr])) / rnorm)
dly = ((- slope(left.y[(- nl):])) / lnorm)
dlx = ((- slope(left.x[(- nl):])) / lnorm)
rnorm = hypot((left.x[0] - right.x[0]), (left.y[0] - right.y[0]))
lnorm = hypot((left.x[(- 1)] - right.x[0]), (left.y[(- 1)] - right.y[0]))
if (not isfinite(dlx)):
dlx = ((left.x[0] - right.x[0]) / lnorm)
if (not isfinite(dly)):
dly = ((left.y[0] - right.y[0]) / lnorm)
if (not isfinite(drx)):
drx = ((left.x[(- 1)] - right.x[0]) / rnorm)
if (not isfinite(dry)):
dry = ((left.y[(- 1)] - right.y[0]) / rnorm)
if reverse:
dlx = (- dlx)
dly = (- dly)
drx = (- drx)
dry = (- dry)
ry = right.y[0]
ly = left.y[(- 1)]
rx = right.x[0]
lx = left.x[(- 1)]
L = hypot((rx - lx), (ry - ly))
print(('L:%g' % L))
yv = matrix([[ly], [ry], [(dly * L)], [(dry * L)]])
xv = matrix([[lx], [rx], [(dlx * L)], [(drx * L)]])
cx = (minv * xv)
cy = (minv * yv)
if (not (isfinite(cx).any() and isfinite(cy).any())):
pdb.set_trace()
return [array(t).squeeze() for t in (cx, cy)] |
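# Editor's numerical sanity check added for illustration: for a=0, b=1 the Hermite system
# matrix described in the docstring above has the fixed inverse `minv`, so solving
# m*coeff = v reduces to a single matrix multiply. The values in v are arbitrary.
import numpy as np

a, b = 0.0, 1.0
m = np.array([[a**3, a**2, a, 1],
              [b**3, b**2, b, 1],
              [3*a**2, 2*a, 1, 0],
              [3*b**2, 2*b, 1, 0]])
minv = np.array([[2., -2., 1., 1.],
                 [-3., 3., -2., -1.],
                 [0., 0., 1., 0.],
                 [1., 0., 0., 0.]])
assert np.allclose(np.linalg.inv(m), minv)

v = np.array([0.0, 5.0, 1.0, -1.0])      # [x(0), x(1), dx/dt(0), dx/dt(1)]
c3, c2, c1, c0 = minv @ v
t = np.linspace(0.0, 1.0, 5)
x = c3*t**3 + c2*t**2 + c1*t + c0        # endpoints 0 and 5, slopes 1 and -1
assert np.isclose(x[0], 0.0) and np.isclose(x[-1], 5.0)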
def filter_ends(wv, min_score, shape, border=10):
'\n Return candidate ends for joining.\n\n Returns an iterator yielding (Whisker_Seg, side).\n '
(maxy, maxx) = [(x - border) for x in shape]
(minx, miny) = (border, border)
test_point = (lambda x, y: ((x > minx) and (x < maxx) and (y > miny) and (y < maxy)))
bordertest = (lambda e, side: test_point(e.x[side], e.y[side]))
scoretest = (lambda e, side: (e.scores[side] > min_score))
sides = [0, (- 1)]
for e in wv:
for s in sides:
if (bordertest(e, s) and scoretest(e, s)):
(yield (e, s)) | 246,672,456,328,021,300 | Return candidate ends for joining.
Returns an iterator yielding (Whisker_Seg, side). | whisk/test_merge3.py | filter_ends | aiporre/whisk | python | def filter_ends(wv, min_score, shape, border=10):
'\n Return candidate ends for joining.\n\n Returns an iterator yielding (Whisker_Seg, side).\n '
(maxy, maxx) = [(x - border) for x in shape]
(minx, miny) = (border, border)
test_point = (lambda x, y: ((x > minx) and (x < maxx) and (y > miny) and (y < maxy)))
bordertest = (lambda e, side: test_point(e.x[side], e.y[side]))
scoretest = (lambda e, side: (e.scores[side] > min_score))
sides = [0, (- 1)]
for e in wv:
for s in sides:
if (bordertest(e, s) and scoretest(e, s)):
(yield (e, s)) |
def __init__(self, wvd, shape, scale):
' `wvd` may be either a dict or list of whiskers '
object.__init__(self)
self._map = {}
self._shape = shape
self._scale = scale
self._stride = stride = (shape[1] / scale)
self.topx = (lambda p: (int((p[0] / scale)) + (stride * int((p[1] / scale)))))
self._build_inverse_table(wvd) | -3,386,438,633,474,830,000 | `wvd` may be either a dict or list of whiskers | whisk/test_merge3.py | __init__ | aiporre/whisk | python | def __init__(self, wvd, shape, scale):
' '
object.__init__(self)
self._map = {}
self._shape = shape
self._scale = scale
self._stride = stride = (shape[1] / scale)
self.topx = (lambda p: (int((p[0] / scale)) + (stride * int((p[1] / scale)))))
self._build_inverse_table(wvd) |
def update(self, changes):
' Changes is a dict mapping old whisker segments to new segments '
last = None
for (w, p) in changes.items():
self.remove(w)
if p:
self.add(p[0])
self.add(p[(- 1)])
last = p[1]
if last:
self.add(last) | -137,687,085,251,495,400 | Changes is a dict mapping old whisker segments to new segments | whisk/test_merge3.py | update | aiporre/whisk | python | def update(self, changes):
' '
last = None
for (w, p) in changes.items():
self.remove(w)
if p:
self.add(p[0])
self.add(p[(- 1)])
last = p[1]
if last:
self.add(last) |
def __next__(self):
' This changes the inverse table by removing hits.\n\n Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple\n or None, if done.\n '
todelete = []
retval = None
for (px, s) in self._map.items():
todelete.append(px)
if (len(s) > 1):
retval = s
break
for k in todelete:
del self._map[k]
return retval | 5,829,238,777,711,661,000 | This changes the inverse table by removing hits.
Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple
or None, if done. | whisk/test_merge3.py | __next__ | aiporre/whisk | python | def __next__(self):
' This changes the inverse table by removing hits.\n\n Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple\n or None, if done.\n '
todelete = []
retval = None
for (px, s) in self._map.items():
todelete.append(px)
if (len(s) > 1):
retval = s
break
for k in todelete:
del self._map[k]
return retval |
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
def test_instructionspath(self, placeholder_mock, xmlparser_mock, isfile_mock, schema_mock, initmetadata_mock):
'\n Will return the instructions file path set in __init__\n '
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
actual_file = xml_parser.instructionspath
self.assertEqual(expected_file, actual_file) | -4,977,607,476,640,469,000 | Will return the instructions file path set in __init__ | tests/test_xmlparser.py | test_instructionspath | 3Peso/mosk | python | @patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
def test_instructionspath(self, placeholder_mock, xmlparser_mock, isfile_mock, schema_mock, initmetadata_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
actual_file = xml_parser.instructionspath
self.assertEqual(expected_file, actual_file) |
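
The xmlparser tests above stack several unittest.mock.patch decorators; a short, generic sketch (not tied to the mosk code base) of how stacked patches map onto the arguments, with the bottom-most decorator supplying the first mock:

from unittest.mock import patch

@patch('os.path.isfile')   # outermost decorator -> last mock argument
@patch('os.path.isdir')    # innermost decorator -> first mock argument
def check(isdir_mock, isfile_mock):
    isdir_mock.return_value = True
    isfile_mock.return_value = False
    return isdir_mock.return_value, isfile_mock.return_value

print(check())  # (True, False)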
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
def test_instructionspath_instruction_file_not_there(self, placeholder_mock, xmlparser_mock, isfile_mock, schema_mock, initmetadata_mock):
        '\n        Will raise a FileNotFoundError exception.\n        '
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
isfile_mock.return_value = False
with self.assertRaises(FileNotFoundError):
            xml_parser.instructionspath = expected_file | 8,851,255,329,636,294,000 | Will raise a FileNotFoundError exception. | tests/test_xmlparser.py | test_instructionspath_instruction_file_not_there | 3Peso/mosk | python | @patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
def test_instructionspath_instruction_file_not_there(self, placeholder_mock, xmlparser_mock, isfile_mock, schema_mock, initmetadata_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
isfile_mock.return_value = False
with self.assertRaises(FileNotFoundError):
xml_parser.instructionspath = expected_file |
def test__validate_schema_valid_instructions(self):
'\n Should do nothing.\n '
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/valid_instructions.xml')
except XMLSchemaException:
self.fail('_validate_schema should not raise exception with valid xml instructions.') | 15,335,525,377,500,458 | Should do nothing. | tests/test_xmlparser.py | test__validate_schema_valid_instructions | 3Peso/mosk | python | def test__validate_schema_valid_instructions(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/valid_instructions.xml')
except XMLSchemaException:
self.fail('_validate_schema should not raise exception with valid xml instructions.') |
def test__validate_schema_invalid_instructions(self):
'\n Should raise exception.\n '
from instructionparsers.xmlparser import XmlParser
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
self.assertRaises(XMLSchemaException, XmlParser._validate_schema, './instructions/invalid_instructions.xml') | -6,052,100,148,682,693,000 | Should raise exception. | tests/test_xmlparser.py | test__validate_schema_invalid_instructions | 3Peso/mosk | python | def test__validate_schema_invalid_instructions(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
self.assertRaises(XMLSchemaException, XmlParser._validate_schema, './instructions/invalid_instructions.xml') |
def test__validate_schema_minimal_valid_instructions(self):
'\n Should do nothing.\n '
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/minimal_valid_instructions.xml')
except XMLSchemaException:
self.fail('_validate_schema should not raise exception with valid xml instructions.') | 2,742,942,849,900,286,500 | Should do nothing. | tests/test_xmlparser.py | test__validate_schema_minimal_valid_instructions | 3Peso/mosk | python | def test__validate_schema_minimal_valid_instructions(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/minimal_valid_instructions.xml')
except XMLSchemaException:
self.fail('_validate_schema should not raise exception with valid xml instructions.') |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__initializemetadata_valid_instructions(self, path_mock):
'\n Should initialize member \'metadata\' with all elements which have the attribute "title".\n '
metadata = ('Examiner', 'Assignment', 'Client', 'Description of Artefact', 'Task Description')
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
xml_parser._initializemetadata()
for data in metadata:
with self.subTest(data):
self.assertIsNotNone(xml_parser.metadata[data]) | 3,682,683,099,307,162,000 | Should initialize member 'metadata' with all elements which have the attribute "title". | tests/test_xmlparser.py | test__initializemetadata_valid_instructions | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__initializemetadata_valid_instructions(self, path_mock):
'\n Should initialize member \'metadata\' with all elements which have the attribute "title".\n '
metadata = ('Examiner', 'Assignment', 'Client', 'Description of Artefact', 'Task Description')
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
xml_parser._initializemetadata()
for data in metadata:
with self.subTest(data):
self.assertIsNotNone(xml_parser.metadata[data]) |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
'\n Should initialize collectors for all XML elements which have the attribute "module".\n '
from instructionparsers.xmlparser import XmlParser
from instructionparsers.wrapper import InstructionWrapper
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
instructionstree = xml_parser._init_instructions()
self.assertIsInstance(instructionstree, InstructionWrapper) | -2,930,029,731,182,761,500 | Should initialize collectors for all XML elements which have the attribute "module". | tests/test_xmlparser.py | test__init_instructions_valid_instructions | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
from instructionparsers.wrapper import InstructionWrapper
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
instructionstree = xml_parser._init_instructions()
self.assertIsInstance(instructionstree, InstructionWrapper) |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
'\n Should return the instruction tree starting with "Root" node.\n '
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
instructionstree = xml_parser._init_instructions()
self.assertEqual(instructionstree.instructionname, 'Root')
self.assertEqual(instructionstree.instructionchildren[0].instructionname, 'LocalHost')
self.assertEqual(instructionstree.instructionchildren[0].instructionchildren[0].instructionname, 'MachineName')
self.assertEqual(instructionstree.instructionchildren[1].instructionname, 'LocalHost')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[0].instructionname, 'OSName')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[1].instructionname, 'OSVersion')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[2].instructionname, 'OSTimezone')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[3].instructionname, 'AllUsernames')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[4].instructionname, 'CurrentUser')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[5].instructionname, 'SudoVersion')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[6].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[7].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[8].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[9].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[10].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[11].instructionname, 'ShellHistoryOfAllUsers')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[12].instructionname, 'NVRAMCollector')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[13].instructionname, 'TimeFromNTPServer')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[14].instructionname, 'LocalTime') | -8,174,262,218,797,025,000 | Should return the instruction tree starting with "Root" node. | tests/test_xmlparser.py | test__init_instructions_valid_instructions | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
instructionstree = xml_parser._init_instructions()
self.assertEqual(instructionstree.instructionname, 'Root')
self.assertEqual(instructionstree.instructionchildren[0].instructionname, 'LocalHost')
self.assertEqual(instructionstree.instructionchildren[0].instructionchildren[0].instructionname, 'MachineName')
self.assertEqual(instructionstree.instructionchildren[1].instructionname, 'LocalHost')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[0].instructionname, 'OSName')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[1].instructionname, 'OSVersion')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[2].instructionname, 'OSTimezone')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[3].instructionname, 'AllUsernames')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[4].instructionname, 'CurrentUser')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[5].instructionname, 'SudoVersion')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[6].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[7].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[8].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[9].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[10].instructionname, 'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[11].instructionname, 'ShellHistoryOfAllUsers')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[12].instructionname, 'NVRAMCollector')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[13].instructionname, 'TimeFromNTPServer')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[14].instructionname, 'LocalTime') |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_first_instruction_element(self, path_mock):
'\n Should return the xml element with the title "Root".\n '
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
element = xml_parser._get_first_instruction_element()
self.assertIsInstance(element, Element)
self.assertEqual(element.localName, 'Root') | -8,376,315,482,908,859,000 | Should return the xml element with the title "Root". | tests/test_xmlparser.py | test__get_first_instruction_element | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_first_instruction_element(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
element = xml_parser._get_first_instruction_element()
self.assertIsInstance(element, Element)
self.assertEqual(element.localName, 'Root') |
def test__get_placeholder_name(self):
'\n If XmlElement contains attribute "placeholder" method should return value of this attribute.\n '
from instructionparsers.xmlparser import XmlParser
document = Document()
element = document.createElement('Demo')
element.setAttribute(XmlParser.PLACEHOLDERNAME_ATTRIBUTE, 'test')
result = XmlParser._get_placeholder_name(element)
self.assertEqual(result, 'test') | 4,556,195,059,788,626,000 | If XmlElement contains attribute "placeholder" method should return value of this attribute. | tests/test_xmlparser.py | test__get_placeholder_name | 3Peso/mosk | python | def test__get_placeholder_name(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
document = Document()
element = document.createElement('Demo')
element.setAttribute(XmlParser.PLACEHOLDERNAME_ATTRIBUTE, 'test')
result = XmlParser._get_placeholder_name(element)
self.assertEqual(result, 'test') |
def test__get_placeholder_name_no_placeholder(self):
'\n If XmlElement does not contain attribute "placeholder" method should return an empty string.\n '
from instructionparsers.xmlparser import XmlParser
element = Element('Demo')
result = XmlParser._get_placeholder_name(element)
self.assertEqual(result, '') | 1,274,365,093,746,357,800 | If XmlElement does not contain attribute "placeholder" method should return an empty string. | tests/test_xmlparser.py | test__get_placeholder_name_no_placeholder | 3Peso/mosk | python | def test__get_placeholder_name_no_placeholder(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
element = Element('Demo')
result = XmlParser._get_placeholder_name(element)
        self.assertEqual(result, '')
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict(self, path_mock):
'\n Should return UserDict\n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instructions_stub.xml').documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertIsInstance(actual, UserDict) | 8,628,808,085,382,813,000 | Should return UserDict | tests/test_xmlparser.py | test__get_parameter_attributes_return_userdict | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instructions_stub.xml').documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertIsInstance(actual, UserDict) |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict_with_2_entries(self, path_mock):
'\n Should return dict with two entries\n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instructions_stub.xml').documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertEqual(len(actual), 2) | 4,547,153,624,282,183,700 | Should return dict with two entries | tests/test_xmlparser.py | test__get_parameter_attributes_return_userdict_with_2_entries | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict_with_2_entries(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instructions_stub.xml').documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertEqual(len(actual), 2) |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_should_return_none_special_attributes(self, path_mock):
        '\n        Should return a dictionary with "users_with_homedir" and "properties" keys.\n        '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instructions_stub.xml').documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertIsNotNone(actual.get('properties'))
        self.assertIsNotNone(actual.get('users_with_homedir')) | -7,049,063,655,947,019,000 | Should return a dictionary with "users_with_homedir" and "properties" keys. | tests/test_xmlparser.py | test__get_parameter_attributes_should_return_none_special_attributes | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_should_return_none_special_attributes(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instructions_stub.xml').documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertIsNotNone(actual.get('properties'))
self.assertIsNotNone(actual.get('users_with_homedir')) |
def LessOptionsStart(builder):
'This method is deprecated. Please switch to Start.'
return Start(builder) | -7,271,987,008,384,153,000 | This method is deprecated. Please switch to Start. | tf2onnx/tflite/LessOptions.py | LessOptionsStart | LoicDagnas/tensorflow-onnx | python | def LessOptionsStart(builder):
return Start(builder) |
def LessOptionsEnd(builder):
'This method is deprecated. Please switch to End.'
return End(builder) | 7,464,512,795,446,503,000 | This method is deprecated. Please switch to End. | tf2onnx/tflite/LessOptions.py | LessOptionsEnd | LoicDagnas/tensorflow-onnx | python | def LessOptionsEnd(builder):
return End(builder) |
@classmethod
def GetRootAsLessOptions(cls, buf, offset=0):
'This method is deprecated. Please switch to GetRootAs.'
return cls.GetRootAs(buf, offset) | 7,481,250,055,416,234,000 | This method is deprecated. Please switch to GetRootAs. | tf2onnx/tflite/LessOptions.py | GetRootAsLessOptions | LoicDagnas/tensorflow-onnx | python | @classmethod
def GetRootAsLessOptions(cls, buf, offset=0):
return cls.GetRootAs(buf, offset) |
@contextlib.contextmanager
def db_role_setter(conn, *, role_name):
    'Temporarily set the session role inside the block, restoring the previous role on exit.'
with conn.cursor() as cursor:
cursor.execute('SHOW ROLE')
old_role = cursor.fetchone()[0]
cursor.execute('SET ROLE %s', [role_name])
(yield role_name)
        cursor.execute('SET ROLE %s', [old_role]) | 586,806,249,027,235,700 | Temporarily set the session role inside the block, restoring the previous role on exit. | src/dirbs/utils.py | db_role_setter | nealmadhu/DIRBS-Core | python | @contextlib.contextmanager
def db_role_setter(conn, *, role_name):
with conn.cursor() as cursor:
cursor.execute('SHOW ROLE')
old_role = cursor.fetchone()[0]
cursor.execute('SET ROLE %s', [role_name])
(yield role_name)
cursor.execute('SET ROLE %s', [old_role]) |
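
A usage sketch for the db_role_setter context manager above; conn is assumed to be an open psycopg2 connection, and the role name is one of the DIRBS roles mentioned elsewhere in this module:

with db_role_setter(conn, role_name='dirbs_core_power_user') as role:
    with conn.cursor() as cursor:
        cursor.execute('SHOW ROLE')      # statements here run under the temporary role
        print(cursor.fetchone()[0], '==', role)
# On exit the previous role is restored via SET ROLE.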
def compute_md5_hash(file, buf_size=65536):
'Utility method to generate a md5 hash of file.'
md5_hash = hashlib.md5()
while True:
data = file.read(buf_size)
if (not data):
break
md5_hash.update(data)
return md5_hash.hexdigest() | 6,161,984,623,669,644,000 | Utility method to generate a md5 hash of file. | src/dirbs/utils.py | compute_md5_hash | nealmadhu/DIRBS-Core | python | def compute_md5_hash(file, buf_size=65536):
md5_hash = hashlib.md5()
while True:
data = file.read(buf_size)
if (not data):
break
md5_hash.update(data)
return md5_hash.hexdigest() |
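
A usage sketch for compute_md5_hash above, hashing a file opened in binary mode; the file path is a placeholder assumption:

# Hash an arbitrary file in 64 KiB chunks.
with open('operator_dump.csv', 'rb') as f:
    digest = compute_md5_hash(f)
print(digest)   # 32-character hexadecimal MD5 digest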
def cachebusted_filename_from_contents(byte_array):
'Utility method to generate a unique filename based on the hash of a given content array (of bytes).'
return compute_md5_hash(io.BytesIO(byte_array))[:8] | -1,696,663,648,842,604,000 | Utility method to generate a unique filename based on the hash of a given content array (of bytes). | src/dirbs/utils.py | cachebusted_filename_from_contents | nealmadhu/DIRBS-Core | python | def cachebusted_filename_from_contents(byte_array):
return compute_md5_hash(io.BytesIO(byte_array))[:8] |
def cli_db_params_from_dsn(dsn, user=None, database=None, port=None, host=None):
'Convert DB-related command-line arguments from a DSN into a format appropriate for DIRBS CLI commands.'
db_args = []
db_args.append('--db-user={0}'.format((user if (user is not None) else dsn.get('user'))))
db_args.append('--db-name={0}'.format((database if (database is not None) else dsn.get('database'))))
db_args.append('--db-port={0}'.format((port if (port is not None) else dsn.get('port'))))
db_args.append('--db-host={0}'.format((host if (host is not None) else dsn.get('host'))))
return db_args | -2,480,064,948,385,259,500 | Convert DB-related command-line arguments from a DSN into a format appropriate for DIRBS CLI commands. | src/dirbs/utils.py | cli_db_params_from_dsn | nealmadhu/DIRBS-Core | python | def cli_db_params_from_dsn(dsn, user=None, database=None, port=None, host=None):
db_args = []
db_args.append('--db-user={0}'.format((user if (user is not None) else dsn.get('user'))))
db_args.append('--db-name={0}'.format((database if (database is not None) else dsn.get('database'))))
db_args.append('--db-port={0}'.format((port if (port is not None) else dsn.get('port'))))
db_args.append('--db-host={0}'.format((host if (host is not None) else dsn.get('host'))))
return db_args |
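
A sketch of what cli_db_params_from_dsn above produces for an illustrative DSN dict:

dsn = {'user': 'dirbs', 'database': 'dirbs_core', 'port': 5432, 'host': 'localhost'}
print(cli_db_params_from_dsn(dsn))
# ['--db-user=dirbs', '--db-name=dirbs_core', '--db-port=5432', '--db-host=localhost']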
def create_db_connection(db_config, readonly=False, autocommit=False):
'Creates a DB connection to the database.\n\n Imports the config module, which results in the config being read from disk.\n Changes to the config file made after this method has been called will not be read.\n\n Calling entity should handle connection errors as appropriate.\n '
logger = logging.getLogger('dirbs.sql')
logger.debug('Attempting to connect to the database {0} on host {1}'.format(db_config.database, db_config.host))
conn = psycopg2.connect('{0} keepalives=1 keepalives_idle=240'.format(db_config.connection_string), cursor_factory=LoggingNamedTupleCursor)
conn.set_session(readonly=readonly, autocommit=autocommit)
logger.debug('Connection to database successful.')
return conn | -6,769,060,532,684,972,000 | Creates a DB connection to the database.
Imports the config module, which results in the config being read from disk.
Changes to the config file made after this method has been called will not be read.
Calling entity should handle connection errors as appropriate. | src/dirbs/utils.py | create_db_connection | nealmadhu/DIRBS-Core | python | def create_db_connection(db_config, readonly=False, autocommit=False):
'Creates a DB connection to the database.\n\n Imports the config module, which results in the config being read from disk.\n Changes to the config file made after this method has been called will not be read.\n\n Calling entity should handle connection errors as appropriate.\n '
logger = logging.getLogger('dirbs.sql')
logger.debug('Attempting to connect to the database {0} on host {1}'.format(db_config.database, db_config.host))
conn = psycopg2.connect('{0} keepalives=1 keepalives_idle=240'.format(db_config.connection_string), cursor_factory=LoggingNamedTupleCursor)
conn.set_session(readonly=readonly, autocommit=autocommit)
logger.debug('Connection to database successful.')
return conn |
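
A hedged usage sketch for create_db_connection above; db_config stands for whatever DIRBS config object exposes the database, host and connection_string attributes the function relies on:

conn = create_db_connection(db_config, readonly=True)
try:
    with conn.cursor() as cursor:
        cursor.execute('SELECT 1')
        print(cursor.fetchone())
finally:
    conn.close()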
def verify_db_schema(conn, required_role):
'Function that runs all DB verification checks.'
warn_if_db_superuser(conn)
verify_db_roles_installed(conn)
verify_db_role_for_job(conn, required_role)
verify_db_schema_version(conn)
verify_db_ownership(conn)
verify_hll_schema(conn)
verify_core_schema(conn)
verify_db_search_path(conn) | 887,756,644,857,539,100 | Function that runs all DB verification checks. | src/dirbs/utils.py | verify_db_schema | nealmadhu/DIRBS-Core | python | def verify_db_schema(conn, required_role):
warn_if_db_superuser(conn)
verify_db_roles_installed(conn)
verify_db_role_for_job(conn, required_role)
verify_db_schema_version(conn)
verify_db_ownership(conn)
verify_hll_schema(conn)
verify_core_schema(conn)
verify_db_search_path(conn) |
def warn_if_db_superuser(conn):
'Warn if the current DB user is a PostgreSQL superuser.'
logger = logging.getLogger('dirbs.db')
if is_db_user_superuser(conn):
logger.warn('Running as PostgreSQL superuser -- for security reasons, we recommend running all DIRBS tasks as a normal user') | 4,244,715,382,031,265,300 | Warn if the current DB user is a PostgreSQL superuser. | src/dirbs/utils.py | warn_if_db_superuser | nealmadhu/DIRBS-Core | python | def warn_if_db_superuser(conn):
logger = logging.getLogger('dirbs.db')
if is_db_user_superuser(conn):
logger.warn('Running as PostgreSQL superuser -- for security reasons, we recommend running all DIRBS tasks as a normal user') |
def verify_db_roles_installed(conn):
'Function used to verify whether roles have been installed in the DB.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cursor:
cursor.execute("SELECT 1 AS res FROM pg_roles WHERE rolname = 'dirbs_core_power_user'")
if (cursor.fetchone() is None):
logger.error("DIRBS Core roles have not been installed - run 'dirbs-db install_roles' before running 'dirbs-db install'")
raise DatabaseSchemaException('DIRBS Core database roles have not been installed') | -1,286,691,893,464,138,200 | Function used to verify whether roles have been installed in the DB. | src/dirbs/utils.py | verify_db_roles_installed | nealmadhu/DIRBS-Core | python | def verify_db_roles_installed(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cursor:
cursor.execute("SELECT 1 AS res FROM pg_roles WHERE rolname = 'dirbs_core_power_user'")
if (cursor.fetchone() is None):
logger.error("DIRBS Core roles have not been installed - run 'dirbs-db install_roles' before running 'dirbs-db install'")
raise DatabaseSchemaException('DIRBS Core database roles have not been installed') |
def verify_db_role_for_job(conn, expected_role):
'Function used to verify that the current DB user is in the role expected for this job.'
if (not is_db_user_dirbs_role(conn, expected_role)):
role = conn.get_dsn_parameters().get('user')
raise DatabaseRoleCheckException('Current DB user {0} does not have required role: {1}. To fix this:\n\t1. GRANT {1} TO {0};'.format(role, expected_role)) | 866,532,192,136,172,500 | Function used to verify that the current DB user is in the role expected for this job. | src/dirbs/utils.py | verify_db_role_for_job | nealmadhu/DIRBS-Core | python | def verify_db_role_for_job(conn, expected_role):
if (not is_db_user_dirbs_role(conn, expected_role)):
role = conn.get_dsn_parameters().get('user')
raise DatabaseRoleCheckException('Current DB user {0} does not have required role: {1}. To fix this:\n\t1. GRANT {1} TO {0};'.format(role, expected_role)) |
def verify_db_schema_version(conn):
'Function used to check whether the DB schema version matches the code schema version.'
logger = logging.getLogger('dirbs.db')
version = query_db_schema_version(conn)
if (version != code_db_schema_version):
if (version is None):
logger.error('DB schema has not been installed via dirbs-db install!')
raise DatabaseSchemaException('No DB schema installed - perform a dirbs-db install first!')
else:
logger.error('DB schema version does not match code!')
logger.error('Code schema version: %d', code_db_schema_version)
logger.error('DB schema version: %d', version)
raise DatabaseSchemaException('Mismatch between code and DB schema versions - perform a dirbs-db upgrade!') | 5,007,492,157,737,945,000 | Function used to check whether the DB schema version matches the code schema version. | src/dirbs/utils.py | verify_db_schema_version | nealmadhu/DIRBS-Core | python | def verify_db_schema_version(conn):
logger = logging.getLogger('dirbs.db')
version = query_db_schema_version(conn)
if (version != code_db_schema_version):
if (version is None):
logger.error('DB schema has not been installed via dirbs-db install!')
raise DatabaseSchemaException('No DB schema installed - perform a dirbs-db install first!')
else:
logger.error('DB schema version does not match code!')
logger.error('Code schema version: %d', code_db_schema_version)
logger.error('DB schema version: %d', version)
raise DatabaseSchemaException('Mismatch between code and DB schema versions - perform a dirbs-db upgrade!') |
def verify_db_ownership(conn):
'Function used to check whether DB ownership matches what we expect.'
logger = logging.getLogger('dirbs.db')
if (query_db_ownership(conn) != 'dirbs_core_power_user'):
        logger.error('Database is not owned by the dirbs_core_power_user group! Please run the following as the current DB owner (whilst logged into the database):\n\tALTER DATABASE <database> OWNER TO dirbs_core_power_user;')
raise DatabaseSchemaException('Incorrect database ownership!') | 5,671,161,847,534,446,000 | Function used to check whether DB ownership matches what we expect. | src/dirbs/utils.py | verify_db_ownership | nealmadhu/DIRBS-Core | python | def verify_db_ownership(conn):
logger = logging.getLogger('dirbs.db')
if (query_db_ownership(conn) != 'dirbs_core_power_user'):
        logger.error('Database is not owned by the dirbs_core_power_user group! Please run the following as the current DB owner (whilst logged into the database):\n\tALTER DATABASE <database> OWNER TO dirbs_core_power_user;')
raise DatabaseSchemaException('Incorrect database ownership!') |
def verify_core_schema(conn):
'Function used to check whether Core schema exists and has correct ownership.'
if (not query_schema_existence(conn, 'core')):
raise DatabaseSchemaException("Missing schema 'core' in DB. Was dirbs-db install run successfully?")
if (query_schema_ownership(conn, 'core') != 'dirbs_core_power_user'):
raise DatabaseSchemaException("Schema 'core' is not owned by dirbs_core_power_user!") | 5,272,892,874,875,728,000 | Function used to check whether Core schema exists and has correct ownership. | src/dirbs/utils.py | verify_core_schema | nealmadhu/DIRBS-Core | python | def verify_core_schema(conn):
if (not query_schema_existence(conn, 'core')):
raise DatabaseSchemaException("Missing schema 'core' in DB. Was dirbs-db install run successfully?")
if (query_schema_ownership(conn, 'core') != 'dirbs_core_power_user'):
raise DatabaseSchemaException("Schema 'core' is not owned by dirbs_core_power_user!") |
def verify_hll_schema(conn):
'Function used to check whether HLL schema exists and that extension is installed correctly.'
logger = logging.getLogger('dirbs.db')
if (not query_schema_existence(conn, 'hll')):
logger.error("Schema 'hll' does not exist. Please ensure the hll extension is installed and run the following as a superuser whilst connected to this DB: \n\t1. CREATE SCHEMA hll;\n\t2. GRANT USAGE ON SCHEMA hll TO dirbs_core_base;\n\t3. CREATE EXTENSION hll SCHEMA hll;")
raise DatabaseSchemaException('HLL schema not created!')
with conn.cursor() as cursor:
try:
cursor.execute("SELECT pg_get_functiondef('hll.hll_print(hll.hll)'::regprocedure)")
except psycopg2.ProgrammingError:
logger.error('The HLL extension is not installed correctly. Please issue the following as a superuser whilst connected to this DB: \n\tCREATE EXTENSION hll SCHEMA hll;')
raise DatabaseSchemaException('DB search_path does not include hll or extension not installed!') | 2,937,410,098,723,305,500 | Function used to check whether HLL schema exists and that extension is installed correctly. | src/dirbs/utils.py | verify_hll_schema | nealmadhu/DIRBS-Core | python | def verify_hll_schema(conn):
logger = logging.getLogger('dirbs.db')
if (not query_schema_existence(conn, 'hll')):
logger.error("Schema 'hll' does not exist. Please ensure the hll extension is installed and run the following as a superuser whilst connected to this DB: \n\t1. CREATE SCHEMA hll;\n\t2. GRANT USAGE ON SCHEMA hll TO dirbs_core_base;\n\t3. CREATE EXTENSION hll SCHEMA hll;")
raise DatabaseSchemaException('HLL schema not created!')
with conn.cursor() as cursor:
try:
cursor.execute("SELECT pg_get_functiondef('hll.hll_print(hll.hll)'::regprocedure)")
except psycopg2.ProgrammingError:
logger.error('The HLL extension is not installed correctly. Please issue the following as a superuser whilst connected to this DB: \n\tCREATE EXTENSION hll SCHEMA hll;')
raise DatabaseSchemaException('DB search_path does not include hll or extension not installed!') |
def verify_db_search_path(conn):
'Function used to check whether db_search_path is correct by looking for objects.'
logger = logging.getLogger('dirbs.db')
is_search_path_valid = True
with conn.cursor() as cursor:
cursor.execute("SELECT to_regclass('schema_version')")
res = cursor.fetchone()[0]
if (res is None):
is_search_path_valid = False
try:
cursor.execute("SELECT pg_get_functiondef('hll_print(hll)'::regprocedure)")
except psycopg2.ProgrammingError:
is_search_path_valid = False
if (not is_search_path_valid):
logger.error('The search_path for the database is not set correctly. Please issue the following whilst connected to this DB: \n\tALTER DATABASE <database> SET search_path TO core, hll;')
raise DatabaseSchemaException('DB search_path not set correctly!') | -4,710,171,244,612,890,000 | Function used to check whether db_search_path is correct by looking for objects. | src/dirbs/utils.py | verify_db_search_path | nealmadhu/DIRBS-Core | python | def verify_db_search_path(conn):
logger = logging.getLogger('dirbs.db')
is_search_path_valid = True
with conn.cursor() as cursor:
cursor.execute("SELECT to_regclass('schema_version')")
res = cursor.fetchone()[0]
if (res is None):
is_search_path_valid = False
try:
cursor.execute("SELECT pg_get_functiondef('hll_print(hll)'::regprocedure)")
except psycopg2.ProgrammingError:
is_search_path_valid = False
if (not is_search_path_valid):
logger.error('The search_path for the database is not set correctly. Please issue the following whilst connected to this DB: \n\tALTER DATABASE <database> SET search_path TO core, hll;')
raise DatabaseSchemaException('DB search_path not set correctly!') |
def query_db_schema_version(conn):
'Function to fetch the DB version number from the database.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
try:
cur.execute('SELECT MAX(version) FROM schema_version')
return cur.fetchone()[0]
except psycopg2.ProgrammingError as ex:
logger.error(str(ex).strip())
return None | 7,737,910,117,503,055,000 | Function to fetch the DB version number from the database. | src/dirbs/utils.py | query_db_schema_version | nealmadhu/DIRBS-Core | python | def query_db_schema_version(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
try:
cur.execute('SELECT MAX(version) FROM schema_version')
return cur.fetchone()[0]
except psycopg2.ProgrammingError as ex:
logger.error(str(ex).strip())
return None |
def set_db_schema_version(conn, new_version):
'Function to set the DB version number in the database.'
with conn.cursor() as cur:
cur.execute('SELECT COUNT(*) FROM schema_version')
num_rows = cur.fetchone()[0]
assert (num_rows <= 1)
if (num_rows > 0):
cur.execute('UPDATE schema_version SET version = %s', [new_version])
else:
cur.execute('INSERT INTO schema_version(version) VALUES(%s)', [new_version]) | -7,654,731,514,572,616,000 | Function to set the DB version number in the database. | src/dirbs/utils.py | set_db_schema_version | nealmadhu/DIRBS-Core | python | def set_db_schema_version(conn, new_version):
with conn.cursor() as cur:
cur.execute('SELECT COUNT(*) FROM schema_version')
num_rows = cur.fetchone()[0]
assert (num_rows <= 1)
if (num_rows > 0):
cur.execute('UPDATE schema_version SET version = %s', [new_version])
else:
cur.execute('INSERT INTO schema_version(version) VALUES(%s)', [new_version]) |
def is_db_user_superuser(conn):
'Function to test whether the current DB user is a PostgreSQL superuser.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolsuper\n FROM pg_roles\n WHERE rolname = CURRENT_USER')
res = cur.fetchone()
if (res is None):
logger.warn('Failed to find CURRENT_USER in pg_roles table')
return False
return res[0] | -2,014,946,659,195,252,500 | Function to test whether the current DB user is a PostgreSQL superuser. | src/dirbs/utils.py | is_db_user_superuser | nealmadhu/DIRBS-Core | python | def is_db_user_superuser(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolsuper\n FROM pg_roles\n WHERE rolname = CURRENT_USER')
res = cur.fetchone()
if (res is None):
logger.warn('Failed to find CURRENT_USER in pg_roles table')
return False
return res[0] |
def is_db_user_dirbs_role(conn, role_name):
'Function to test whether the current DB user is in a DIRBS role.'
with conn.cursor() as cur:
cur.execute("SELECT pg_has_role(%s, 'MEMBER')", [role_name])
return cur.fetchone()[0] | 7,042,693,565,980,279,000 | Function to test whether the current DB user is in a DIRBS role. | src/dirbs/utils.py | is_db_user_dirbs_role | nealmadhu/DIRBS-Core | python | def is_db_user_dirbs_role(conn, role_name):
with conn.cursor() as cur:
cur.execute("SELECT pg_has_role(%s, 'MEMBER')", [role_name])
return cur.fetchone()[0] |
def is_db_user_dirbs_poweruser(conn):
'Function to test whether the current DB user is a DIRBS power user.'
return is_db_user_dirbs_role(conn, 'dirbs_core_power_user') | -2,488,561,900,685,019,000 | Function to test whether the current DB user is a DIRBS power user. | src/dirbs/utils.py | is_db_user_dirbs_poweruser | nealmadhu/DIRBS-Core | python | def is_db_user_dirbs_poweruser(conn):
return is_db_user_dirbs_role(conn, 'dirbs_core_power_user') |
def can_db_user_create_roles(conn):
'Function to test whether the current DB user has the CREATEROLE privilege.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolcreaterole\n FROM pg_roles\n WHERE rolname = CURRENT_USER')
res = cur.fetchone()
if (res is None):
logger.warn('Failed to find CURRENT_USER in pg_roles table')
return False
return res[0] | -1,844,158,055,052,842,200 | Function to test whether the current DB user has the CREATEROLE privilege. | src/dirbs/utils.py | can_db_user_create_roles | nealmadhu/DIRBS-Core | python | def can_db_user_create_roles(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolcreaterole\n FROM pg_roles\n WHERE rolname = CURRENT_USER')
res = cur.fetchone()
if (res is None):
logger.warn('Failed to find CURRENT_USER in pg_roles table')
return False
return res[0] |
def query_db_ownership(conn):
'Function to verify whether the current database ownership is correct.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolname\n FROM pg_roles\n JOIN pg_database\n ON (pg_database.datdba = pg_roles.oid)\n WHERE datname = current_database()')
res = cur.fetchone()
if (res is None):
            logger.warn('Failed to determine DB owner for current_database')
return None
return res[0] | 6,090,212,680,849,892,000 | Function to verify whether the current database ownership is correct. | src/dirbs/utils.py | query_db_ownership | nealmadhu/DIRBS-Core | python | def query_db_ownership(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolname\n FROM pg_roles\n JOIN pg_database\n ON (pg_database.datdba = pg_roles.oid)\n WHERE datname = current_database()')
res = cur.fetchone()
if (res is None):
            logger.warn('Failed to determine DB owner for current_database')
return None
return res[0] |
def query_schema_existence(conn, schema_name):
    'Function to verify whether the given database schema exists.'
with conn.cursor() as cur:
cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)', [schema_name])
        return cur.fetchone().exists | -1,074,036,478,143,411,000 | Function to verify whether the given database schema exists. | src/dirbs/utils.py | query_schema_existence | nealmadhu/DIRBS-Core | python | def query_schema_existence(conn, schema_name):
with conn.cursor() as cur:
cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)', [schema_name])
return cur.fetchone().exists |
def query_schema_ownership(conn, schema_name):
'Function to verify whether the current database schema ownership is correct.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolname\n FROM pg_roles\n JOIN pg_namespace\n ON (pg_namespace.nspowner = pg_roles.oid)\n WHERE nspname = %s', [schema_name])
res = cur.fetchone()
if (res is None):
            logger.warn('Failed to determine owner for current_schema')
return None
return res[0] | 8,221,048,068,172,430,000 | Function to verify whether the current database schema ownership is correct. | src/dirbs/utils.py | query_schema_ownership | nealmadhu/DIRBS-Core | python | def query_schema_ownership(conn, schema_name):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolname\n FROM pg_roles\n JOIN pg_namespace\n ON (pg_namespace.nspowner = pg_roles.oid)\n WHERE nspname = %s', [schema_name])
res = cur.fetchone()
if (res is None):
            logger.warn('Failed to determine owner for current_schema')
return None
return res[0] |
def compute_analysis_end_date(conn, curr_date):
'Function to get the end of the analysis window based on current operator data.'
end_date = curr_date
if (end_date is None):
with conn.cursor() as cursor:
monthly_country_child_tbl_list = child_table_names(conn, 'monthly_network_triplets_country')
year_month_list_in_child_tbls_records = table_invariants_list(conn, monthly_country_child_tbl_list, ['triplet_year', 'triplet_month'])
year_month_tuple_list = [(x.triplet_year, x.triplet_month) for x in year_month_list_in_child_tbls_records]
if (len(year_month_tuple_list) > 0):
year_month_tuple_list.sort(key=(lambda x: (x[0], x[1])), reverse=True)
(latest_year, latest_month) = year_month_tuple_list[0]
cursor.execute(sql.SQL('SELECT MAX(last_seen)\n FROM monthly_network_triplets_country\n WHERE triplet_year = %s\n AND triplet_month = %s'), [latest_year, latest_month])
end_date = cursor.fetchone()[0]
if (end_date is None):
end_date = datetime.date.today()
return (end_date + datetime.timedelta(days=1)) | 4,949,691,341,178,486,000 | Function to get the end of the analysis window based on current operator data. | src/dirbs/utils.py | compute_analysis_end_date | nealmadhu/DIRBS-Core | python | def compute_analysis_end_date(conn, curr_date):
end_date = curr_date
if (end_date is None):
with conn.cursor() as cursor:
monthly_country_child_tbl_list = child_table_names(conn, 'monthly_network_triplets_country')
year_month_list_in_child_tbls_records = table_invariants_list(conn, monthly_country_child_tbl_list, ['triplet_year', 'triplet_month'])
year_month_tuple_list = [(x.triplet_year, x.triplet_month) for x in year_month_list_in_child_tbls_records]
if (len(year_month_tuple_list) > 0):
year_month_tuple_list.sort(key=(lambda x: (x[0], x[1])), reverse=True)
(latest_year, latest_month) = year_month_tuple_list[0]
cursor.execute(sql.SQL('SELECT MAX(last_seen)\n FROM monthly_network_triplets_country\n WHERE triplet_year = %s\n AND triplet_month = %s'), [latest_year, latest_month])
end_date = cursor.fetchone()[0]
if (end_date is None):
end_date = datetime.date.today()
return (end_date + datetime.timedelta(days=1)) |
def hash_string_64bit(s):
'Basic string hash based on taking an initial prime number and multiplying it by another prime numnber.'
string_hash = 7
string_bytes = bytearray(s, 'utf-8')
for b in string_bytes:
string_hash = ((string_hash * 31) + b)
return (string_hash % (pow(2, 63) - 1)) | 6,294,782,927,157,870,000 | Basic string hash based on taking an initial prime number and multiplying it by another prime numnber. | src/dirbs/utils.py | hash_string_64bit | nealmadhu/DIRBS-Core | python | def hash_string_64bit(s):
string_hash = 7
string_bytes = bytearray(s, 'utf-8')
for b in string_bytes:
string_hash = ((string_hash * 31) + b)
return (string_hash % (pow(2, 63) - 1)) |
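
A worked example of the rolling hash in hash_string_64bit above; the intermediate values were computed by hand from the code and are easy to re-check:

# For the two-byte string 'ab' (UTF-8 bytes 97 and 98):
#   7 * 31 + 97   = 314
#   314 * 31 + 98 = 9832
# 9832 is far below 2**63 - 1, so the final modulo leaves it unchanged.
assert hash_string_64bit('ab') == 9832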
def child_table_names(conn, parent_name):
'Return a list of table names for a parent table name.'
with conn.cursor() as cursor:
cursor.execute('SELECT c.relname AS child_tblname\n FROM pg_inherits\n JOIN pg_class AS c\n ON (c.oid = inhrelid)\n JOIN pg_class AS p\n ON (p.oid = inhparent)\n JOIN pg_catalog.pg_namespace nc\n ON nc.oid = c.relnamespace\n JOIN pg_catalog.pg_namespace np\n ON np.oid = p.relnamespace\n WHERE p.relname = %s\n AND np.nspname = current_schema()\n AND nc.nspname = current_schema()', [parent_name])
return [res.child_tblname for res in cursor] | -5,567,330,417,522,711,000 | Return a list of table names for a parent table name. | src/dirbs/utils.py | child_table_names | nealmadhu/DIRBS-Core | python | def child_table_names(conn, parent_name):
with conn.cursor() as cursor:
cursor.execute('SELECT c.relname AS child_tblname\n FROM pg_inherits\n JOIN pg_class AS c\n ON (c.oid = inhrelid)\n JOIN pg_class AS p\n ON (p.oid = inhparent)\n JOIN pg_catalog.pg_namespace nc\n ON nc.oid = c.relnamespace\n JOIN pg_catalog.pg_namespace np\n ON np.oid = p.relnamespace\n WHERE p.relname = %s\n AND np.nspname = current_schema()\n AND nc.nspname = current_schema()', [parent_name])
return [res.child_tblname for res in cursor] |
def table_invariants_list(conn, table_names, invariant_col_names):
'Gets a list of tuples containing the values for common table invariant columns across a list table names.'
if (len(table_names) == 0):
return []
with conn.cursor() as cursor:
table_queries = []
for tblname in table_names:
table_queries.append(sql.SQL('SELECT * FROM (SELECT {0} FROM {1} LIMIT 1) {2}').format(sql.SQL(', ').join(map(sql.Identifier, invariant_col_names)), sql.Identifier(tblname), sql.Identifier('tmp_{0}'.format(tblname))))
cursor.execute(sql.SQL(' UNION ALL ').join(table_queries))
return cursor.fetchall() | -7,955,107,562,763,765,000 | Gets a list of tuples containing the values for common table invariant columns across a list table names. | src/dirbs/utils.py | table_invariants_list | nealmadhu/DIRBS-Core | python | def table_invariants_list(conn, table_names, invariant_col_names):
if (len(table_names) == 0):
return []
with conn.cursor() as cursor:
table_queries = []
for tblname in table_names:
table_queries.append(sql.SQL('SELECT * FROM (SELECT {0} FROM {1} LIMIT 1) {2}').format(sql.SQL(', ').join(map(sql.Identifier, invariant_col_names)), sql.Identifier(tblname), sql.Identifier('tmp_{0}'.format(tblname))))
cursor.execute(sql.SQL(' UNION ALL ').join(table_queries))
return cursor.fetchall() |
def most_recently_run_condition_info(conn, cond_names, successful_only=False):
'For a list of condition names, return a dict of cond_name -> (run_id, cond_config) for the most recent results.\n\n If a particular condition has never completed successfully, the value of the dict will be None, unless the\n successful_only parameter is set to True, in which case the key will not exist in the returned dict.\n '
conditions_to_find = copy.copy(cond_names)
rv = {}
job_metadata_list = metadata.query_for_command_runs(conn, 'dirbs-classify')
for job_metadata in job_metadata_list:
extra_metadata = job_metadata.extra_metadata
metadata_conditions = extra_metadata.get('conditions', {})
matched_imei_counts = extra_metadata.get('matched_imei_counts', {})
conditions_lookup = {c['label']: c for c in metadata_conditions}
for req_cond_name in copy.copy(conditions_to_find):
if (req_cond_name in matched_imei_counts):
rv[req_cond_name] = {'run_id': job_metadata.run_id, 'config': conditions_lookup[req_cond_name], 'last_successful_run': job_metadata.start_time}
conditions_to_find.remove(req_cond_name)
if (not successful_only):
for missing_cond_name in conditions_to_find:
rv[missing_cond_name] = None
return rv | 6,417,752,044,326,887,000 | For a list of condition names, return a dict of cond_name -> (run_id, cond_config) for the most recent results.
If a particular condition has never completed successfully, the value of the dict will be None, unless the
successful_only parameter is set to True, in which case the key will not exist in the returned dict. | src/dirbs/utils.py | most_recently_run_condition_info | nealmadhu/DIRBS-Core | python | def most_recently_run_condition_info(conn, cond_names, successful_only=False):
'For a list of condition names, return a dict of cond_name -> (run_id, cond_config) for the most recent results.\n\n If a particular condition has never completed successfully, the value of the dict will be None, unless the\n successful_only parameter is set to True, in which case the key will not exist in the returned dict.\n '
conditions_to_find = copy.copy(cond_names)
rv = {}
job_metadata_list = metadata.query_for_command_runs(conn, 'dirbs-classify')
for job_metadata in job_metadata_list:
extra_metadata = job_metadata.extra_metadata
metadata_conditions = extra_metadata.get('conditions', {})
matched_imei_counts = extra_metadata.get('matched_imei_counts', {})
conditions_lookup = {c['label']: c for c in metadata_conditions}
for req_cond_name in copy.copy(conditions_to_find):
if (req_cond_name in matched_imei_counts):
rv[req_cond_name] = {'run_id': job_metadata.run_id, 'config': conditions_lookup[req_cond_name], 'last_successful_run': job_metadata.start_time}
conditions_to_find.remove(req_cond_name)
if (not successful_only):
for missing_cond_name in conditions_to_find:
rv[missing_cond_name] = None
return rv |
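
A sketch of the dict shape returned by most_recently_run_condition_info above; the condition names and values are illustrative, and last_successful_run is really a timestamp object rather than a string:

# info = most_recently_run_condition_info(conn, ['local_stolen', 'duplicate_mk1'])
info = {
    'local_stolen': {'run_id': 42,
                     'config': {'label': 'local_stolen'},
                     'last_successful_run': '2017-11-01 02:15:00'},
    'duplicate_mk1': None,   # never completed; omitted entirely if successful_only=True
}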
def filter_imei_list_sql_by_device_type(conn, exempted_device_types, imei_list_sql):
'Function to return SQL filtering out exempted device types.'
return sql.SQL('SELECT imei_norm\n FROM (SELECT imei_norm,\n SUBSTRING(imei_norm, 1, 8) AS tac\n FROM ({0}) imeis) imeis_with_tac\n JOIN gsma_data\n USING (tac)\n WHERE device_type NOT IN {1}\n ').format(sql.SQL(imei_list_sql), sql.Literal(tuple(exempted_device_types))).as_string(conn) | -3,477,251,885,230,759,000 | Function to return SQL filtering out exempted device types. | src/dirbs/utils.py | filter_imei_list_sql_by_device_type | nealmadhu/DIRBS-Core | python | def filter_imei_list_sql_by_device_type(conn, exempted_device_types, imei_list_sql):
return sql.SQL('SELECT imei_norm\n FROM (SELECT imei_norm,\n SUBSTRING(imei_norm, 1, 8) AS tac\n FROM ({0}) imeis) imeis_with_tac\n JOIN gsma_data\n USING (tac)\n WHERE device_type NOT IN {1}\n ').format(sql.SQL(imei_list_sql), sql.Literal(tuple(exempted_device_types))).as_string(conn) |
def format_datetime_for_report(timestamp_with_tz):
"Format the datetime into a string for reporting.\n\n Replace this function with datetime.isoformat(sep=' ', timespec='seconds') after we update python version to 3.6\n "
if (timestamp_with_tz is not None):
return timestamp_with_tz.strftime('%Y-%m-%d %X')
else:
return None | -4,662,658,461,025,122,000 | Format the datetime into a string for reporting.
Replace this function with datetime.isoformat(sep=' ', timespec='seconds') after we update python version to 3.6 | src/dirbs/utils.py | format_datetime_for_report | nealmadhu/DIRBS-Core | python | def format_datetime_for_report(timestamp_with_tz):
"Format the datetime into a string for reporting.\n\n Replace this function with datetime.isoformat(sep=' ', timespec='seconds') after we update python version to 3.6\n "
if (timestamp_with_tz is not None):
return timestamp_with_tz.strftime('%Y-%m-%d %X')
else:
return None |
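
The docstring above points at the Python 3.6+ replacement; a small comparison sketch, where the timestamp is illustrative and %X is locale-dependent:

import datetime

ts = datetime.datetime(2017, 11, 1, 2, 15, 0)
print(format_datetime_for_report(ts))             # '2017-11-01 02:15:00' under the C locale
print(ts.isoformat(sep=' ', timespec='seconds'))  # '2017-11-01 02:15:00' on Python 3.6+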
def validate_exempted_device_types(conn, config):
'Method to validate exempted device types specified in config.'
with conn.cursor() as cursor:
logger = logging.getLogger('dirbs.config')
exempted_device_types = config.region_config.exempted_device_types
if (len(exempted_device_types) > 0):
cursor.execute('SELECT DISTINCT device_type FROM gsma_data')
all_device_types = [x.device_type for x in cursor]
if (len(all_device_types) == 0):
logger.warning('RegionConfig: Ignoring setting exempted_device_types={0} as GSMA TAC database not imported or no device types found.'.format(exempted_device_types))
else:
invalid_device_types = (set(exempted_device_types) - set(all_device_types))
if (len(invalid_device_types) > 0):
msg = "RegionConfig: exempted_device_types '{0}' is/are not valid device type(s). The valid GSMA device types are: '{1}'".format(invalid_device_types, all_device_types)
logger.error(msg)
raise ConfigParseException(msg) | -8,926,925,077,652,810,000 | Method to validate exempted device types specified in config. | src/dirbs/utils.py | validate_exempted_device_types | nealmadhu/DIRBS-Core | python | def validate_exempted_device_types(conn, config):
with conn.cursor() as cursor:
logger = logging.getLogger('dirbs.config')
exempted_device_types = config.region_config.exempted_device_types
if (len(exempted_device_types) > 0):
cursor.execute('SELECT DISTINCT device_type FROM gsma_data')
all_device_types = [x.device_type for x in cursor]
if (len(all_device_types) == 0):
logger.warning('RegionConfig: Ignoring setting exempted_device_types={0} as GSMA TAC database not imported or no device types found.'.format(exempted_device_types))
else:
invalid_device_types = (set(exempted_device_types) - set(all_device_types))
if (len(invalid_device_types) > 0):
msg = "RegionConfig: exempted_device_types '{0}' is/are not valid device type(s). The valid GSMA device types are: '{1}'".format(invalid_device_types, all_device_types)
logger.error(msg)
raise ConfigParseException(msg) |
def log_analysis_window(logger, analysis_start_date, analysis_end_date, start_message='', start_date_inclusive=True, end_date_inclusive=False):
'Helper function to print out window on used for analysis and list generation using interval notation.'
start_date_interval_notation = ('[' if start_date_inclusive else '(')
end_date_interval_notation = (']' if end_date_inclusive else ')')
logger.debug('{0} {sd_interval_notation}{start_date}, {end_date}{ed_interval_notation}'.format(start_message, sd_interval_notation=start_date_interval_notation, start_date=analysis_start_date, end_date=analysis_end_date, ed_interval_notation=end_date_interval_notation)) | 1,355,111,144,782,573,000 | Helper function to print out window on used for analysis and list generation using interval notation. | src/dirbs/utils.py | log_analysis_window | nealmadhu/DIRBS-Core | python | def log_analysis_window(logger, analysis_start_date, analysis_end_date, start_message=, start_date_inclusive=True, end_date_inclusive=False):
start_date_interval_notation = ('[' if start_date_inclusive else '(')
end_date_interval_notation = (']' if end_date_inclusive else ')')
logger.debug('{0} {sd_interval_notation}{start_date}, {end_date}{ed_interval_notation}'.format(start_message, sd_interval_notation=start_date_interval_notation, start_date=analysis_start_date, end_date=analysis_end_date, ed_interval_notation=end_date_interval_notation)) |
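
A usage sketch for log_analysis_window above; the logger name and window dates are illustrative:

import datetime
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('dirbs.listgen')
log_analysis_window(logger, datetime.date(2017, 10, 1), datetime.date(2017, 11, 1),
                    start_message='Analysis window:')
# Logs: Analysis window: [2017-10-01, 2017-11-01)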