repository_name
stringlengths
7
55
func_path_in_repository
stringlengths
4
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
75
104k
language
stringclasses
1 value
func_code_string
stringlengths
75
104k
func_code_tokens
sequencelengths
19
28.4k
func_documentation_string
stringlengths
1
46.9k
func_documentation_tokens
sequencelengths
1
1.97k
split_name
stringclasses
1 value
func_code_url
stringlengths
87
315
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
transpose_axes
def transpose_axes(image, axes, asaxes=None):
    """Return image with its axes permuted to match specified axes.

    A view is returned if possible.

    Parameters
    ----------
    image : numpy.ndarray
        Array to permute; one axis label in 'axes' per dimension.
    axes : str
        Labels of the image's current dimensions.
    asaxes : str, optional
        Target axes order. Defaults to 'CTZYX'. Missing axes are inserted
        as length-1 dimensions.

    Raises ValueError if 'axes' contains a label not found in 'asaxes'.

    >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
    (5, 2, 1, 3, 4)

    """
    if asaxes is None:
        # apply the default before validating; the previous version checked
        # membership against None first, raising TypeError on the default call
        asaxes = 'CTZYX'
    for ax in axes:
        if ax not in asaxes:
            raise ValueError('unknown axis %s' % ax)
    # prepend missing axes as length-1 dimensions
    shape = image.shape
    for ax in reversed(asaxes):
        if ax not in axes:
            axes = ax + axes
            shape = (1,) + shape
    image = image.reshape(shape)
    # permute dimensions into the order requested by asaxes
    image = image.transpose([axes.index(ax) for ax in asaxes])
    return image
python
def transpose_axes(image, axes, asaxes=None): """Return image with its axes permuted to match specified axes. A view is returned if possible. >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape (5, 2, 1, 3, 4) """ for ax in axes: if ax not in asaxes: raise ValueError('unknown axis %s' % ax) # add missing axes to image if asaxes is None: asaxes = 'CTZYX' shape = image.shape for ax in reversed(asaxes): if ax not in axes: axes = ax + axes shape = (1,) + shape image = image.reshape(shape) # transpose axes image = image.transpose([axes.index(ax) for ax in asaxes]) return image
[ "def", "transpose_axes", "(", "image", ",", "axes", ",", "asaxes", "=", "None", ")", ":", "for", "ax", "in", "axes", ":", "if", "ax", "not", "in", "asaxes", ":", "raise", "ValueError", "(", "'unknown axis %s'", "%", "ax", ")", "# add missing axes to image", "if", "asaxes", "is", "None", ":", "asaxes", "=", "'CTZYX'", "shape", "=", "image", ".", "shape", "for", "ax", "in", "reversed", "(", "asaxes", ")", ":", "if", "ax", "not", "in", "axes", ":", "axes", "=", "ax", "+", "axes", "shape", "=", "(", "1", ",", ")", "+", "shape", "image", "=", "image", ".", "reshape", "(", "shape", ")", "# transpose axes", "image", "=", "image", ".", "transpose", "(", "[", "axes", ".", "index", "(", "ax", ")", "for", "ax", "in", "asaxes", "]", ")", "return", "image" ]
Return image with its axes permuted to match specified axes. A view is returned if possible. >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape (5, 2, 1, 3, 4)
[ "Return", "image", "with", "its", "axes", "permuted", "to", "match", "specified", "axes", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9715-L9738
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
reshape_axes
def reshape_axes(axes, shape, newshape, unknown=None):
    """Return axes matching new shape.

    By default, unknown dimensions are labelled 'Q'.

    >>> reshape_axes('YXS', (219, 301, 1), (219, 301))
    'YX'
    >>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1))
    'QQYQXQ'

    """
    shape = tuple(shape)
    newshape = tuple(newshape)
    if len(axes) != len(shape):
        raise ValueError('axes do not match shape')
    if product(shape) != product(newshape):
        raise ValueError('cannot reshape %s to %s' % (shape, newshape))
    if not axes or not newshape:
        return ''
    lendiff = max(0, len(shape) - len(newshape))
    if lendiff:
        newshape += (1,) * lendiff
    # walk both shapes right to left, matching each new dimension against
    # the old one; dimensions with no match are labelled 'unknown'
    labels = []
    idx = len(shape) - 1
    prod_new = 1
    prod_old = 1
    for dim in reversed(newshape):
        prod_new *= dim
        while idx > 0 and shape[idx] == 1 and dim != 1:
            idx -= 1  # skip length-1 dimensions of the old shape
        if dim == shape[idx] and prod_new == prod_old * shape[idx]:
            prod_old *= shape[idx]
            labels.append(axes[idx])
            idx -= 1
        else:
            if not unknown:
                unknown = 'Q'
            labels.append(unknown)
    return ''.join(reversed(labels[lendiff:]))
python
def reshape_axes(axes, shape, newshape, unknown=None): """Return axes matching new shape. By default, unknown dimensions are labelled 'Q'. >>> reshape_axes('YXS', (219, 301, 1), (219, 301)) 'YX' >>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1)) 'QQYQXQ' """ shape = tuple(shape) newshape = tuple(newshape) if len(axes) != len(shape): raise ValueError('axes do not match shape') size = product(shape) newsize = product(newshape) if size != newsize: raise ValueError('cannot reshape %s to %s' % (shape, newshape)) if not axes or not newshape: return '' lendiff = max(0, len(shape) - len(newshape)) if lendiff: newshape = newshape + (1,) * lendiff i = len(shape)-1 prodns = 1 prods = 1 result = [] for ns in newshape[::-1]: prodns *= ns while i > 0 and shape[i] == 1 and ns != 1: i -= 1 if ns == shape[i] and prodns == prods*shape[i]: prods *= shape[i] result.append(axes[i]) i -= 1 elif unknown: result.append(unknown) else: unknown = 'Q' result.append(unknown) return ''.join(reversed(result[lendiff:]))
[ "def", "reshape_axes", "(", "axes", ",", "shape", ",", "newshape", ",", "unknown", "=", "None", ")", ":", "shape", "=", "tuple", "(", "shape", ")", "newshape", "=", "tuple", "(", "newshape", ")", "if", "len", "(", "axes", ")", "!=", "len", "(", "shape", ")", ":", "raise", "ValueError", "(", "'axes do not match shape'", ")", "size", "=", "product", "(", "shape", ")", "newsize", "=", "product", "(", "newshape", ")", "if", "size", "!=", "newsize", ":", "raise", "ValueError", "(", "'cannot reshape %s to %s'", "%", "(", "shape", ",", "newshape", ")", ")", "if", "not", "axes", "or", "not", "newshape", ":", "return", "''", "lendiff", "=", "max", "(", "0", ",", "len", "(", "shape", ")", "-", "len", "(", "newshape", ")", ")", "if", "lendiff", ":", "newshape", "=", "newshape", "+", "(", "1", ",", ")", "*", "lendiff", "i", "=", "len", "(", "shape", ")", "-", "1", "prodns", "=", "1", "prods", "=", "1", "result", "=", "[", "]", "for", "ns", "in", "newshape", "[", ":", ":", "-", "1", "]", ":", "prodns", "*=", "ns", "while", "i", ">", "0", "and", "shape", "[", "i", "]", "==", "1", "and", "ns", "!=", "1", ":", "i", "-=", "1", "if", "ns", "==", "shape", "[", "i", "]", "and", "prodns", "==", "prods", "*", "shape", "[", "i", "]", ":", "prods", "*=", "shape", "[", "i", "]", "result", ".", "append", "(", "axes", "[", "i", "]", ")", "i", "-=", "1", "elif", "unknown", ":", "result", ".", "append", "(", "unknown", ")", "else", ":", "unknown", "=", "'Q'", "result", ".", "append", "(", "unknown", ")", "return", "''", ".", "join", "(", "reversed", "(", "result", "[", "lendiff", ":", "]", ")", ")" ]
Return axes matching new shape. By default, unknown dimensions are labelled 'Q'. >>> reshape_axes('YXS', (219, 301, 1), (219, 301)) 'YX' >>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1)) 'QQYQXQ'
[ "Return", "axes", "matching", "new", "shape", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9741-L9786
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
stack_pages
def stack_pages(pages, out=None, maxworkers=None, **kwargs):
    """Read data from sequence of TiffPage and stack them vertically.

    Additional parameters are passed to the TiffPage.asarray function.

    Parameters
    ----------
    pages : sequence of TiffPage or None
        Pages to read; None entries leave their slice of 'out' untouched.
    out : see create_output
        Destination array specification; shape is (len(pages),) + page shape.
    maxworkers : int, optional
        Number of threads used to decode pages. If None, a heuristic based
        on the first page's compression/tiling picks a value; 0 means half
        the CPU count.

    """
    npages = len(pages)
    if npages == 0:
        raise ValueError('no pages')

    if npages == 1:
        # single page: delegate directly, forwarding maxworkers unchanged
        kwargs['maxworkers'] = maxworkers
        return pages[0].asarray(out=out, **kwargs)

    # first non-None page's keyframe defines shape and dtype of the stack
    page0 = next(p for p in pages if p is not None).keyframe
    # decode once up front so decoding errors surface here;
    page0.asarray(validate=None)  # ThreadPoolExecutor swallows exceptions
    shape = (npages,) + page0.shape
    dtype = page0.dtype
    out = create_output(out, shape, dtype)

    if maxworkers is None:
        # heuristic: parallelize across pages only for untiled compressed
        # data; otherwise parallelize within a page (or not at all)
        if page0.compression > 1:
            if page0.is_tiled:
                maxworkers = 1
                kwargs['maxworkers'] = 0
            else:
                maxworkers = 0
        else:
            maxworkers = 1
    if maxworkers == 0:
        import multiprocessing  # noqa: delay import
        maxworkers = multiprocessing.cpu_count() // 2
    if maxworkers > 1:
        # threads work page-by-page; disable per-page parallelism
        kwargs['maxworkers'] = 1

    # NOTE(review): assigning a bool here relies on the FileHandle.lock
    # setter turning True into an actual lock object — confirm
    page0.parent.filehandle.lock = maxworkers > 1

    filecache = OpenFileCache(size=max(4, maxworkers),
                              lock=page0.parent.filehandle.lock)

    def func(page, index, out=out, filecache=filecache, kwargs=kwargs):
        """Read, decode, and copy page data."""
        if page is not None:
            filecache.open(page.parent.filehandle)
            out[index] = page.asarray(lock=filecache.lock, reopen=False,
                                      validate=False, **kwargs)
            filecache.close(page.parent.filehandle)

    if maxworkers < 2:
        # sequential path: exceptions propagate normally
        for i, page in enumerate(pages):
            func(page, i)
    else:
        # TODO: add exception handling
        with ThreadPoolExecutor(maxworkers) as executor:
            executor.map(func, pages, range(npages))

    filecache.clear()
    # restore the filehandle to its unlocked state
    page0.parent.filehandle.lock = None

    return out
python
def stack_pages(pages, out=None, maxworkers=None, **kwargs): """Read data from sequence of TiffPage and stack them vertically. Additional parameters are passsed to the TiffPage.asarray function. """ npages = len(pages) if npages == 0: raise ValueError('no pages') if npages == 1: kwargs['maxworkers'] = maxworkers return pages[0].asarray(out=out, **kwargs) page0 = next(p for p in pages if p is not None).keyframe page0.asarray(validate=None) # ThreadPoolExecutor swallows exceptions shape = (npages,) + page0.shape dtype = page0.dtype out = create_output(out, shape, dtype) if maxworkers is None: if page0.compression > 1: if page0.is_tiled: maxworkers = 1 kwargs['maxworkers'] = 0 else: maxworkers = 0 else: maxworkers = 1 if maxworkers == 0: import multiprocessing # noqa: delay import maxworkers = multiprocessing.cpu_count() // 2 if maxworkers > 1: kwargs['maxworkers'] = 1 page0.parent.filehandle.lock = maxworkers > 1 filecache = OpenFileCache(size=max(4, maxworkers), lock=page0.parent.filehandle.lock) def func(page, index, out=out, filecache=filecache, kwargs=kwargs): """Read, decode, and copy page data.""" if page is not None: filecache.open(page.parent.filehandle) out[index] = page.asarray(lock=filecache.lock, reopen=False, validate=False, **kwargs) filecache.close(page.parent.filehandle) if maxworkers < 2: for i, page in enumerate(pages): func(page, i) else: # TODO: add exception handling with ThreadPoolExecutor(maxworkers) as executor: executor.map(func, pages, range(npages)) filecache.clear() page0.parent.filehandle.lock = None return out
[ "def", "stack_pages", "(", "pages", ",", "out", "=", "None", ",", "maxworkers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "npages", "=", "len", "(", "pages", ")", "if", "npages", "==", "0", ":", "raise", "ValueError", "(", "'no pages'", ")", "if", "npages", "==", "1", ":", "kwargs", "[", "'maxworkers'", "]", "=", "maxworkers", "return", "pages", "[", "0", "]", ".", "asarray", "(", "out", "=", "out", ",", "*", "*", "kwargs", ")", "page0", "=", "next", "(", "p", "for", "p", "in", "pages", "if", "p", "is", "not", "None", ")", ".", "keyframe", "page0", ".", "asarray", "(", "validate", "=", "None", ")", "# ThreadPoolExecutor swallows exceptions", "shape", "=", "(", "npages", ",", ")", "+", "page0", ".", "shape", "dtype", "=", "page0", ".", "dtype", "out", "=", "create_output", "(", "out", ",", "shape", ",", "dtype", ")", "if", "maxworkers", "is", "None", ":", "if", "page0", ".", "compression", ">", "1", ":", "if", "page0", ".", "is_tiled", ":", "maxworkers", "=", "1", "kwargs", "[", "'maxworkers'", "]", "=", "0", "else", ":", "maxworkers", "=", "0", "else", ":", "maxworkers", "=", "1", "if", "maxworkers", "==", "0", ":", "import", "multiprocessing", "# noqa: delay import", "maxworkers", "=", "multiprocessing", ".", "cpu_count", "(", ")", "//", "2", "if", "maxworkers", ">", "1", ":", "kwargs", "[", "'maxworkers'", "]", "=", "1", "page0", ".", "parent", ".", "filehandle", ".", "lock", "=", "maxworkers", ">", "1", "filecache", "=", "OpenFileCache", "(", "size", "=", "max", "(", "4", ",", "maxworkers", ")", ",", "lock", "=", "page0", ".", "parent", ".", "filehandle", ".", "lock", ")", "def", "func", "(", "page", ",", "index", ",", "out", "=", "out", ",", "filecache", "=", "filecache", ",", "kwargs", "=", "kwargs", ")", ":", "\"\"\"Read, decode, and copy page data.\"\"\"", "if", "page", "is", "not", "None", ":", "filecache", ".", "open", "(", "page", ".", "parent", ".", "filehandle", ")", "out", "[", "index", "]", "=", "page", ".", "asarray", "(", "lock", "=", 
"filecache", ".", "lock", ",", "reopen", "=", "False", ",", "validate", "=", "False", ",", "*", "*", "kwargs", ")", "filecache", ".", "close", "(", "page", ".", "parent", ".", "filehandle", ")", "if", "maxworkers", "<", "2", ":", "for", "i", ",", "page", "in", "enumerate", "(", "pages", ")", ":", "func", "(", "page", ",", "i", ")", "else", ":", "# TODO: add exception handling", "with", "ThreadPoolExecutor", "(", "maxworkers", ")", "as", "executor", ":", "executor", ".", "map", "(", "func", ",", "pages", ",", "range", "(", "npages", ")", ")", "filecache", ".", "clear", "(", ")", "page0", ".", "parent", ".", "filehandle", ".", "lock", "=", "None", "return", "out" ]
Read data from sequence of TiffPage and stack them vertically. Additional parameters are passed to the TiffPage.asarray function.
[ "Read", "data", "from", "sequence", "of", "TiffPage", "and", "stack", "them", "vertically", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9789-L9848
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
clean_offsetscounts
def clean_offsetscounts(offsets, counts):
    """Return cleaned offsets and byte counts.

    Remove entries whose byte count is zero; raise ValueError for entries
    with a nonzero byte count but zero offset.
    Use to sanitize StripOffsets and StripByteCounts tag values.

    Parameters
    ----------
    offsets, counts : sequences of int
        Must have equal length.

    Returns (offsets, counts) as lists, trimmed to the valid entries. If
    no entry is valid, a single-element pair built from the first entry
    is returned.

    """
    # TODO: cythonize this
    offsets = list(offsets)
    counts = list(counts)
    size = len(offsets)
    if size != len(counts):
        raise ValueError('StripOffsets and StripByteCounts mismatch')
    j = 0  # number of valid entries, compacted to the front of both lists
    for i, (o, b) in enumerate(zip(offsets, counts)):
        if b > 0:
            if o > 0:
                if i > j:
                    # shift valid entry over previously removed ones
                    offsets[j] = o
                    counts[j] = b
                j += 1
                continue
            raise ValueError('invalid offset')
        log.warning('clean_offsetscounts: empty bytecount')
    if size == j:
        # nothing was removed. The previous comparison 'size == len(offsets)'
        # was always true (the list length never changes), so the trimming
        # below was unreachable and stale tail entries were returned.
        return offsets, counts
    if j == 0:
        # all entries invalid: keep the first so callers get one strip
        return [offsets[0]], [counts[0]]
    return offsets[:j], counts[:j]
python
def clean_offsetscounts(offsets, counts): """Return cleaned offsets and byte counts. Remove zero offsets and counts. Use to sanitize StripOffsets and StripByteCounts tag values. """ # TODO: cythonize this offsets = list(offsets) counts = list(counts) size = len(offsets) if size != len(counts): raise ValueError('StripOffsets and StripByteCounts mismatch') j = 0 for i, (o, b) in enumerate(zip(offsets, counts)): if b > 0: if o > 0: if i > j: offsets[j] = o counts[j] = b j += 1 continue raise ValueError('invalid offset') log.warning('clean_offsetscounts: empty bytecount') if size == len(offsets): return offsets, counts if j == 0: return [offsets[0]], [counts[0]] return offsets[:j], counts[:j]
[ "def", "clean_offsetscounts", "(", "offsets", ",", "counts", ")", ":", "# TODO: cythonize this", "offsets", "=", "list", "(", "offsets", ")", "counts", "=", "list", "(", "counts", ")", "size", "=", "len", "(", "offsets", ")", "if", "size", "!=", "len", "(", "counts", ")", ":", "raise", "ValueError", "(", "'StripOffsets and StripByteCounts mismatch'", ")", "j", "=", "0", "for", "i", ",", "(", "o", ",", "b", ")", "in", "enumerate", "(", "zip", "(", "offsets", ",", "counts", ")", ")", ":", "if", "b", ">", "0", ":", "if", "o", ">", "0", ":", "if", "i", ">", "j", ":", "offsets", "[", "j", "]", "=", "o", "counts", "[", "j", "]", "=", "b", "j", "+=", "1", "continue", "raise", "ValueError", "(", "'invalid offset'", ")", "log", ".", "warning", "(", "'clean_offsetscounts: empty bytecount'", ")", "if", "size", "==", "len", "(", "offsets", ")", ":", "return", "offsets", ",", "counts", "if", "j", "==", "0", ":", "return", "[", "offsets", "[", "0", "]", "]", ",", "[", "counts", "[", "0", "]", "]", "return", "offsets", "[", ":", "j", "]", ",", "counts", "[", ":", "j", "]" ]
Return cleaned offsets and byte counts. Remove zero offsets and counts. Use to sanitize StripOffsets and StripByteCounts tag values.
[ "Return", "cleaned", "offsets", "and", "byte", "counts", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9851-L9879
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
buffered_read
def buffered_read(fh, lock, offsets, bytecounts, buffersize=None):
    """Return iterator over segments read from file.

    Segments are read in batches of at least 'buffersize' bytes
    (default 64 MiB) while 'lock' is held, then yielded with the
    lock released.

    """
    if buffersize is None:
        buffersize = 2**26
    total = len(offsets)
    index = 0
    while index < total:
        segments = []
        with lock:
            batchsize = 0
            while batchsize < buffersize and index < total:
                fh.seek(offsets[index])
                nbytes = bytecounts[index]
                segments.append(fh.read(nbytes))
                # buffer = bytearray(nbytes)
                # n = fh.readinto(buffer)
                # segments.append(buffer[:n])
                batchsize += nbytes
                index += 1
        yield from segments
python
def buffered_read(fh, lock, offsets, bytecounts, buffersize=None): """Return iterator over segments read from file.""" if buffersize is None: buffersize = 2**26 length = len(offsets) i = 0 while i < length: data = [] with lock: size = 0 while size < buffersize and i < length: fh.seek(offsets[i]) bytecount = bytecounts[i] data.append(fh.read(bytecount)) # buffer = bytearray(bytecount) # n = fh.readinto(buffer) # data.append(buffer[:n]) size += bytecount i += 1 for segment in data: yield segment
[ "def", "buffered_read", "(", "fh", ",", "lock", ",", "offsets", ",", "bytecounts", ",", "buffersize", "=", "None", ")", ":", "if", "buffersize", "is", "None", ":", "buffersize", "=", "2", "**", "26", "length", "=", "len", "(", "offsets", ")", "i", "=", "0", "while", "i", "<", "length", ":", "data", "=", "[", "]", "with", "lock", ":", "size", "=", "0", "while", "size", "<", "buffersize", "and", "i", "<", "length", ":", "fh", ".", "seek", "(", "offsets", "[", "i", "]", ")", "bytecount", "=", "bytecounts", "[", "i", "]", "data", ".", "append", "(", "fh", ".", "read", "(", "bytecount", ")", ")", "# buffer = bytearray(bytecount)", "# n = fh.readinto(buffer)", "# data.append(buffer[:n])", "size", "+=", "bytecount", "i", "+=", "1", "for", "segment", "in", "data", ":", "yield", "segment" ]
Return iterator over segments read from file.
[ "Return", "iterator", "over", "segments", "read", "from", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9882-L9902
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
create_output
def create_output(out, shape, dtype, mode='w+', suffix=None):
    """Return numpy array where image data of shape and dtype can be copied.

    The 'out' parameter may have the following values or types:

    None
        An empty array of shape and dtype is created and returned.
    numpy.ndarray
        An existing writable array of compatible dtype and shape. A view of
        the same array is returned after verification.
    'memmap' or 'memmap:tempdir'
        A memory-map to an array stored in a temporary binary file on disk
        is created and returned.
    str or open file
        The file name or file object used to create a memory-map to an array
        stored in a binary file on disk. The created memory-mapped array is
        returned.

    """
    if out is None:
        # plain in-memory array
        return numpy.zeros(shape, dtype)
    if isinstance(out, numpy.ndarray):
        # verify the caller-supplied array is compatible and return a view
        if product(out.shape) != product(shape):
            raise ValueError('incompatible output shape')
        if not numpy.can_cast(dtype, out.dtype):
            raise ValueError('incompatible output dtype')
        return out.reshape(shape)
    if isinstance(out, str) and out.startswith('memmap'):
        # memory-map backed by a temporary file, optionally in 'tempdir'
        import tempfile  # noqa: delay import
        location = out[7:] if len(out) > 7 else None
        if suffix is None:
            suffix = '.memmap'
        with tempfile.NamedTemporaryFile(dir=location, suffix=suffix) as fh:
            return numpy.memmap(fh, shape=shape, dtype=dtype, mode=mode)
    # file name, open file, or pathlib.Path: memory-map a binary file on disk
    if isinstance(out, pathlib.Path):
        out = str(out)
    return numpy.memmap(out, shape=shape, dtype=dtype, mode=mode)
python
def create_output(out, shape, dtype, mode='w+', suffix=None): """Return numpy array where image data of shape and dtype can be copied. The 'out' parameter may have the following values or types: None An empty array of shape and dtype is created and returned. numpy.ndarray An existing writable array of compatible dtype and shape. A view of the same array is returned after verification. 'memmap' or 'memmap:tempdir' A memory-map to an array stored in a temporary binary file on disk is created and returned. str or open file The file name or file object used to create a memory-map to an array stored in a binary file on disk. The created memory-mapped array is returned. """ if out is None: return numpy.zeros(shape, dtype) if isinstance(out, str) and out[:6] == 'memmap': import tempfile # noqa: delay import tempdir = out[7:] if len(out) > 7 else None if suffix is None: suffix = '.memmap' with tempfile.NamedTemporaryFile(dir=tempdir, suffix=suffix) as fh: return numpy.memmap(fh, shape=shape, dtype=dtype, mode=mode) if isinstance(out, numpy.ndarray): if product(shape) != product(out.shape): raise ValueError('incompatible output shape') if not numpy.can_cast(dtype, out.dtype): raise ValueError('incompatible output dtype') return out.reshape(shape) if isinstance(out, pathlib.Path): out = str(out) return numpy.memmap(out, shape=shape, dtype=dtype, mode=mode)
[ "def", "create_output", "(", "out", ",", "shape", ",", "dtype", ",", "mode", "=", "'w+'", ",", "suffix", "=", "None", ")", ":", "if", "out", "is", "None", ":", "return", "numpy", ".", "zeros", "(", "shape", ",", "dtype", ")", "if", "isinstance", "(", "out", ",", "str", ")", "and", "out", "[", ":", "6", "]", "==", "'memmap'", ":", "import", "tempfile", "# noqa: delay import", "tempdir", "=", "out", "[", "7", ":", "]", "if", "len", "(", "out", ")", ">", "7", "else", "None", "if", "suffix", "is", "None", ":", "suffix", "=", "'.memmap'", "with", "tempfile", ".", "NamedTemporaryFile", "(", "dir", "=", "tempdir", ",", "suffix", "=", "suffix", ")", "as", "fh", ":", "return", "numpy", ".", "memmap", "(", "fh", ",", "shape", "=", "shape", ",", "dtype", "=", "dtype", ",", "mode", "=", "mode", ")", "if", "isinstance", "(", "out", ",", "numpy", ".", "ndarray", ")", ":", "if", "product", "(", "shape", ")", "!=", "product", "(", "out", ".", "shape", ")", ":", "raise", "ValueError", "(", "'incompatible output shape'", ")", "if", "not", "numpy", ".", "can_cast", "(", "dtype", ",", "out", ".", "dtype", ")", ":", "raise", "ValueError", "(", "'incompatible output dtype'", ")", "return", "out", ".", "reshape", "(", "shape", ")", "if", "isinstance", "(", "out", ",", "pathlib", ".", "Path", ")", ":", "out", "=", "str", "(", "out", ")", "return", "numpy", ".", "memmap", "(", "out", ",", "shape", "=", "shape", ",", "dtype", "=", "dtype", ",", "mode", "=", "mode", ")" ]
Return numpy array where image data of shape and dtype can be copied. The 'out' parameter may have the following values or types: None An empty array of shape and dtype is created and returned. numpy.ndarray An existing writable array of compatible dtype and shape. A view of the same array is returned after verification. 'memmap' or 'memmap:tempdir' A memory-map to an array stored in a temporary binary file on disk is created and returned. str or open file The file name or file object used to create a memory-map to an array stored in a binary file on disk. The created memory-mapped array is returned.
[ "Return", "numpy", "array", "where", "image", "data", "of", "shape", "and", "dtype", "can", "be", "copied", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9905-L9941
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
matlabstr2py
def matlabstr2py(string):
    """Return Python object from Matlab string representation.

    Return str, bool, int, float, list (Matlab arrays or cells), or
    dict (Matlab structures) types.

    Use to access ScanImage metadata.

    >>> matlabstr2py('1')
    1
    >>> matlabstr2py("['x y z' true false; 1 2.0 -3e4; NaN Inf @class]")
    [['x y z', True, False], [1, 2.0, -30000.0], [nan, inf, '@class']]
    >>> d = matlabstr2py("SI.hChannels.channelType = {'stripe' 'stripe'}\\n"
    ...                  "SI.hChannels.channelsActive = 2")
    >>> d['SI.hChannels.channelType']
    ['stripe', 'stripe']

    """
    # TODO: handle invalid input
    # TODO: review unboxing of multidimensional arrays

    def lex(s):
        # return sequence of tokens from matlab string representation
        # rows (';') and brackets are rewritten so every row becomes its
        # own bracketed group, giving parse() a uniform nesting structure
        tokens = ['[']
        while True:
            t, i = next_token(s)
            if t is None:
                break
            if t == ';':
                tokens.extend((']', '['))
            elif t == '[':
                tokens.extend(('[', '['))
            elif t == ']':
                tokens.extend((']', ']'))
            else:
                tokens.append(t)
            s = s[i:]
        tokens.append(']')
        return tokens

    def next_token(s):
        # return next token in matlab string and the index just past it
        length = len(s)
        if length == 0:
            return None, 0
        i = 0
        while i < length and s[i] == ' ':
            i += 1
        if i == length:
            return None, i
        if s[i] in '{[;]}':
            return s[i], i + 1
        if s[i] == "'":
            # quoted string token, including the quotes
            j = i + 1
            while j < length and s[j] != "'":
                j += 1
            return s[i: j+1], j + 1
        if s[i] == '<':
            # angle-bracketed token, e.g. Matlab object placeholders
            j = i + 1
            while j < length and s[j] != '>':
                j += 1
            return s[i: j+1], j + 1
        # bare token: runs until whitespace or a delimiter
        j = i
        while j < length and not s[j] in ' {[;]}':
            j += 1
        return s[i:j], j

    def value(s, fail=False):
        # return Python value of token; with fail=True raise ValueError
        # for anything that is not a single well-formed scalar/string
        s = s.strip()
        if not s:
            return s
        if len(s) == 1:
            try:
                return int(s)
            except Exception:
                if fail:
                    raise ValueError()
                return s
        if s[0] == "'":
            # quoted string; strip the quotes
            if fail and s[-1] != "'" or "'" in s[1:-1]:
                raise ValueError()
            return s[1:-1]
        if s[0] == '<':
            # keep angle-bracketed tokens verbatim
            if fail and s[-1] != '>' or '<' in s[1:-1]:
                raise ValueError()
            return s
        if fail and any(i in s for i in " ';[]{}"):
            raise ValueError()
        if s[0] == '@':
            # function handle; keep verbatim
            return s
        if s in ('true', 'True'):
            return True
        if s in ('false', 'False'):
            return False
        if s[:6] == 'zeros(':
            return numpy.zeros([int(i) for i in s[6:-1].split(',')]).tolist()
        if s[:5] == 'ones(':
            return numpy.ones([int(i) for i in s[5:-1].split(',')]).tolist()
        if '.' in s or 'e' in s:
            try:
                return float(s)
            except Exception:
                pass
        try:
            return int(s)
        except Exception:
            pass
        try:
            return float(s)  # nan, inf
        except Exception:
            if fail:
                raise ValueError()
        return s

    def parse(s):
        # return Python value from string representation of Matlab value
        s = s.strip()
        try:
            # fast path: a single scalar or string
            return value(s, fail=True)
        except ValueError:
            pass
        # nested arrays/cells: build lists driven by the token stream
        result = add2 = []
        levels = [add2]
        for t in lex(s):
            if t in '[{':
                add2 = []
                levels.append(add2)
            elif t in ']}':
                x = levels.pop()
                if len(x) == 1 and isinstance(x[0], (list, str)):
                    # unbox single-element groups
                    x = x[0]
                add2 = levels[-1]
                add2.append(x)
            else:
                add2.append(value(t))
        if len(result) == 1 and isinstance(result[0], (list, str)):
            result = result[0]
        return result

    if '\r' in string or '\n' in string:
        # structure: one 'key = value' assignment per line
        d = {}
        for line in string.splitlines():
            line = line.strip()
            if not line or line[0] == '%':
                # skip blanks and Matlab comments
                continue
            k, v = line.split('=', 1)
            k = k.strip()
            if any(c in k for c in " ';[]{}<>"):
                # skip lines whose left side is not a plain identifier
                continue
            d[k] = parse(v)
        return d
    return parse(string)
python
def matlabstr2py(string): """Return Python object from Matlab string representation. Return str, bool, int, float, list (Matlab arrays or cells), or dict (Matlab structures) types. Use to access ScanImage metadata. >>> matlabstr2py('1') 1 >>> matlabstr2py("['x y z' true false; 1 2.0 -3e4; NaN Inf @class]") [['x y z', True, False], [1, 2.0, -30000.0], [nan, inf, '@class']] >>> d = matlabstr2py("SI.hChannels.channelType = {'stripe' 'stripe'}\\n" ... "SI.hChannels.channelsActive = 2") >>> d['SI.hChannels.channelType'] ['stripe', 'stripe'] """ # TODO: handle invalid input # TODO: review unboxing of multidimensional arrays def lex(s): # return sequence of tokens from matlab string representation tokens = ['['] while True: t, i = next_token(s) if t is None: break if t == ';': tokens.extend((']', '[')) elif t == '[': tokens.extend(('[', '[')) elif t == ']': tokens.extend((']', ']')) else: tokens.append(t) s = s[i:] tokens.append(']') return tokens def next_token(s): # return next token in matlab string length = len(s) if length == 0: return None, 0 i = 0 while i < length and s[i] == ' ': i += 1 if i == length: return None, i if s[i] in '{[;]}': return s[i], i + 1 if s[i] == "'": j = i + 1 while j < length and s[j] != "'": j += 1 return s[i: j+1], j + 1 if s[i] == '<': j = i + 1 while j < length and s[j] != '>': j += 1 return s[i: j+1], j + 1 j = i while j < length and not s[j] in ' {[;]}': j += 1 return s[i:j], j def value(s, fail=False): # return Python value of token s = s.strip() if not s: return s if len(s) == 1: try: return int(s) except Exception: if fail: raise ValueError() return s if s[0] == "'": if fail and s[-1] != "'" or "'" in s[1:-1]: raise ValueError() return s[1:-1] if s[0] == '<': if fail and s[-1] != '>' or '<' in s[1:-1]: raise ValueError() return s if fail and any(i in s for i in " ';[]{}"): raise ValueError() if s[0] == '@': return s if s in ('true', 'True'): return True if s in ('false', 'False'): return False if s[:6] == 'zeros(': return 
numpy.zeros([int(i) for i in s[6:-1].split(',')]).tolist() if s[:5] == 'ones(': return numpy.ones([int(i) for i in s[5:-1].split(',')]).tolist() if '.' in s or 'e' in s: try: return float(s) except Exception: pass try: return int(s) except Exception: pass try: return float(s) # nan, inf except Exception: if fail: raise ValueError() return s def parse(s): # return Python value from string representation of Matlab value s = s.strip() try: return value(s, fail=True) except ValueError: pass result = add2 = [] levels = [add2] for t in lex(s): if t in '[{': add2 = [] levels.append(add2) elif t in ']}': x = levels.pop() if len(x) == 1 and isinstance(x[0], (list, str)): x = x[0] add2 = levels[-1] add2.append(x) else: add2.append(value(t)) if len(result) == 1 and isinstance(result[0], (list, str)): result = result[0] return result if '\r' in string or '\n' in string: # structure d = {} for line in string.splitlines(): line = line.strip() if not line or line[0] == '%': continue k, v = line.split('=', 1) k = k.strip() if any(c in k for c in " ';[]{}<>"): continue d[k] = parse(v) return d return parse(string)
[ "def", "matlabstr2py", "(", "string", ")", ":", "# TODO: handle invalid input", "# TODO: review unboxing of multidimensional arrays", "def", "lex", "(", "s", ")", ":", "# return sequence of tokens from matlab string representation", "tokens", "=", "[", "'['", "]", "while", "True", ":", "t", ",", "i", "=", "next_token", "(", "s", ")", "if", "t", "is", "None", ":", "break", "if", "t", "==", "';'", ":", "tokens", ".", "extend", "(", "(", "']'", ",", "'['", ")", ")", "elif", "t", "==", "'['", ":", "tokens", ".", "extend", "(", "(", "'['", ",", "'['", ")", ")", "elif", "t", "==", "']'", ":", "tokens", ".", "extend", "(", "(", "']'", ",", "']'", ")", ")", "else", ":", "tokens", ".", "append", "(", "t", ")", "s", "=", "s", "[", "i", ":", "]", "tokens", ".", "append", "(", "']'", ")", "return", "tokens", "def", "next_token", "(", "s", ")", ":", "# return next token in matlab string", "length", "=", "len", "(", "s", ")", "if", "length", "==", "0", ":", "return", "None", ",", "0", "i", "=", "0", "while", "i", "<", "length", "and", "s", "[", "i", "]", "==", "' '", ":", "i", "+=", "1", "if", "i", "==", "length", ":", "return", "None", ",", "i", "if", "s", "[", "i", "]", "in", "'{[;]}'", ":", "return", "s", "[", "i", "]", ",", "i", "+", "1", "if", "s", "[", "i", "]", "==", "\"'\"", ":", "j", "=", "i", "+", "1", "while", "j", "<", "length", "and", "s", "[", "j", "]", "!=", "\"'\"", ":", "j", "+=", "1", "return", "s", "[", "i", ":", "j", "+", "1", "]", ",", "j", "+", "1", "if", "s", "[", "i", "]", "==", "'<'", ":", "j", "=", "i", "+", "1", "while", "j", "<", "length", "and", "s", "[", "j", "]", "!=", "'>'", ":", "j", "+=", "1", "return", "s", "[", "i", ":", "j", "+", "1", "]", ",", "j", "+", "1", "j", "=", "i", "while", "j", "<", "length", "and", "not", "s", "[", "j", "]", "in", "' {[;]}'", ":", "j", "+=", "1", "return", "s", "[", "i", ":", "j", "]", ",", "j", "def", "value", "(", "s", ",", "fail", "=", "False", ")", ":", "# return Python value of token", "s", "=", "s", ".", 
"strip", "(", ")", "if", "not", "s", ":", "return", "s", "if", "len", "(", "s", ")", "==", "1", ":", "try", ":", "return", "int", "(", "s", ")", "except", "Exception", ":", "if", "fail", ":", "raise", "ValueError", "(", ")", "return", "s", "if", "s", "[", "0", "]", "==", "\"'\"", ":", "if", "fail", "and", "s", "[", "-", "1", "]", "!=", "\"'\"", "or", "\"'\"", "in", "s", "[", "1", ":", "-", "1", "]", ":", "raise", "ValueError", "(", ")", "return", "s", "[", "1", ":", "-", "1", "]", "if", "s", "[", "0", "]", "==", "'<'", ":", "if", "fail", "and", "s", "[", "-", "1", "]", "!=", "'>'", "or", "'<'", "in", "s", "[", "1", ":", "-", "1", "]", ":", "raise", "ValueError", "(", ")", "return", "s", "if", "fail", "and", "any", "(", "i", "in", "s", "for", "i", "in", "\" ';[]{}\"", ")", ":", "raise", "ValueError", "(", ")", "if", "s", "[", "0", "]", "==", "'@'", ":", "return", "s", "if", "s", "in", "(", "'true'", ",", "'True'", ")", ":", "return", "True", "if", "s", "in", "(", "'false'", ",", "'False'", ")", ":", "return", "False", "if", "s", "[", ":", "6", "]", "==", "'zeros('", ":", "return", "numpy", ".", "zeros", "(", "[", "int", "(", "i", ")", "for", "i", "in", "s", "[", "6", ":", "-", "1", "]", ".", "split", "(", "','", ")", "]", ")", ".", "tolist", "(", ")", "if", "s", "[", ":", "5", "]", "==", "'ones('", ":", "return", "numpy", ".", "ones", "(", "[", "int", "(", "i", ")", "for", "i", "in", "s", "[", "5", ":", "-", "1", "]", ".", "split", "(", "','", ")", "]", ")", ".", "tolist", "(", ")", "if", "'.'", "in", "s", "or", "'e'", "in", "s", ":", "try", ":", "return", "float", "(", "s", ")", "except", "Exception", ":", "pass", "try", ":", "return", "int", "(", "s", ")", "except", "Exception", ":", "pass", "try", ":", "return", "float", "(", "s", ")", "# nan, inf", "except", "Exception", ":", "if", "fail", ":", "raise", "ValueError", "(", ")", "return", "s", "def", "parse", "(", "s", ")", ":", "# return Python value from string representation of Matlab value", "s", "=", "s", 
".", "strip", "(", ")", "try", ":", "return", "value", "(", "s", ",", "fail", "=", "True", ")", "except", "ValueError", ":", "pass", "result", "=", "add2", "=", "[", "]", "levels", "=", "[", "add2", "]", "for", "t", "in", "lex", "(", "s", ")", ":", "if", "t", "in", "'[{'", ":", "add2", "=", "[", "]", "levels", ".", "append", "(", "add2", ")", "elif", "t", "in", "']}'", ":", "x", "=", "levels", ".", "pop", "(", ")", "if", "len", "(", "x", ")", "==", "1", "and", "isinstance", "(", "x", "[", "0", "]", ",", "(", "list", ",", "str", ")", ")", ":", "x", "=", "x", "[", "0", "]", "add2", "=", "levels", "[", "-", "1", "]", "add2", ".", "append", "(", "x", ")", "else", ":", "add2", ".", "append", "(", "value", "(", "t", ")", ")", "if", "len", "(", "result", ")", "==", "1", "and", "isinstance", "(", "result", "[", "0", "]", ",", "(", "list", ",", "str", ")", ")", ":", "result", "=", "result", "[", "0", "]", "return", "result", "if", "'\\r'", "in", "string", "or", "'\\n'", "in", "string", ":", "# structure", "d", "=", "{", "}", "for", "line", "in", "string", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", "or", "line", "[", "0", "]", "==", "'%'", ":", "continue", "k", ",", "v", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "k", "=", "k", ".", "strip", "(", ")", "if", "any", "(", "c", "in", "k", "for", "c", "in", "\" ';[]{}<>\"", ")", ":", "continue", "d", "[", "k", "]", "=", "parse", "(", "v", ")", "return", "d", "return", "parse", "(", "string", ")" ]
Return Python object from Matlab string representation. Return str, bool, int, float, list (Matlab arrays or cells), or dict (Matlab structures) types. Use to access ScanImage metadata. >>> matlabstr2py('1') 1 >>> matlabstr2py("['x y z' true false; 1 2.0 -3e4; NaN Inf @class]") [['x y z', True, False], [1, 2.0, -30000.0], [nan, inf, '@class']] >>> d = matlabstr2py("SI.hChannels.channelType = {'stripe' 'stripe'}\\n" ... "SI.hChannels.channelsActive = 2") >>> d['SI.hChannels.channelType'] ['stripe', 'stripe']
[ "Return", "Python", "object", "from", "Matlab", "string", "representation", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9944-L10097
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
stripascii
def stripascii(string): """Return string truncated at last byte that is 7-bit ASCII. Clean NULL separated and terminated TIFF strings. >>> stripascii(b'string\\x00string\\n\\x01\\x00') b'string\\x00string\\n' >>> stripascii(b'\\x00') b'' """ # TODO: pythonize this i = len(string) while i: i -= 1 if 8 < byte2int(string[i]) < 127: break else: i = -1 return string[:i+1]
python
def stripascii(string): """Return string truncated at last byte that is 7-bit ASCII. Clean NULL separated and terminated TIFF strings. >>> stripascii(b'string\\x00string\\n\\x01\\x00') b'string\\x00string\\n' >>> stripascii(b'\\x00') b'' """ # TODO: pythonize this i = len(string) while i: i -= 1 if 8 < byte2int(string[i]) < 127: break else: i = -1 return string[:i+1]
[ "def", "stripascii", "(", "string", ")", ":", "# TODO: pythonize this", "i", "=", "len", "(", "string", ")", "while", "i", ":", "i", "-=", "1", "if", "8", "<", "byte2int", "(", "string", "[", "i", "]", ")", "<", "127", ":", "break", "else", ":", "i", "=", "-", "1", "return", "string", "[", ":", "i", "+", "1", "]" ]
Return string truncated at last byte that is 7-bit ASCII. Clean NULL separated and terminated TIFF strings. >>> stripascii(b'string\\x00string\\n\\x01\\x00') b'string\\x00string\\n' >>> stripascii(b'\\x00') b''
[ "Return", "string", "truncated", "at", "last", "byte", "that", "is", "7", "-", "bit", "ASCII", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10115-L10134
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
asbool
def asbool(value, true=(b'true', u'true'), false=(b'false', u'false')): """Return string as bool if possible, else raise TypeError. >>> asbool(b' False ') False """ value = value.strip().lower() if value in true: # might raise UnicodeWarning/BytesWarning return True if value in false: return False raise TypeError()
python
def asbool(value, true=(b'true', u'true'), false=(b'false', u'false')): """Return string as bool if possible, else raise TypeError. >>> asbool(b' False ') False """ value = value.strip().lower() if value in true: # might raise UnicodeWarning/BytesWarning return True if value in false: return False raise TypeError()
[ "def", "asbool", "(", "value", ",", "true", "=", "(", "b'true'", ",", "u'true'", ")", ",", "false", "=", "(", "b'false'", ",", "u'false'", ")", ")", ":", "value", "=", "value", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "value", "in", "true", ":", "# might raise UnicodeWarning/BytesWarning", "return", "True", "if", "value", "in", "false", ":", "return", "False", "raise", "TypeError", "(", ")" ]
Return string as bool if possible, else raise TypeError. >>> asbool(b' False ') False
[ "Return", "string", "as", "bool", "if", "possible", "else", "raise", "TypeError", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10137-L10149
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
astype
def astype(value, types=None): """Return argument as one of types if possible. >>> astype('42') 42 >>> astype('3.14') 3.14 >>> astype('True') True >>> astype(b'Neee-Wom') 'Neee-Wom' """ if types is None: types = int, float, asbool, bytes2str for typ in types: try: return typ(value) except (ValueError, AttributeError, TypeError, UnicodeEncodeError): pass return value
python
def astype(value, types=None): """Return argument as one of types if possible. >>> astype('42') 42 >>> astype('3.14') 3.14 >>> astype('True') True >>> astype(b'Neee-Wom') 'Neee-Wom' """ if types is None: types = int, float, asbool, bytes2str for typ in types: try: return typ(value) except (ValueError, AttributeError, TypeError, UnicodeEncodeError): pass return value
[ "def", "astype", "(", "value", ",", "types", "=", "None", ")", ":", "if", "types", "is", "None", ":", "types", "=", "int", ",", "float", ",", "asbool", ",", "bytes2str", "for", "typ", "in", "types", ":", "try", ":", "return", "typ", "(", "value", ")", "except", "(", "ValueError", ",", "AttributeError", ",", "TypeError", ",", "UnicodeEncodeError", ")", ":", "pass", "return", "value" ]
Return argument as one of types if possible. >>> astype('42') 42 >>> astype('3.14') 3.14 >>> astype('True') True >>> astype(b'Neee-Wom') 'Neee-Wom'
[ "Return", "argument", "as", "one", "of", "types", "if", "possible", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10152-L10172
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
format_size
def format_size(size, threshold=1536): """Return file size as string from byte size. >>> format_size(1234) '1234 B' >>> format_size(12345678901) '11.50 GiB' """ if size < threshold: return "%i B" % size for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'): size /= 1024.0 if size < threshold: return "%.2f %s" % (size, unit) return 'ginormous'
python
def format_size(size, threshold=1536): """Return file size as string from byte size. >>> format_size(1234) '1234 B' >>> format_size(12345678901) '11.50 GiB' """ if size < threshold: return "%i B" % size for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'): size /= 1024.0 if size < threshold: return "%.2f %s" % (size, unit) return 'ginormous'
[ "def", "format_size", "(", "size", ",", "threshold", "=", "1536", ")", ":", "if", "size", "<", "threshold", ":", "return", "\"%i B\"", "%", "size", "for", "unit", "in", "(", "'KiB'", ",", "'MiB'", ",", "'GiB'", ",", "'TiB'", ",", "'PiB'", ")", ":", "size", "/=", "1024.0", "if", "size", "<", "threshold", ":", "return", "\"%.2f %s\"", "%", "(", "size", ",", "unit", ")", "return", "'ginormous'" ]
Return file size as string from byte size. >>> format_size(1234) '1234 B' >>> format_size(12345678901) '11.50 GiB'
[ "Return", "file", "size", "as", "string", "from", "byte", "size", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10175-L10190
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
natural_sorted
def natural_sorted(iterable): """Return human sorted list of strings. E.g. for sorting file names. >>> natural_sorted(['f1', 'f2', 'f10']) ['f1', 'f2', 'f10'] """ def sortkey(x): return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)] numbers = re.compile(r'(\d+)') return sorted(iterable, key=sortkey)
python
def natural_sorted(iterable): """Return human sorted list of strings. E.g. for sorting file names. >>> natural_sorted(['f1', 'f2', 'f10']) ['f1', 'f2', 'f10'] """ def sortkey(x): return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)] numbers = re.compile(r'(\d+)') return sorted(iterable, key=sortkey)
[ "def", "natural_sorted", "(", "iterable", ")", ":", "def", "sortkey", "(", "x", ")", ":", "return", "[", "(", "int", "(", "c", ")", "if", "c", ".", "isdigit", "(", ")", "else", "c", ")", "for", "c", "in", "re", ".", "split", "(", "numbers", ",", "x", ")", "]", "numbers", "=", "re", ".", "compile", "(", "r'(\\d+)'", ")", "return", "sorted", "(", "iterable", ",", "key", "=", "sortkey", ")" ]
Return human sorted list of strings. E.g. for sorting file names. >>> natural_sorted(['f1', 'f2', 'f10']) ['f1', 'f2', 'f10']
[ "Return", "human", "sorted", "list", "of", "strings", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10244-L10257
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
excel_datetime
def excel_datetime(timestamp, epoch=None): """Return datetime object from timestamp in Excel serial format. Convert LSM time stamps. >>> excel_datetime(40237.029999999795) datetime.datetime(2010, 2, 28, 0, 43, 11, 999982) """ if epoch is None: epoch = datetime.datetime.fromordinal(693594) return epoch + datetime.timedelta(timestamp)
python
def excel_datetime(timestamp, epoch=None): """Return datetime object from timestamp in Excel serial format. Convert LSM time stamps. >>> excel_datetime(40237.029999999795) datetime.datetime(2010, 2, 28, 0, 43, 11, 999982) """ if epoch is None: epoch = datetime.datetime.fromordinal(693594) return epoch + datetime.timedelta(timestamp)
[ "def", "excel_datetime", "(", "timestamp", ",", "epoch", "=", "None", ")", ":", "if", "epoch", "is", "None", ":", "epoch", "=", "datetime", ".", "datetime", ".", "fromordinal", "(", "693594", ")", "return", "epoch", "+", "datetime", ".", "timedelta", "(", "timestamp", ")" ]
Return datetime object from timestamp in Excel serial format. Convert LSM time stamps. >>> excel_datetime(40237.029999999795) datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
[ "Return", "datetime", "object", "from", "timestamp", "in", "Excel", "serial", "format", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10260-L10271
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
julian_datetime
def julian_datetime(julianday, milisecond=0): """Return datetime from days since 1/1/4713 BC and ms since midnight. Convert Julian dates according to MetaMorph. >>> julian_datetime(2451576, 54362783) datetime.datetime(2000, 2, 2, 15, 6, 2, 783) """ if julianday <= 1721423: # no datetime before year 1 return None a = julianday + 1 if a > 2299160: alpha = math.trunc((a - 1867216.25) / 36524.25) a += 1 + alpha - alpha // 4 b = a + (1524 if a > 1721423 else 1158) c = math.trunc((b - 122.1) / 365.25) d = math.trunc(365.25 * c) e = math.trunc((b - d) / 30.6001) day = b - d - math.trunc(30.6001 * e) month = e - (1 if e < 13.5 else 13) year = c - (4716 if month > 2.5 else 4715) hour, milisecond = divmod(milisecond, 1000 * 60 * 60) minute, milisecond = divmod(milisecond, 1000 * 60) second, milisecond = divmod(milisecond, 1000) return datetime.datetime(year, month, day, hour, minute, second, milisecond)
python
def julian_datetime(julianday, milisecond=0): """Return datetime from days since 1/1/4713 BC and ms since midnight. Convert Julian dates according to MetaMorph. >>> julian_datetime(2451576, 54362783) datetime.datetime(2000, 2, 2, 15, 6, 2, 783) """ if julianday <= 1721423: # no datetime before year 1 return None a = julianday + 1 if a > 2299160: alpha = math.trunc((a - 1867216.25) / 36524.25) a += 1 + alpha - alpha // 4 b = a + (1524 if a > 1721423 else 1158) c = math.trunc((b - 122.1) / 365.25) d = math.trunc(365.25 * c) e = math.trunc((b - d) / 30.6001) day = b - d - math.trunc(30.6001 * e) month = e - (1 if e < 13.5 else 13) year = c - (4716 if month > 2.5 else 4715) hour, milisecond = divmod(milisecond, 1000 * 60 * 60) minute, milisecond = divmod(milisecond, 1000 * 60) second, milisecond = divmod(milisecond, 1000) return datetime.datetime(year, month, day, hour, minute, second, milisecond)
[ "def", "julian_datetime", "(", "julianday", ",", "milisecond", "=", "0", ")", ":", "if", "julianday", "<=", "1721423", ":", "# no datetime before year 1", "return", "None", "a", "=", "julianday", "+", "1", "if", "a", ">", "2299160", ":", "alpha", "=", "math", ".", "trunc", "(", "(", "a", "-", "1867216.25", ")", "/", "36524.25", ")", "a", "+=", "1", "+", "alpha", "-", "alpha", "//", "4", "b", "=", "a", "+", "(", "1524", "if", "a", ">", "1721423", "else", "1158", ")", "c", "=", "math", ".", "trunc", "(", "(", "b", "-", "122.1", ")", "/", "365.25", ")", "d", "=", "math", ".", "trunc", "(", "365.25", "*", "c", ")", "e", "=", "math", ".", "trunc", "(", "(", "b", "-", "d", ")", "/", "30.6001", ")", "day", "=", "b", "-", "d", "-", "math", ".", "trunc", "(", "30.6001", "*", "e", ")", "month", "=", "e", "-", "(", "1", "if", "e", "<", "13.5", "else", "13", ")", "year", "=", "c", "-", "(", "4716", "if", "month", ">", "2.5", "else", "4715", ")", "hour", ",", "milisecond", "=", "divmod", "(", "milisecond", ",", "1000", "*", "60", "*", "60", ")", "minute", ",", "milisecond", "=", "divmod", "(", "milisecond", ",", "1000", "*", "60", ")", "second", ",", "milisecond", "=", "divmod", "(", "milisecond", ",", "1000", ")", "return", "datetime", ".", "datetime", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "second", ",", "milisecond", ")" ]
Return datetime from days since 1/1/4713 BC and ms since midnight. Convert Julian dates according to MetaMorph. >>> julian_datetime(2451576, 54362783) datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
[ "Return", "datetime", "from", "days", "since", "1", "/", "1", "/", "4713", "BC", "and", "ms", "since", "midnight", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10274-L10305
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
byteorder_isnative
def byteorder_isnative(byteorder): """Return if byteorder matches the system's byteorder. >>> byteorder_isnative('=') True """ if byteorder in ('=', sys.byteorder): return True keys = {'big': '>', 'little': '<'} return keys.get(byteorder, byteorder) == keys[sys.byteorder]
python
def byteorder_isnative(byteorder): """Return if byteorder matches the system's byteorder. >>> byteorder_isnative('=') True """ if byteorder in ('=', sys.byteorder): return True keys = {'big': '>', 'little': '<'} return keys.get(byteorder, byteorder) == keys[sys.byteorder]
[ "def", "byteorder_isnative", "(", "byteorder", ")", ":", "if", "byteorder", "in", "(", "'='", ",", "sys", ".", "byteorder", ")", ":", "return", "True", "keys", "=", "{", "'big'", ":", "'>'", ",", "'little'", ":", "'<'", "}", "return", "keys", ".", "get", "(", "byteorder", ",", "byteorder", ")", "==", "keys", "[", "sys", ".", "byteorder", "]" ]
Return if byteorder matches the system's byteorder. >>> byteorder_isnative('=') True
[ "Return", "if", "byteorder", "matches", "the", "system", "s", "byteorder", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10308-L10318
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
recarray2dict
def recarray2dict(recarray): """Return numpy.recarray as dict.""" # TODO: subarrays result = {} for descr, value in zip(recarray.dtype.descr, recarray): name, dtype = descr[:2] if dtype[1] == 'S': value = bytes2str(stripnull(value)) elif value.ndim < 2: value = value.tolist() result[name] = value return result
python
def recarray2dict(recarray): """Return numpy.recarray as dict.""" # TODO: subarrays result = {} for descr, value in zip(recarray.dtype.descr, recarray): name, dtype = descr[:2] if dtype[1] == 'S': value = bytes2str(stripnull(value)) elif value.ndim < 2: value = value.tolist() result[name] = value return result
[ "def", "recarray2dict", "(", "recarray", ")", ":", "# TODO: subarrays", "result", "=", "{", "}", "for", "descr", ",", "value", "in", "zip", "(", "recarray", ".", "dtype", ".", "descr", ",", "recarray", ")", ":", "name", ",", "dtype", "=", "descr", "[", ":", "2", "]", "if", "dtype", "[", "1", "]", "==", "'S'", ":", "value", "=", "bytes2str", "(", "stripnull", "(", "value", ")", ")", "elif", "value", ".", "ndim", "<", "2", ":", "value", "=", "value", ".", "tolist", "(", ")", "result", "[", "name", "]", "=", "value", "return", "result" ]
Return numpy.recarray as dict.
[ "Return", "numpy", ".", "recarray", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10321-L10332
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
xml2dict
def xml2dict(xml, sanitize=True, prefix=None): """Return XML as dict. >>> xml2dict('<?xml version="1.0" ?><root attr="name"><key>1</key></root>') {'root': {'key': 1, 'attr': 'name'}} """ from xml.etree import cElementTree as etree # delayed import at = tx = '' if prefix: at, tx = prefix def astype(value): # return value as int, float, bool, or str for t in (int, float, asbool): try: return t(value) except Exception: pass return value def etree2dict(t): # adapted from https://stackoverflow.com/a/10077069/453463 key = t.tag if sanitize: key = key.rsplit('}', 1)[-1] d = {key: {} if t.attrib else None} children = list(t) if children: dd = collections.defaultdict(list) for dc in map(etree2dict, children): for k, v in dc.items(): dd[k].append(astype(v)) d = {key: {k: astype(v[0]) if len(v) == 1 else astype(v) for k, v in dd.items()}} if t.attrib: d[key].update((at + k, astype(v)) for k, v in t.attrib.items()) if t.text: text = t.text.strip() if children or t.attrib: if text: d[key][tx + 'value'] = astype(text) else: d[key] = astype(text) return d return etree2dict(etree.fromstring(xml))
python
def xml2dict(xml, sanitize=True, prefix=None): """Return XML as dict. >>> xml2dict('<?xml version="1.0" ?><root attr="name"><key>1</key></root>') {'root': {'key': 1, 'attr': 'name'}} """ from xml.etree import cElementTree as etree # delayed import at = tx = '' if prefix: at, tx = prefix def astype(value): # return value as int, float, bool, or str for t in (int, float, asbool): try: return t(value) except Exception: pass return value def etree2dict(t): # adapted from https://stackoverflow.com/a/10077069/453463 key = t.tag if sanitize: key = key.rsplit('}', 1)[-1] d = {key: {} if t.attrib else None} children = list(t) if children: dd = collections.defaultdict(list) for dc in map(etree2dict, children): for k, v in dc.items(): dd[k].append(astype(v)) d = {key: {k: astype(v[0]) if len(v) == 1 else astype(v) for k, v in dd.items()}} if t.attrib: d[key].update((at + k, astype(v)) for k, v in t.attrib.items()) if t.text: text = t.text.strip() if children or t.attrib: if text: d[key][tx + 'value'] = astype(text) else: d[key] = astype(text) return d return etree2dict(etree.fromstring(xml))
[ "def", "xml2dict", "(", "xml", ",", "sanitize", "=", "True", ",", "prefix", "=", "None", ")", ":", "from", "xml", ".", "etree", "import", "cElementTree", "as", "etree", "# delayed import", "at", "=", "tx", "=", "''", "if", "prefix", ":", "at", ",", "tx", "=", "prefix", "def", "astype", "(", "value", ")", ":", "# return value as int, float, bool, or str", "for", "t", "in", "(", "int", ",", "float", ",", "asbool", ")", ":", "try", ":", "return", "t", "(", "value", ")", "except", "Exception", ":", "pass", "return", "value", "def", "etree2dict", "(", "t", ")", ":", "# adapted from https://stackoverflow.com/a/10077069/453463", "key", "=", "t", ".", "tag", "if", "sanitize", ":", "key", "=", "key", ".", "rsplit", "(", "'}'", ",", "1", ")", "[", "-", "1", "]", "d", "=", "{", "key", ":", "{", "}", "if", "t", ".", "attrib", "else", "None", "}", "children", "=", "list", "(", "t", ")", "if", "children", ":", "dd", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "dc", "in", "map", "(", "etree2dict", ",", "children", ")", ":", "for", "k", ",", "v", "in", "dc", ".", "items", "(", ")", ":", "dd", "[", "k", "]", ".", "append", "(", "astype", "(", "v", ")", ")", "d", "=", "{", "key", ":", "{", "k", ":", "astype", "(", "v", "[", "0", "]", ")", "if", "len", "(", "v", ")", "==", "1", "else", "astype", "(", "v", ")", "for", "k", ",", "v", "in", "dd", ".", "items", "(", ")", "}", "}", "if", "t", ".", "attrib", ":", "d", "[", "key", "]", ".", "update", "(", "(", "at", "+", "k", ",", "astype", "(", "v", ")", ")", "for", "k", ",", "v", "in", "t", ".", "attrib", ".", "items", "(", ")", ")", "if", "t", ".", "text", ":", "text", "=", "t", ".", "text", ".", "strip", "(", ")", "if", "children", "or", "t", ".", "attrib", ":", "if", "text", ":", "d", "[", "key", "]", "[", "tx", "+", "'value'", "]", "=", "astype", "(", "text", ")", "else", ":", "d", "[", "key", "]", "=", "astype", "(", "text", ")", "return", "d", "return", "etree2dict", "(", "etree", ".", "fromstring", "(", 
"xml", ")", ")" ]
Return XML as dict. >>> xml2dict('<?xml version="1.0" ?><root attr="name"><key>1</key></root>') {'root': {'key': 1, 'attr': 'name'}}
[ "Return", "XML", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10335-L10382
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
hexdump
def hexdump(bytestr, width=75, height=24, snipat=-2, modulo=2, ellipsis=None): """Return hexdump representation of byte string. >>> hexdump(binascii.unhexlify('49492a00080000000e00fe0004000100')) '49 49 2a 00 08 00 00 00 0e 00 fe 00 04 00 01 00 II*.............' """ size = len(bytestr) if size < 1 or width < 2 or height < 1: return '' if height == 1: addr = b'' bytesperline = min(modulo * (((width - len(addr)) // 4) // modulo), size) if bytesperline < 1: return '' nlines = 1 else: addr = b'%%0%ix: ' % len(b'%x' % size) bytesperline = min(modulo * (((width - len(addr % 1)) // 4) // modulo), size) if bytesperline < 1: return '' width = 3*bytesperline + len(addr % 1) nlines = (size - 1) // bytesperline + 1 if snipat is None or snipat == 1: snipat = height elif 0 < abs(snipat) < 1: snipat = int(math.floor(height * snipat)) if snipat < 0: snipat += height if height == 1 or nlines == 1: blocks = [(0, bytestr[:bytesperline])] addr = b'' height = 1 width = 3 * bytesperline elif height is None or nlines <= height: blocks = [(0, bytestr)] elif snipat <= 0: start = bytesperline * (nlines - height) blocks = [(start, bytestr[start:])] # (start, None) elif snipat >= height or height < 3: end = bytesperline * height blocks = [(0, bytestr[:end])] # (end, None) else: end1 = bytesperline * snipat end2 = bytesperline * (height - snipat - 1) blocks = [(0, bytestr[:end1]), (size-end1-end2, None), (size-end2, bytestr[size-end2:])] ellipsis = b'...' 
if ellipsis is None else str2bytes(ellipsis) result = [] for start, bytestr in blocks: if bytestr is None: result.append(ellipsis) # 'skip %i bytes' % start) continue hexstr = binascii.hexlify(bytestr) strstr = re.sub(br'[^\x20-\x7f]', b'.', bytestr) for i in range(0, len(bytestr), bytesperline): h = hexstr[2*i:2*i+bytesperline*2] r = (addr % (i + start)) if height > 1 else addr r += b' '.join(h[i:i+2] for i in range(0, 2*bytesperline, 2)) r += b' ' * (width - len(r)) r += strstr[i:i+bytesperline] result.append(r) result = b'\n'.join(result) if sys.version_info[0] == 3: result = result.decode('ascii') return result
python
def hexdump(bytestr, width=75, height=24, snipat=-2, modulo=2, ellipsis=None): """Return hexdump representation of byte string. >>> hexdump(binascii.unhexlify('49492a00080000000e00fe0004000100')) '49 49 2a 00 08 00 00 00 0e 00 fe 00 04 00 01 00 II*.............' """ size = len(bytestr) if size < 1 or width < 2 or height < 1: return '' if height == 1: addr = b'' bytesperline = min(modulo * (((width - len(addr)) // 4) // modulo), size) if bytesperline < 1: return '' nlines = 1 else: addr = b'%%0%ix: ' % len(b'%x' % size) bytesperline = min(modulo * (((width - len(addr % 1)) // 4) // modulo), size) if bytesperline < 1: return '' width = 3*bytesperline + len(addr % 1) nlines = (size - 1) // bytesperline + 1 if snipat is None or snipat == 1: snipat = height elif 0 < abs(snipat) < 1: snipat = int(math.floor(height * snipat)) if snipat < 0: snipat += height if height == 1 or nlines == 1: blocks = [(0, bytestr[:bytesperline])] addr = b'' height = 1 width = 3 * bytesperline elif height is None or nlines <= height: blocks = [(0, bytestr)] elif snipat <= 0: start = bytesperline * (nlines - height) blocks = [(start, bytestr[start:])] # (start, None) elif snipat >= height or height < 3: end = bytesperline * height blocks = [(0, bytestr[:end])] # (end, None) else: end1 = bytesperline * snipat end2 = bytesperline * (height - snipat - 1) blocks = [(0, bytestr[:end1]), (size-end1-end2, None), (size-end2, bytestr[size-end2:])] ellipsis = b'...' 
if ellipsis is None else str2bytes(ellipsis) result = [] for start, bytestr in blocks: if bytestr is None: result.append(ellipsis) # 'skip %i bytes' % start) continue hexstr = binascii.hexlify(bytestr) strstr = re.sub(br'[^\x20-\x7f]', b'.', bytestr) for i in range(0, len(bytestr), bytesperline): h = hexstr[2*i:2*i+bytesperline*2] r = (addr % (i + start)) if height > 1 else addr r += b' '.join(h[i:i+2] for i in range(0, 2*bytesperline, 2)) r += b' ' * (width - len(r)) r += strstr[i:i+bytesperline] result.append(r) result = b'\n'.join(result) if sys.version_info[0] == 3: result = result.decode('ascii') return result
[ "def", "hexdump", "(", "bytestr", ",", "width", "=", "75", ",", "height", "=", "24", ",", "snipat", "=", "-", "2", ",", "modulo", "=", "2", ",", "ellipsis", "=", "None", ")", ":", "size", "=", "len", "(", "bytestr", ")", "if", "size", "<", "1", "or", "width", "<", "2", "or", "height", "<", "1", ":", "return", "''", "if", "height", "==", "1", ":", "addr", "=", "b''", "bytesperline", "=", "min", "(", "modulo", "*", "(", "(", "(", "width", "-", "len", "(", "addr", ")", ")", "//", "4", ")", "//", "modulo", ")", ",", "size", ")", "if", "bytesperline", "<", "1", ":", "return", "''", "nlines", "=", "1", "else", ":", "addr", "=", "b'%%0%ix: '", "%", "len", "(", "b'%x'", "%", "size", ")", "bytesperline", "=", "min", "(", "modulo", "*", "(", "(", "(", "width", "-", "len", "(", "addr", "%", "1", ")", ")", "//", "4", ")", "//", "modulo", ")", ",", "size", ")", "if", "bytesperline", "<", "1", ":", "return", "''", "width", "=", "3", "*", "bytesperline", "+", "len", "(", "addr", "%", "1", ")", "nlines", "=", "(", "size", "-", "1", ")", "//", "bytesperline", "+", "1", "if", "snipat", "is", "None", "or", "snipat", "==", "1", ":", "snipat", "=", "height", "elif", "0", "<", "abs", "(", "snipat", ")", "<", "1", ":", "snipat", "=", "int", "(", "math", ".", "floor", "(", "height", "*", "snipat", ")", ")", "if", "snipat", "<", "0", ":", "snipat", "+=", "height", "if", "height", "==", "1", "or", "nlines", "==", "1", ":", "blocks", "=", "[", "(", "0", ",", "bytestr", "[", ":", "bytesperline", "]", ")", "]", "addr", "=", "b''", "height", "=", "1", "width", "=", "3", "*", "bytesperline", "elif", "height", "is", "None", "or", "nlines", "<=", "height", ":", "blocks", "=", "[", "(", "0", ",", "bytestr", ")", "]", "elif", "snipat", "<=", "0", ":", "start", "=", "bytesperline", "*", "(", "nlines", "-", "height", ")", "blocks", "=", "[", "(", "start", ",", "bytestr", "[", "start", ":", "]", ")", "]", "# (start, None)", "elif", "snipat", ">=", "height", "or", "height", "<", "3", ":", "end", "=", 
"bytesperline", "*", "height", "blocks", "=", "[", "(", "0", ",", "bytestr", "[", ":", "end", "]", ")", "]", "# (end, None)", "else", ":", "end1", "=", "bytesperline", "*", "snipat", "end2", "=", "bytesperline", "*", "(", "height", "-", "snipat", "-", "1", ")", "blocks", "=", "[", "(", "0", ",", "bytestr", "[", ":", "end1", "]", ")", ",", "(", "size", "-", "end1", "-", "end2", ",", "None", ")", ",", "(", "size", "-", "end2", ",", "bytestr", "[", "size", "-", "end2", ":", "]", ")", "]", "ellipsis", "=", "b'...'", "if", "ellipsis", "is", "None", "else", "str2bytes", "(", "ellipsis", ")", "result", "=", "[", "]", "for", "start", ",", "bytestr", "in", "blocks", ":", "if", "bytestr", "is", "None", ":", "result", ".", "append", "(", "ellipsis", ")", "# 'skip %i bytes' % start)", "continue", "hexstr", "=", "binascii", ".", "hexlify", "(", "bytestr", ")", "strstr", "=", "re", ".", "sub", "(", "br'[^\\x20-\\x7f]'", ",", "b'.'", ",", "bytestr", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "bytestr", ")", ",", "bytesperline", ")", ":", "h", "=", "hexstr", "[", "2", "*", "i", ":", "2", "*", "i", "+", "bytesperline", "*", "2", "]", "r", "=", "(", "addr", "%", "(", "i", "+", "start", ")", ")", "if", "height", ">", "1", "else", "addr", "r", "+=", "b' '", ".", "join", "(", "h", "[", "i", ":", "i", "+", "2", "]", "for", "i", "in", "range", "(", "0", ",", "2", "*", "bytesperline", ",", "2", ")", ")", "r", "+=", "b' '", "*", "(", "width", "-", "len", "(", "r", ")", ")", "r", "+=", "strstr", "[", "i", ":", "i", "+", "bytesperline", "]", "result", ".", "append", "(", "r", ")", "result", "=", "b'\\n'", ".", "join", "(", "result", ")", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", ":", "result", "=", "result", ".", "decode", "(", "'ascii'", ")", "return", "result" ]
Return hexdump representation of byte string. >>> hexdump(binascii.unhexlify('49492a00080000000e00fe0004000100')) '49 49 2a 00 08 00 00 00 0e 00 fe 00 04 00 01 00 II*.............'
[ "Return", "hexdump", "representation", "of", "byte", "string", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10385-L10456
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
isprintable
def isprintable(string): """Return if all characters in string are printable. >>> isprintable('abc') True >>> isprintable(b'\01') False """ string = string.strip() if not string: return True if sys.version_info[0] == 3: try: return string.isprintable() except Exception: pass try: return string.decode('utf-8').isprintable() except Exception: pass else: if string.isalnum(): return True printable = ('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST' 'UVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c') return all(c in printable for c in string)
python
def isprintable(string): """Return if all characters in string are printable. >>> isprintable('abc') True >>> isprintable(b'\01') False """ string = string.strip() if not string: return True if sys.version_info[0] == 3: try: return string.isprintable() except Exception: pass try: return string.decode('utf-8').isprintable() except Exception: pass else: if string.isalnum(): return True printable = ('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST' 'UVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c') return all(c in printable for c in string)
[ "def", "isprintable", "(", "string", ")", ":", "string", "=", "string", ".", "strip", "(", ")", "if", "not", "string", ":", "return", "True", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", ":", "try", ":", "return", "string", ".", "isprintable", "(", ")", "except", "Exception", ":", "pass", "try", ":", "return", "string", ".", "decode", "(", "'utf-8'", ")", ".", "isprintable", "(", ")", "except", "Exception", ":", "pass", "else", ":", "if", "string", ".", "isalnum", "(", ")", ":", "return", "True", "printable", "=", "(", "'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST'", "'UVWXYZ!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ \\t\\n\\r\\x0b\\x0c'", ")", "return", "all", "(", "c", "in", "printable", "for", "c", "in", "string", ")" ]
Return if all characters in string are printable. >>> isprintable('abc') True >>> isprintable(b'\01') False
[ "Return", "if", "all", "characters", "in", "string", "are", "printable", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10459-L10485
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
clean_whitespace
def clean_whitespace(string, compact=False): """Return string with compressed whitespace.""" for a, b in (('\r\n', '\n'), ('\r', '\n'), ('\n\n', '\n'), ('\t', ' '), (' ', ' ')): string = string.replace(a, b) if compact: for a, b in (('\n', ' '), ('[ ', '['), (' ', ' '), (' ', ' '), (' ', ' ')): string = string.replace(a, b) return string.strip()
python
def clean_whitespace(string, compact=False): """Return string with compressed whitespace.""" for a, b in (('\r\n', '\n'), ('\r', '\n'), ('\n\n', '\n'), ('\t', ' '), (' ', ' ')): string = string.replace(a, b) if compact: for a, b in (('\n', ' '), ('[ ', '['), (' ', ' '), (' ', ' '), (' ', ' ')): string = string.replace(a, b) return string.strip()
[ "def", "clean_whitespace", "(", "string", ",", "compact", "=", "False", ")", ":", "for", "a", ",", "b", "in", "(", "(", "'\\r\\n'", ",", "'\\n'", ")", ",", "(", "'\\r'", ",", "'\\n'", ")", ",", "(", "'\\n\\n'", ",", "'\\n'", ")", ",", "(", "'\\t'", ",", "' '", ")", ",", "(", "' '", ",", "' '", ")", ")", ":", "string", "=", "string", ".", "replace", "(", "a", ",", "b", ")", "if", "compact", ":", "for", "a", ",", "b", "in", "(", "(", "'\\n'", ",", "' '", ")", ",", "(", "'[ '", ",", "'['", ")", ",", "(", "' '", ",", "' '", ")", ",", "(", "' '", ",", "' '", ")", ",", "(", "' '", ",", "' '", ")", ")", ":", "string", "=", "string", ".", "replace", "(", "a", ",", "b", ")", "return", "string", ".", "strip", "(", ")" ]
Return string with compressed whitespace.
[ "Return", "string", "with", "compressed", "whitespace", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10488-L10497
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
pformat_xml
def pformat_xml(xml): """Return pretty formatted XML.""" try: from lxml import etree # delayed import if not isinstance(xml, bytes): xml = xml.encode('utf-8') xml = etree.parse(io.BytesIO(xml)) xml = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding=xml.docinfo.encoding) xml = bytes2str(xml) except Exception: if isinstance(xml, bytes): xml = bytes2str(xml) xml = xml.replace('><', '>\n<') return xml.replace(' ', ' ').replace('\t', ' ')
python
def pformat_xml(xml): """Return pretty formatted XML.""" try: from lxml import etree # delayed import if not isinstance(xml, bytes): xml = xml.encode('utf-8') xml = etree.parse(io.BytesIO(xml)) xml = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding=xml.docinfo.encoding) xml = bytes2str(xml) except Exception: if isinstance(xml, bytes): xml = bytes2str(xml) xml = xml.replace('><', '>\n<') return xml.replace(' ', ' ').replace('\t', ' ')
[ "def", "pformat_xml", "(", "xml", ")", ":", "try", ":", "from", "lxml", "import", "etree", "# delayed import", "if", "not", "isinstance", "(", "xml", ",", "bytes", ")", ":", "xml", "=", "xml", ".", "encode", "(", "'utf-8'", ")", "xml", "=", "etree", ".", "parse", "(", "io", ".", "BytesIO", "(", "xml", ")", ")", "xml", "=", "etree", ".", "tostring", "(", "xml", ",", "pretty_print", "=", "True", ",", "xml_declaration", "=", "True", ",", "encoding", "=", "xml", ".", "docinfo", ".", "encoding", ")", "xml", "=", "bytes2str", "(", "xml", ")", "except", "Exception", ":", "if", "isinstance", "(", "xml", ",", "bytes", ")", ":", "xml", "=", "bytes2str", "(", "xml", ")", "xml", "=", "xml", ".", "replace", "(", "'><'", ",", "'>\\n<'", ")", "return", "xml", ".", "replace", "(", "' '", ",", "' '", ")", ".", "replace", "(", "'\\t'", ",", "' '", ")" ]
Return pretty formatted XML.
[ "Return", "pretty", "formatted", "XML", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10500-L10514
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
pformat
def pformat(arg, width=79, height=24, compact=True): """Return pretty formatted representation of object as string. Whitespace might be altered. """ if height is None or height < 1: height = 1024 if width is None or width < 1: width = 256 npopt = numpy.get_printoptions() numpy.set_printoptions(threshold=100, linewidth=width) if isinstance(arg, basestring): if arg[:5].lower() in ('<?xml', b'<?xml'): if isinstance(arg, bytes): arg = bytes2str(arg) if height == 1: arg = arg[:4*width] else: arg = pformat_xml(arg) elif isinstance(arg, bytes): if isprintable(arg): arg = bytes2str(arg) arg = clean_whitespace(arg) else: numpy.set_printoptions(**npopt) return hexdump(arg, width=width, height=height, modulo=1) arg = arg.rstrip() elif isinstance(arg, numpy.record): arg = arg.pprint() else: import pprint # delayed import compact = {} if sys.version_info[0] == 2 else dict(compact=compact) arg = pprint.pformat(arg, width=width, **compact) numpy.set_printoptions(**npopt) if height == 1: arg = clean_whitespace(arg, compact=True) return arg[:width] argl = list(arg.splitlines()) if len(argl) > height: arg = '\n'.join(argl[:height//2] + ['...'] + argl[-height//2:]) return arg
python
def pformat(arg, width=79, height=24, compact=True): """Return pretty formatted representation of object as string. Whitespace might be altered. """ if height is None or height < 1: height = 1024 if width is None or width < 1: width = 256 npopt = numpy.get_printoptions() numpy.set_printoptions(threshold=100, linewidth=width) if isinstance(arg, basestring): if arg[:5].lower() in ('<?xml', b'<?xml'): if isinstance(arg, bytes): arg = bytes2str(arg) if height == 1: arg = arg[:4*width] else: arg = pformat_xml(arg) elif isinstance(arg, bytes): if isprintable(arg): arg = bytes2str(arg) arg = clean_whitespace(arg) else: numpy.set_printoptions(**npopt) return hexdump(arg, width=width, height=height, modulo=1) arg = arg.rstrip() elif isinstance(arg, numpy.record): arg = arg.pprint() else: import pprint # delayed import compact = {} if sys.version_info[0] == 2 else dict(compact=compact) arg = pprint.pformat(arg, width=width, **compact) numpy.set_printoptions(**npopt) if height == 1: arg = clean_whitespace(arg, compact=True) return arg[:width] argl = list(arg.splitlines()) if len(argl) > height: arg = '\n'.join(argl[:height//2] + ['...'] + argl[-height//2:]) return arg
[ "def", "pformat", "(", "arg", ",", "width", "=", "79", ",", "height", "=", "24", ",", "compact", "=", "True", ")", ":", "if", "height", "is", "None", "or", "height", "<", "1", ":", "height", "=", "1024", "if", "width", "is", "None", "or", "width", "<", "1", ":", "width", "=", "256", "npopt", "=", "numpy", ".", "get_printoptions", "(", ")", "numpy", ".", "set_printoptions", "(", "threshold", "=", "100", ",", "linewidth", "=", "width", ")", "if", "isinstance", "(", "arg", ",", "basestring", ")", ":", "if", "arg", "[", ":", "5", "]", ".", "lower", "(", ")", "in", "(", "'<?xml'", ",", "b'<?xml'", ")", ":", "if", "isinstance", "(", "arg", ",", "bytes", ")", ":", "arg", "=", "bytes2str", "(", "arg", ")", "if", "height", "==", "1", ":", "arg", "=", "arg", "[", ":", "4", "*", "width", "]", "else", ":", "arg", "=", "pformat_xml", "(", "arg", ")", "elif", "isinstance", "(", "arg", ",", "bytes", ")", ":", "if", "isprintable", "(", "arg", ")", ":", "arg", "=", "bytes2str", "(", "arg", ")", "arg", "=", "clean_whitespace", "(", "arg", ")", "else", ":", "numpy", ".", "set_printoptions", "(", "*", "*", "npopt", ")", "return", "hexdump", "(", "arg", ",", "width", "=", "width", ",", "height", "=", "height", ",", "modulo", "=", "1", ")", "arg", "=", "arg", ".", "rstrip", "(", ")", "elif", "isinstance", "(", "arg", ",", "numpy", ".", "record", ")", ":", "arg", "=", "arg", ".", "pprint", "(", ")", "else", ":", "import", "pprint", "# delayed import", "compact", "=", "{", "}", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", "else", "dict", "(", "compact", "=", "compact", ")", "arg", "=", "pprint", ".", "pformat", "(", "arg", ",", "width", "=", "width", ",", "*", "*", "compact", ")", "numpy", ".", "set_printoptions", "(", "*", "*", "npopt", ")", "if", "height", "==", "1", ":", "arg", "=", "clean_whitespace", "(", "arg", ",", "compact", "=", "True", ")", "return", "arg", "[", ":", "width", "]", "argl", "=", "list", "(", "arg", ".", "splitlines", "(", ")", ")", "if", "len", 
"(", "argl", ")", ">", "height", ":", "arg", "=", "'\\n'", ".", "join", "(", "argl", "[", ":", "height", "//", "2", "]", "+", "[", "'...'", "]", "+", "argl", "[", "-", "height", "//", "2", ":", "]", ")", "return", "arg" ]
Return pretty formatted representation of object as string. Whitespace might be altered.
[ "Return", "pretty", "formatted", "representation", "of", "object", "as", "string", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10517-L10563
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
snipstr
def snipstr(string, width=79, snipat=None, ellipsis='...'): """Return string cut to specified length. >>> snipstr('abcdefghijklmnop', 8) 'abc...op' """ if snipat is None: snipat = 0.5 if ellipsis is None: if isinstance(string, bytes): ellipsis = b'...' else: ellipsis = u'\u2026' # does not print on win-py3.5 esize = len(ellipsis) splitlines = string.splitlines() # TODO: finish and test multiline snip result = [] for line in splitlines: if line is None: result.append(ellipsis) continue linelen = len(line) if linelen <= width: result.append(string) continue split = snipat if split is None or split == 1: split = linelen elif 0 < abs(split) < 1: split = int(math.floor(linelen * split)) if split < 0: split += linelen if split < 0: split = 0 if esize == 0 or width < esize + 1: if split <= 0: result.append(string[-width:]) else: result.append(string[:width]) elif split <= 0: result.append(ellipsis + string[esize-width:]) elif split >= linelen or width < esize + 4: result.append(string[:width-esize] + ellipsis) else: splitlen = linelen - width + esize end1 = split - splitlen // 2 end2 = end1 + splitlen result.append(string[:end1] + ellipsis + string[end2:]) if isinstance(string, bytes): return b'\n'.join(result) return '\n'.join(result)
python
def snipstr(string, width=79, snipat=None, ellipsis='...'): """Return string cut to specified length. >>> snipstr('abcdefghijklmnop', 8) 'abc...op' """ if snipat is None: snipat = 0.5 if ellipsis is None: if isinstance(string, bytes): ellipsis = b'...' else: ellipsis = u'\u2026' # does not print on win-py3.5 esize = len(ellipsis) splitlines = string.splitlines() # TODO: finish and test multiline snip result = [] for line in splitlines: if line is None: result.append(ellipsis) continue linelen = len(line) if linelen <= width: result.append(string) continue split = snipat if split is None or split == 1: split = linelen elif 0 < abs(split) < 1: split = int(math.floor(linelen * split)) if split < 0: split += linelen if split < 0: split = 0 if esize == 0 or width < esize + 1: if split <= 0: result.append(string[-width:]) else: result.append(string[:width]) elif split <= 0: result.append(ellipsis + string[esize-width:]) elif split >= linelen or width < esize + 4: result.append(string[:width-esize] + ellipsis) else: splitlen = linelen - width + esize end1 = split - splitlen // 2 end2 = end1 + splitlen result.append(string[:end1] + ellipsis + string[end2:]) if isinstance(string, bytes): return b'\n'.join(result) return '\n'.join(result)
[ "def", "snipstr", "(", "string", ",", "width", "=", "79", ",", "snipat", "=", "None", ",", "ellipsis", "=", "'...'", ")", ":", "if", "snipat", "is", "None", ":", "snipat", "=", "0.5", "if", "ellipsis", "is", "None", ":", "if", "isinstance", "(", "string", ",", "bytes", ")", ":", "ellipsis", "=", "b'...'", "else", ":", "ellipsis", "=", "u'\\u2026'", "# does not print on win-py3.5", "esize", "=", "len", "(", "ellipsis", ")", "splitlines", "=", "string", ".", "splitlines", "(", ")", "# TODO: finish and test multiline snip", "result", "=", "[", "]", "for", "line", "in", "splitlines", ":", "if", "line", "is", "None", ":", "result", ".", "append", "(", "ellipsis", ")", "continue", "linelen", "=", "len", "(", "line", ")", "if", "linelen", "<=", "width", ":", "result", ".", "append", "(", "string", ")", "continue", "split", "=", "snipat", "if", "split", "is", "None", "or", "split", "==", "1", ":", "split", "=", "linelen", "elif", "0", "<", "abs", "(", "split", ")", "<", "1", ":", "split", "=", "int", "(", "math", ".", "floor", "(", "linelen", "*", "split", ")", ")", "if", "split", "<", "0", ":", "split", "+=", "linelen", "if", "split", "<", "0", ":", "split", "=", "0", "if", "esize", "==", "0", "or", "width", "<", "esize", "+", "1", ":", "if", "split", "<=", "0", ":", "result", ".", "append", "(", "string", "[", "-", "width", ":", "]", ")", "else", ":", "result", ".", "append", "(", "string", "[", ":", "width", "]", ")", "elif", "split", "<=", "0", ":", "result", ".", "append", "(", "ellipsis", "+", "string", "[", "esize", "-", "width", ":", "]", ")", "elif", "split", ">=", "linelen", "or", "width", "<", "esize", "+", "4", ":", "result", ".", "append", "(", "string", "[", ":", "width", "-", "esize", "]", "+", "ellipsis", ")", "else", ":", "splitlen", "=", "linelen", "-", "width", "+", "esize", "end1", "=", "split", "-", "splitlen", "//", "2", "end2", "=", "end1", "+", "splitlen", "result", ".", "append", "(", "string", "[", ":", "end1", "]", "+", "ellipsis", "+", 
"string", "[", "end2", ":", "]", ")", "if", "isinstance", "(", "string", ",", "bytes", ")", ":", "return", "b'\\n'", ".", "join", "(", "result", ")", "return", "'\\n'", ".", "join", "(", "result", ")" ]
Return string cut to specified length. >>> snipstr('abcdefghijklmnop', 8) 'abc...op'
[ "Return", "string", "cut", "to", "specified", "length", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10566-L10622
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
enumarg
def enumarg(enum, arg): """Return enum member from its name or value. >>> enumarg(TIFF.PHOTOMETRIC, 2) <PHOTOMETRIC.RGB: 2> >>> enumarg(TIFF.PHOTOMETRIC, 'RGB') <PHOTOMETRIC.RGB: 2> """ try: return enum(arg) except Exception: try: return enum[arg.upper()] except Exception: raise ValueError('invalid argument %s' % arg)
python
def enumarg(enum, arg): """Return enum member from its name or value. >>> enumarg(TIFF.PHOTOMETRIC, 2) <PHOTOMETRIC.RGB: 2> >>> enumarg(TIFF.PHOTOMETRIC, 'RGB') <PHOTOMETRIC.RGB: 2> """ try: return enum(arg) except Exception: try: return enum[arg.upper()] except Exception: raise ValueError('invalid argument %s' % arg)
[ "def", "enumarg", "(", "enum", ",", "arg", ")", ":", "try", ":", "return", "enum", "(", "arg", ")", "except", "Exception", ":", "try", ":", "return", "enum", "[", "arg", ".", "upper", "(", ")", "]", "except", "Exception", ":", "raise", "ValueError", "(", "'invalid argument %s'", "%", "arg", ")" ]
Return enum member from its name or value. >>> enumarg(TIFF.PHOTOMETRIC, 2) <PHOTOMETRIC.RGB: 2> >>> enumarg(TIFF.PHOTOMETRIC, 'RGB') <PHOTOMETRIC.RGB: 2>
[ "Return", "enum", "member", "from", "its", "name", "or", "value", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10625-L10640
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
parse_kwargs
def parse_kwargs(kwargs, *keys, **keyvalues): """Return dict with keys from keys|keyvals and values from kwargs|keyvals. Existing keys are deleted from kwargs. >>> kwargs = {'one': 1, 'two': 2, 'four': 4} >>> kwargs2 = parse_kwargs(kwargs, 'two', 'three', four=None, five=5) >>> kwargs == {'one': 1} True >>> kwargs2 == {'two': 2, 'four': 4, 'five': 5} True """ result = {} for key in keys: if key in kwargs: result[key] = kwargs[key] del kwargs[key] for key, value in keyvalues.items(): if key in kwargs: result[key] = kwargs[key] del kwargs[key] else: result[key] = value return result
python
def parse_kwargs(kwargs, *keys, **keyvalues): """Return dict with keys from keys|keyvals and values from kwargs|keyvals. Existing keys are deleted from kwargs. >>> kwargs = {'one': 1, 'two': 2, 'four': 4} >>> kwargs2 = parse_kwargs(kwargs, 'two', 'three', four=None, five=5) >>> kwargs == {'one': 1} True >>> kwargs2 == {'two': 2, 'four': 4, 'five': 5} True """ result = {} for key in keys: if key in kwargs: result[key] = kwargs[key] del kwargs[key] for key, value in keyvalues.items(): if key in kwargs: result[key] = kwargs[key] del kwargs[key] else: result[key] = value return result
[ "def", "parse_kwargs", "(", "kwargs", ",", "*", "keys", ",", "*", "*", "keyvalues", ")", ":", "result", "=", "{", "}", "for", "key", "in", "keys", ":", "if", "key", "in", "kwargs", ":", "result", "[", "key", "]", "=", "kwargs", "[", "key", "]", "del", "kwargs", "[", "key", "]", "for", "key", ",", "value", "in", "keyvalues", ".", "items", "(", ")", ":", "if", "key", "in", "kwargs", ":", "result", "[", "key", "]", "=", "kwargs", "[", "key", "]", "del", "kwargs", "[", "key", "]", "else", ":", "result", "[", "key", "]", "=", "value", "return", "result" ]
Return dict with keys from keys|keyvals and values from kwargs|keyvals. Existing keys are deleted from kwargs. >>> kwargs = {'one': 1, 'two': 2, 'four': 4} >>> kwargs2 = parse_kwargs(kwargs, 'two', 'three', four=None, five=5) >>> kwargs == {'one': 1} True >>> kwargs2 == {'two': 2, 'four': 4, 'five': 5} True
[ "Return", "dict", "with", "keys", "from", "keys|keyvals", "and", "values", "from", "kwargs|keyvals", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10643-L10667
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
update_kwargs
def update_kwargs(kwargs, **keyvalues): """Update dict with keys and values if keys do not already exist. >>> kwargs = {'one': 1, } >>> update_kwargs(kwargs, one=None, two=2) >>> kwargs == {'one': 1, 'two': 2} True """ for key, value in keyvalues.items(): if key not in kwargs: kwargs[key] = value
python
def update_kwargs(kwargs, **keyvalues): """Update dict with keys and values if keys do not already exist. >>> kwargs = {'one': 1, } >>> update_kwargs(kwargs, one=None, two=2) >>> kwargs == {'one': 1, 'two': 2} True """ for key, value in keyvalues.items(): if key not in kwargs: kwargs[key] = value
[ "def", "update_kwargs", "(", "kwargs", ",", "*", "*", "keyvalues", ")", ":", "for", "key", ",", "value", "in", "keyvalues", ".", "items", "(", ")", ":", "if", "key", "not", "in", "kwargs", ":", "kwargs", "[", "key", "]", "=", "value" ]
Update dict with keys and values if keys do not already exist. >>> kwargs = {'one': 1, } >>> update_kwargs(kwargs, one=None, two=2) >>> kwargs == {'one': 1, 'two': 2} True
[ "Update", "dict", "with", "keys", "and", "values", "if", "keys", "do", "not", "already", "exist", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10670-L10681
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
validate_jhove
def validate_jhove(filename, jhove=None, ignore=None): """Validate TIFF file using jhove -m TIFF-hul. Raise ValueError if jhove outputs an error message unless the message contains one of the strings in 'ignore'. JHOVE does not support bigtiff or more than 50 IFDs. See `JHOVE TIFF-hul Module <http://jhove.sourceforge.net/tiff-hul.html>`_ """ import subprocess # noqa: delayed import if ignore is None: ignore = ['More than 50 IFDs'] if jhove is None: jhove = 'jhove' out = subprocess.check_output([jhove, filename, '-m', 'TIFF-hul']) if b'ErrorMessage: ' in out: for line in out.splitlines(): line = line.strip() if line.startswith(b'ErrorMessage: '): error = line[14:].decode('utf8') for i in ignore: if i in error: break else: raise ValueError(error) break
python
def validate_jhove(filename, jhove=None, ignore=None): """Validate TIFF file using jhove -m TIFF-hul. Raise ValueError if jhove outputs an error message unless the message contains one of the strings in 'ignore'. JHOVE does not support bigtiff or more than 50 IFDs. See `JHOVE TIFF-hul Module <http://jhove.sourceforge.net/tiff-hul.html>`_ """ import subprocess # noqa: delayed import if ignore is None: ignore = ['More than 50 IFDs'] if jhove is None: jhove = 'jhove' out = subprocess.check_output([jhove, filename, '-m', 'TIFF-hul']) if b'ErrorMessage: ' in out: for line in out.splitlines(): line = line.strip() if line.startswith(b'ErrorMessage: '): error = line[14:].decode('utf8') for i in ignore: if i in error: break else: raise ValueError(error) break
[ "def", "validate_jhove", "(", "filename", ",", "jhove", "=", "None", ",", "ignore", "=", "None", ")", ":", "import", "subprocess", "# noqa: delayed import", "if", "ignore", "is", "None", ":", "ignore", "=", "[", "'More than 50 IFDs'", "]", "if", "jhove", "is", "None", ":", "jhove", "=", "'jhove'", "out", "=", "subprocess", ".", "check_output", "(", "[", "jhove", ",", "filename", ",", "'-m'", ",", "'TIFF-hul'", "]", ")", "if", "b'ErrorMessage: '", "in", "out", ":", "for", "line", "in", "out", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "b'ErrorMessage: '", ")", ":", "error", "=", "line", "[", "14", ":", "]", ".", "decode", "(", "'utf8'", ")", "for", "i", "in", "ignore", ":", "if", "i", "in", "error", ":", "break", "else", ":", "raise", "ValueError", "(", "error", ")", "break" ]
Validate TIFF file using jhove -m TIFF-hul. Raise ValueError if jhove outputs an error message unless the message contains one of the strings in 'ignore'. JHOVE does not support bigtiff or more than 50 IFDs. See `JHOVE TIFF-hul Module <http://jhove.sourceforge.net/tiff-hul.html>`_
[ "Validate", "TIFF", "file", "using", "jhove", "-", "m", "TIFF", "-", "hul", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10684-L10711
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
lsm2bin
def lsm2bin(lsmfile, binfile=None, tile=None, verbose=True): """Convert [MP]TZCYX LSM file to series of BIN files. One BIN file containing 'ZCYX' data are created for each position, time, and tile. The position, time, and tile indices are encoded at the end of the filenames. """ verbose = print_ if verbose else nullfunc if tile is None: tile = (256, 256) if binfile is None: binfile = lsmfile elif binfile.lower() == 'none': binfile = None if binfile: binfile += '_(z%ic%iy%ix%i)_m%%ip%%it%%03iy%%ix%%i.bin' verbose('\nOpening LSM file... ', end='', flush=True) start_time = time.time() with TiffFile(lsmfile) as lsm: if not lsm.is_lsm: verbose('\n', lsm, flush=True) raise ValueError('not a LSM file') series = lsm.series[0] # first series contains the image data shape = series.shape axes = series.axes dtype = series.dtype size = product(shape) * dtype.itemsize verbose('%.3f s' % (time.time() - start_time)) # verbose(lsm, flush=True) verbose('Image\n axes: %s\n shape: %s\n dtype: %s\n size: %s' % (axes, shape, dtype, format_size(size)), flush=True) if not series.axes.endswith('TZCYX'): raise ValueError('not a *TZCYX LSM file') verbose('Copying image from LSM to BIN files', end='', flush=True) start_time = time.time() tiles = shape[-2] // tile[-2], shape[-1] // tile[-1] if binfile: binfile = binfile % (shape[-4], shape[-3], tile[0], tile[1]) shape = (1,) * (7-len(shape)) + shape # cache for ZCYX stacks and output files data = numpy.empty(shape[3:], dtype=dtype) out = numpy.empty((shape[-4], shape[-3], tile[0], tile[1]), dtype=dtype) # iterate over Tiff pages containing data pages = iter(series.pages) for m in range(shape[0]): # mosaic axis for p in range(shape[1]): # position axis for t in range(shape[2]): # time axis for z in range(shape[3]): # z slices data[z] = next(pages).asarray() for y in range(tiles[0]): # tile y for x in range(tiles[1]): # tile x out[:] = data[..., y*tile[0]:(y+1)*tile[0], x*tile[1]:(x+1)*tile[1]] if binfile: out.tofile(binfile % (m, p, t, y, x)) 
verbose('.', end='', flush=True) verbose(' %.3f s' % (time.time() - start_time))
python
def lsm2bin(lsmfile, binfile=None, tile=None, verbose=True): """Convert [MP]TZCYX LSM file to series of BIN files. One BIN file containing 'ZCYX' data are created for each position, time, and tile. The position, time, and tile indices are encoded at the end of the filenames. """ verbose = print_ if verbose else nullfunc if tile is None: tile = (256, 256) if binfile is None: binfile = lsmfile elif binfile.lower() == 'none': binfile = None if binfile: binfile += '_(z%ic%iy%ix%i)_m%%ip%%it%%03iy%%ix%%i.bin' verbose('\nOpening LSM file... ', end='', flush=True) start_time = time.time() with TiffFile(lsmfile) as lsm: if not lsm.is_lsm: verbose('\n', lsm, flush=True) raise ValueError('not a LSM file') series = lsm.series[0] # first series contains the image data shape = series.shape axes = series.axes dtype = series.dtype size = product(shape) * dtype.itemsize verbose('%.3f s' % (time.time() - start_time)) # verbose(lsm, flush=True) verbose('Image\n axes: %s\n shape: %s\n dtype: %s\n size: %s' % (axes, shape, dtype, format_size(size)), flush=True) if not series.axes.endswith('TZCYX'): raise ValueError('not a *TZCYX LSM file') verbose('Copying image from LSM to BIN files', end='', flush=True) start_time = time.time() tiles = shape[-2] // tile[-2], shape[-1] // tile[-1] if binfile: binfile = binfile % (shape[-4], shape[-3], tile[0], tile[1]) shape = (1,) * (7-len(shape)) + shape # cache for ZCYX stacks and output files data = numpy.empty(shape[3:], dtype=dtype) out = numpy.empty((shape[-4], shape[-3], tile[0], tile[1]), dtype=dtype) # iterate over Tiff pages containing data pages = iter(series.pages) for m in range(shape[0]): # mosaic axis for p in range(shape[1]): # position axis for t in range(shape[2]): # time axis for z in range(shape[3]): # z slices data[z] = next(pages).asarray() for y in range(tiles[0]): # tile y for x in range(tiles[1]): # tile x out[:] = data[..., y*tile[0]:(y+1)*tile[0], x*tile[1]:(x+1)*tile[1]] if binfile: out.tofile(binfile % (m, p, t, y, x)) 
verbose('.', end='', flush=True) verbose(' %.3f s' % (time.time() - start_time))
[ "def", "lsm2bin", "(", "lsmfile", ",", "binfile", "=", "None", ",", "tile", "=", "None", ",", "verbose", "=", "True", ")", ":", "verbose", "=", "print_", "if", "verbose", "else", "nullfunc", "if", "tile", "is", "None", ":", "tile", "=", "(", "256", ",", "256", ")", "if", "binfile", "is", "None", ":", "binfile", "=", "lsmfile", "elif", "binfile", ".", "lower", "(", ")", "==", "'none'", ":", "binfile", "=", "None", "if", "binfile", ":", "binfile", "+=", "'_(z%ic%iy%ix%i)_m%%ip%%it%%03iy%%ix%%i.bin'", "verbose", "(", "'\\nOpening LSM file... '", ",", "end", "=", "''", ",", "flush", "=", "True", ")", "start_time", "=", "time", ".", "time", "(", ")", "with", "TiffFile", "(", "lsmfile", ")", "as", "lsm", ":", "if", "not", "lsm", ".", "is_lsm", ":", "verbose", "(", "'\\n'", ",", "lsm", ",", "flush", "=", "True", ")", "raise", "ValueError", "(", "'not a LSM file'", ")", "series", "=", "lsm", ".", "series", "[", "0", "]", "# first series contains the image data", "shape", "=", "series", ".", "shape", "axes", "=", "series", ".", "axes", "dtype", "=", "series", ".", "dtype", "size", "=", "product", "(", "shape", ")", "*", "dtype", ".", "itemsize", "verbose", "(", "'%.3f s'", "%", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "# verbose(lsm, flush=True)", "verbose", "(", "'Image\\n axes: %s\\n shape: %s\\n dtype: %s\\n size: %s'", "%", "(", "axes", ",", "shape", ",", "dtype", ",", "format_size", "(", "size", ")", ")", ",", "flush", "=", "True", ")", "if", "not", "series", ".", "axes", ".", "endswith", "(", "'TZCYX'", ")", ":", "raise", "ValueError", "(", "'not a *TZCYX LSM file'", ")", "verbose", "(", "'Copying image from LSM to BIN files'", ",", "end", "=", "''", ",", "flush", "=", "True", ")", "start_time", "=", "time", ".", "time", "(", ")", "tiles", "=", "shape", "[", "-", "2", "]", "//", "tile", "[", "-", "2", "]", ",", "shape", "[", "-", "1", "]", "//", "tile", "[", "-", "1", "]", "if", "binfile", ":", "binfile", "=", "binfile", "%", "(", "shape", 
"[", "-", "4", "]", ",", "shape", "[", "-", "3", "]", ",", "tile", "[", "0", "]", ",", "tile", "[", "1", "]", ")", "shape", "=", "(", "1", ",", ")", "*", "(", "7", "-", "len", "(", "shape", ")", ")", "+", "shape", "# cache for ZCYX stacks and output files", "data", "=", "numpy", ".", "empty", "(", "shape", "[", "3", ":", "]", ",", "dtype", "=", "dtype", ")", "out", "=", "numpy", ".", "empty", "(", "(", "shape", "[", "-", "4", "]", ",", "shape", "[", "-", "3", "]", ",", "tile", "[", "0", "]", ",", "tile", "[", "1", "]", ")", ",", "dtype", "=", "dtype", ")", "# iterate over Tiff pages containing data", "pages", "=", "iter", "(", "series", ".", "pages", ")", "for", "m", "in", "range", "(", "shape", "[", "0", "]", ")", ":", "# mosaic axis", "for", "p", "in", "range", "(", "shape", "[", "1", "]", ")", ":", "# position axis", "for", "t", "in", "range", "(", "shape", "[", "2", "]", ")", ":", "# time axis", "for", "z", "in", "range", "(", "shape", "[", "3", "]", ")", ":", "# z slices", "data", "[", "z", "]", "=", "next", "(", "pages", ")", ".", "asarray", "(", ")", "for", "y", "in", "range", "(", "tiles", "[", "0", "]", ")", ":", "# tile y", "for", "x", "in", "range", "(", "tiles", "[", "1", "]", ")", ":", "# tile x", "out", "[", ":", "]", "=", "data", "[", "...", ",", "y", "*", "tile", "[", "0", "]", ":", "(", "y", "+", "1", ")", "*", "tile", "[", "0", "]", ",", "x", "*", "tile", "[", "1", "]", ":", "(", "x", "+", "1", ")", "*", "tile", "[", "1", "]", "]", "if", "binfile", ":", "out", ".", "tofile", "(", "binfile", "%", "(", "m", ",", "p", ",", "t", ",", "y", ",", "x", ")", ")", "verbose", "(", "'.'", ",", "end", "=", "''", ",", "flush", "=", "True", ")", "verbose", "(", "' %.3f s'", "%", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ")" ]
Convert [MP]TZCYX LSM file to series of BIN files. One BIN file containing 'ZCYX' data are created for each position, time, and tile. The position, time, and tile indices are encoded at the end of the filenames.
[ "Convert", "[", "MP", "]", "TZCYX", "LSM", "file", "to", "series", "of", "BIN", "files", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10714-L10779
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
imshow
def imshow(data, photometric=None, planarconfig=None, bitspersample=None, interpolation=None, cmap=None, vmin=None, vmax=None, figure=None, title=None, dpi=96, subplot=None, maxdim=None, **kwargs): """Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported C{from matplotlib import pyplot}. Parameters ---------- data : nd array The image data. photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'} The color space of the image data. planarconfig : {'CONTIG' or 'SEPARATE'} Defines how components of each pixel are stored. bitspersample : int Number of bits per channel in integer RGB images. interpolation : str The image interpolation method used in matplotlib.imshow. By default, 'nearest' will be used for image dimensions <= 512, else 'bilinear'. cmap : str or matplotlib.colors.Colormap The colormap maps non-RGBA scalar data to colors. vmin, vmax : scalar Data range covered by the colormap. By default, the complete range of the data is covered. figure : matplotlib.figure.Figure Matplotlib figure to use for plotting. title : str Window and subplot title. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int Maximum image width and length. kwargs : dict Additional arguments for matplotlib.pyplot.imshow. 
""" # TODO: rewrite detection of isrgb, iscontig # TODO: use planarconfig if photometric is None: photometric = 'RGB' if maxdim is None: maxdim = 2**16 isrgb = photometric in ('RGB', 'YCBCR') # 'PALETTE', 'YCBCR' if data.dtype == 'float16': data = data.astype('float32') if data.dtype.kind == 'b': isrgb = False if isrgb and not (data.shape[-1] in (3, 4) or ( data.ndim > 2 and data.shape[-3] in (3, 4))): isrgb = False photometric = 'MINISBLACK' data = data.squeeze() if photometric in ('MINISWHITE', 'MINISBLACK', None): data = reshape_nd(data, 2) else: data = reshape_nd(data, 3) dims = data.ndim if dims < 2: raise ValueError('not an image') if dims == 2: dims = 0 isrgb = False else: if isrgb and data.shape[-3] in (3, 4): data = numpy.swapaxes(data, -3, -2) data = numpy.swapaxes(data, -2, -1) elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and data.shape[-1] < data.shape[-3] // 8 and data.shape[-1] < 5): data = numpy.swapaxes(data, -3, -1) data = numpy.swapaxes(data, -2, -1) isrgb = isrgb and data.shape[-1] in (3, 4) dims -= 3 if isrgb else 2 if interpolation is None: threshold = 512 elif isinstance(interpolation, int): threshold = interpolation else: threshold = 0 if isrgb: data = data[..., :maxdim, :maxdim, :maxdim] if threshold: if (data.shape[-2] > threshold or data.shape[-3] > threshold): interpolation = 'bilinear' else: interpolation = 'nearest' else: data = data[..., :maxdim, :maxdim] if threshold: if (data.shape[-1] > threshold or data.shape[-2] > threshold): interpolation = 'bilinear' else: interpolation = 'nearest' if photometric == 'PALETTE' and isrgb: datamax = data.max() if datamax > 255: data = data >> 8 # possible precision loss data = data.astype('B') elif data.dtype.kind in 'ui': if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: try: bitspersample = int(math.ceil(math.log(data.max(), 2))) except Exception: bitspersample = data.dtype.itemsize * 8 elif not isinstance(bitspersample, inttypes): # bitspersample can be tuple, 
e.g. (5, 6, 5) bitspersample = data.dtype.itemsize * 8 datamax = 2**bitspersample if isrgb: if bitspersample < 8: data = data << (8 - bitspersample) elif bitspersample > 8: data = data >> (bitspersample - 8) # precision loss data = data.astype('B') elif data.dtype.kind == 'f': datamax = data.max() if isrgb and datamax > 1.0: if data.dtype.char == 'd': data = data.astype('f') data /= datamax else: data = data / datamax elif data.dtype.kind == 'b': datamax = 1 elif data.dtype.kind == 'c': data = numpy.absolute(data) datamax = data.max() if isrgb: vmin = 0 else: if vmax is None: vmax = datamax if vmin is None: if data.dtype.kind == 'i': dtmin = numpy.iinfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data[data > dtmin]) elif data.dtype.kind == 'f': dtmin = numpy.finfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data[data > dtmin]) else: vmin = 0 pyplot = sys.modules['matplotlib.pyplot'] if figure is None: pyplot.rc('font', family='sans-serif', weight='normal', size=8) figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, facecolor='1.0', edgecolor='w') try: figure.canvas.manager.window.title(title) except Exception: pass size = len(title.splitlines()) if title else 1 pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.98-size*0.03, left=0.1, right=0.95, hspace=0.05, wspace=0.0) if subplot is None: subplot = 111 subplot = pyplot.subplot(subplot) subplot.set_facecolor((0, 0, 0)) if title: try: title = unicode(title, 'Windows-1252') except TypeError: pass pyplot.title(title, size=11) if cmap is None: if data.dtype.char == '?': cmap = 'gray' elif data.dtype.kind in 'buf' or vmin == 0: cmap = 'viridis' else: cmap = 'coolwarm' if photometric == 'MINISWHITE': cmap += '_r' image = pyplot.imshow(numpy.atleast_2d(data[(0,) * dims].squeeze()), vmin=vmin, vmax=vmax, cmap=cmap, interpolation=interpolation, **kwargs) if not isrgb: pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 def format_coord(x, y): # 
callback function to format coordinate display in toolbar x = int(x + 0.5) y = int(y + 0.5) try: if dims: return '%s @ %s [%4i, %4i]' % ( curaxdat[1][y, x], current, y, x) return '%s @ [%4i, %4i]' % (data[y, x], y, x) except IndexError: return '' def none(event): return '' subplot.format_coord = format_coord image.get_cursor_data = none image.format_cursor_data = none if dims: current = list((0,) * dims) curaxdat = [0, data[tuple(current)].squeeze()] sliders = [pyplot.Slider( pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]), 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5', valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)] for slider in sliders: slider.drawon = False def set_image(current, sliders=sliders, data=data): # change image and redraw canvas curaxdat[1] = data[tuple(current)].squeeze() image.set_data(curaxdat[1]) for ctrl, index in zip(sliders, current): ctrl.eventson = False ctrl.set_val(index) ctrl.eventson = True figure.canvas.draw() def on_changed(index, axis, data=data, current=current): # callback function for slider change event index = int(round(index)) curaxdat[0] = axis if index == current[axis]: return if index >= data.shape[axis]: index = 0 elif index < 0: index = data.shape[axis] - 1 current[axis] = index set_image(current) def on_keypressed(event, data=data, current=current): # callback function for key press event key = event.key axis = curaxdat[0] if str(key) in '0123456789': on_changed(key, axis) elif key == 'right': on_changed(current[axis] + 1, axis) elif key == 'left': on_changed(current[axis] - 1, axis) elif key == 'up': curaxdat[0] = 0 if axis == len(data.shape)-1 else axis + 1 elif key == 'down': curaxdat[0] = len(data.shape)-1 if axis == 0 else axis - 1 elif key == 'end': on_changed(data.shape[axis] - 1, axis) elif key == 'home': on_changed(0, axis) figure.canvas.mpl_connect('key_press_event', on_keypressed) for axis, ctrl in enumerate(sliders): ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) 
return figure, subplot, image
python
def imshow(data, photometric=None, planarconfig=None, bitspersample=None, interpolation=None, cmap=None, vmin=None, vmax=None, figure=None, title=None, dpi=96, subplot=None, maxdim=None, **kwargs): """Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported C{from matplotlib import pyplot}. Parameters ---------- data : nd array The image data. photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'} The color space of the image data. planarconfig : {'CONTIG' or 'SEPARATE'} Defines how components of each pixel are stored. bitspersample : int Number of bits per channel in integer RGB images. interpolation : str The image interpolation method used in matplotlib.imshow. By default, 'nearest' will be used for image dimensions <= 512, else 'bilinear'. cmap : str or matplotlib.colors.Colormap The colormap maps non-RGBA scalar data to colors. vmin, vmax : scalar Data range covered by the colormap. By default, the complete range of the data is covered. figure : matplotlib.figure.Figure Matplotlib figure to use for plotting. title : str Window and subplot title. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int Maximum image width and length. kwargs : dict Additional arguments for matplotlib.pyplot.imshow. 
""" # TODO: rewrite detection of isrgb, iscontig # TODO: use planarconfig if photometric is None: photometric = 'RGB' if maxdim is None: maxdim = 2**16 isrgb = photometric in ('RGB', 'YCBCR') # 'PALETTE', 'YCBCR' if data.dtype == 'float16': data = data.astype('float32') if data.dtype.kind == 'b': isrgb = False if isrgb and not (data.shape[-1] in (3, 4) or ( data.ndim > 2 and data.shape[-3] in (3, 4))): isrgb = False photometric = 'MINISBLACK' data = data.squeeze() if photometric in ('MINISWHITE', 'MINISBLACK', None): data = reshape_nd(data, 2) else: data = reshape_nd(data, 3) dims = data.ndim if dims < 2: raise ValueError('not an image') if dims == 2: dims = 0 isrgb = False else: if isrgb and data.shape[-3] in (3, 4): data = numpy.swapaxes(data, -3, -2) data = numpy.swapaxes(data, -2, -1) elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and data.shape[-1] < data.shape[-3] // 8 and data.shape[-1] < 5): data = numpy.swapaxes(data, -3, -1) data = numpy.swapaxes(data, -2, -1) isrgb = isrgb and data.shape[-1] in (3, 4) dims -= 3 if isrgb else 2 if interpolation is None: threshold = 512 elif isinstance(interpolation, int): threshold = interpolation else: threshold = 0 if isrgb: data = data[..., :maxdim, :maxdim, :maxdim] if threshold: if (data.shape[-2] > threshold or data.shape[-3] > threshold): interpolation = 'bilinear' else: interpolation = 'nearest' else: data = data[..., :maxdim, :maxdim] if threshold: if (data.shape[-1] > threshold or data.shape[-2] > threshold): interpolation = 'bilinear' else: interpolation = 'nearest' if photometric == 'PALETTE' and isrgb: datamax = data.max() if datamax > 255: data = data >> 8 # possible precision loss data = data.astype('B') elif data.dtype.kind in 'ui': if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: try: bitspersample = int(math.ceil(math.log(data.max(), 2))) except Exception: bitspersample = data.dtype.itemsize * 8 elif not isinstance(bitspersample, inttypes): # bitspersample can be tuple, 
e.g. (5, 6, 5) bitspersample = data.dtype.itemsize * 8 datamax = 2**bitspersample if isrgb: if bitspersample < 8: data = data << (8 - bitspersample) elif bitspersample > 8: data = data >> (bitspersample - 8) # precision loss data = data.astype('B') elif data.dtype.kind == 'f': datamax = data.max() if isrgb and datamax > 1.0: if data.dtype.char == 'd': data = data.astype('f') data /= datamax else: data = data / datamax elif data.dtype.kind == 'b': datamax = 1 elif data.dtype.kind == 'c': data = numpy.absolute(data) datamax = data.max() if isrgb: vmin = 0 else: if vmax is None: vmax = datamax if vmin is None: if data.dtype.kind == 'i': dtmin = numpy.iinfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data[data > dtmin]) elif data.dtype.kind == 'f': dtmin = numpy.finfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data[data > dtmin]) else: vmin = 0 pyplot = sys.modules['matplotlib.pyplot'] if figure is None: pyplot.rc('font', family='sans-serif', weight='normal', size=8) figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, facecolor='1.0', edgecolor='w') try: figure.canvas.manager.window.title(title) except Exception: pass size = len(title.splitlines()) if title else 1 pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.98-size*0.03, left=0.1, right=0.95, hspace=0.05, wspace=0.0) if subplot is None: subplot = 111 subplot = pyplot.subplot(subplot) subplot.set_facecolor((0, 0, 0)) if title: try: title = unicode(title, 'Windows-1252') except TypeError: pass pyplot.title(title, size=11) if cmap is None: if data.dtype.char == '?': cmap = 'gray' elif data.dtype.kind in 'buf' or vmin == 0: cmap = 'viridis' else: cmap = 'coolwarm' if photometric == 'MINISWHITE': cmap += '_r' image = pyplot.imshow(numpy.atleast_2d(data[(0,) * dims].squeeze()), vmin=vmin, vmax=vmax, cmap=cmap, interpolation=interpolation, **kwargs) if not isrgb: pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 def format_coord(x, y): # 
callback function to format coordinate display in toolbar x = int(x + 0.5) y = int(y + 0.5) try: if dims: return '%s @ %s [%4i, %4i]' % ( curaxdat[1][y, x], current, y, x) return '%s @ [%4i, %4i]' % (data[y, x], y, x) except IndexError: return '' def none(event): return '' subplot.format_coord = format_coord image.get_cursor_data = none image.format_cursor_data = none if dims: current = list((0,) * dims) curaxdat = [0, data[tuple(current)].squeeze()] sliders = [pyplot.Slider( pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]), 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5', valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)] for slider in sliders: slider.drawon = False def set_image(current, sliders=sliders, data=data): # change image and redraw canvas curaxdat[1] = data[tuple(current)].squeeze() image.set_data(curaxdat[1]) for ctrl, index in zip(sliders, current): ctrl.eventson = False ctrl.set_val(index) ctrl.eventson = True figure.canvas.draw() def on_changed(index, axis, data=data, current=current): # callback function for slider change event index = int(round(index)) curaxdat[0] = axis if index == current[axis]: return if index >= data.shape[axis]: index = 0 elif index < 0: index = data.shape[axis] - 1 current[axis] = index set_image(current) def on_keypressed(event, data=data, current=current): # callback function for key press event key = event.key axis = curaxdat[0] if str(key) in '0123456789': on_changed(key, axis) elif key == 'right': on_changed(current[axis] + 1, axis) elif key == 'left': on_changed(current[axis] - 1, axis) elif key == 'up': curaxdat[0] = 0 if axis == len(data.shape)-1 else axis + 1 elif key == 'down': curaxdat[0] = len(data.shape)-1 if axis == 0 else axis - 1 elif key == 'end': on_changed(data.shape[axis] - 1, axis) elif key == 'home': on_changed(0, axis) figure.canvas.mpl_connect('key_press_event', on_keypressed) for axis, ctrl in enumerate(sliders): ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) 
return figure, subplot, image
[ "def", "imshow", "(", "data", ",", "photometric", "=", "None", ",", "planarconfig", "=", "None", ",", "bitspersample", "=", "None", ",", "interpolation", "=", "None", ",", "cmap", "=", "None", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "figure", "=", "None", ",", "title", "=", "None", ",", "dpi", "=", "96", ",", "subplot", "=", "None", ",", "maxdim", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# TODO: rewrite detection of isrgb, iscontig", "# TODO: use planarconfig", "if", "photometric", "is", "None", ":", "photometric", "=", "'RGB'", "if", "maxdim", "is", "None", ":", "maxdim", "=", "2", "**", "16", "isrgb", "=", "photometric", "in", "(", "'RGB'", ",", "'YCBCR'", ")", "# 'PALETTE', 'YCBCR'", "if", "data", ".", "dtype", "==", "'float16'", ":", "data", "=", "data", ".", "astype", "(", "'float32'", ")", "if", "data", ".", "dtype", ".", "kind", "==", "'b'", ":", "isrgb", "=", "False", "if", "isrgb", "and", "not", "(", "data", ".", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "or", "(", "data", ".", "ndim", ">", "2", "and", "data", ".", "shape", "[", "-", "3", "]", "in", "(", "3", ",", "4", ")", ")", ")", ":", "isrgb", "=", "False", "photometric", "=", "'MINISBLACK'", "data", "=", "data", ".", "squeeze", "(", ")", "if", "photometric", "in", "(", "'MINISWHITE'", ",", "'MINISBLACK'", ",", "None", ")", ":", "data", "=", "reshape_nd", "(", "data", ",", "2", ")", "else", ":", "data", "=", "reshape_nd", "(", "data", ",", "3", ")", "dims", "=", "data", ".", "ndim", "if", "dims", "<", "2", ":", "raise", "ValueError", "(", "'not an image'", ")", "if", "dims", "==", "2", ":", "dims", "=", "0", "isrgb", "=", "False", "else", ":", "if", "isrgb", "and", "data", ".", "shape", "[", "-", "3", "]", "in", "(", "3", ",", "4", ")", ":", "data", "=", "numpy", ".", "swapaxes", "(", "data", ",", "-", "3", ",", "-", "2", ")", "data", "=", "numpy", ".", "swapaxes", "(", "data", ",", "-", "2", ",", "-", "1", ")", "elif", "not", "isrgb", "and", "(", "data", 
".", "shape", "[", "-", "1", "]", "<", "data", ".", "shape", "[", "-", "2", "]", "//", "8", "and", "data", ".", "shape", "[", "-", "1", "]", "<", "data", ".", "shape", "[", "-", "3", "]", "//", "8", "and", "data", ".", "shape", "[", "-", "1", "]", "<", "5", ")", ":", "data", "=", "numpy", ".", "swapaxes", "(", "data", ",", "-", "3", ",", "-", "1", ")", "data", "=", "numpy", ".", "swapaxes", "(", "data", ",", "-", "2", ",", "-", "1", ")", "isrgb", "=", "isrgb", "and", "data", ".", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "dims", "-=", "3", "if", "isrgb", "else", "2", "if", "interpolation", "is", "None", ":", "threshold", "=", "512", "elif", "isinstance", "(", "interpolation", ",", "int", ")", ":", "threshold", "=", "interpolation", "else", ":", "threshold", "=", "0", "if", "isrgb", ":", "data", "=", "data", "[", "...", ",", ":", "maxdim", ",", ":", "maxdim", ",", ":", "maxdim", "]", "if", "threshold", ":", "if", "(", "data", ".", "shape", "[", "-", "2", "]", ">", "threshold", "or", "data", ".", "shape", "[", "-", "3", "]", ">", "threshold", ")", ":", "interpolation", "=", "'bilinear'", "else", ":", "interpolation", "=", "'nearest'", "else", ":", "data", "=", "data", "[", "...", ",", ":", "maxdim", ",", ":", "maxdim", "]", "if", "threshold", ":", "if", "(", "data", ".", "shape", "[", "-", "1", "]", ">", "threshold", "or", "data", ".", "shape", "[", "-", "2", "]", ">", "threshold", ")", ":", "interpolation", "=", "'bilinear'", "else", ":", "interpolation", "=", "'nearest'", "if", "photometric", "==", "'PALETTE'", "and", "isrgb", ":", "datamax", "=", "data", ".", "max", "(", ")", "if", "datamax", ">", "255", ":", "data", "=", "data", ">>", "8", "# possible precision loss", "data", "=", "data", ".", "astype", "(", "'B'", ")", "elif", "data", ".", "dtype", ".", "kind", "in", "'ui'", ":", "if", "not", "(", "isrgb", "and", "data", ".", "dtype", ".", "itemsize", "<=", "1", ")", "or", "bitspersample", "is", "None", ":", "try", ":", "bitspersample", "=", 
"int", "(", "math", ".", "ceil", "(", "math", ".", "log", "(", "data", ".", "max", "(", ")", ",", "2", ")", ")", ")", "except", "Exception", ":", "bitspersample", "=", "data", ".", "dtype", ".", "itemsize", "*", "8", "elif", "not", "isinstance", "(", "bitspersample", ",", "inttypes", ")", ":", "# bitspersample can be tuple, e.g. (5, 6, 5)", "bitspersample", "=", "data", ".", "dtype", ".", "itemsize", "*", "8", "datamax", "=", "2", "**", "bitspersample", "if", "isrgb", ":", "if", "bitspersample", "<", "8", ":", "data", "=", "data", "<<", "(", "8", "-", "bitspersample", ")", "elif", "bitspersample", ">", "8", ":", "data", "=", "data", ">>", "(", "bitspersample", "-", "8", ")", "# precision loss", "data", "=", "data", ".", "astype", "(", "'B'", ")", "elif", "data", ".", "dtype", ".", "kind", "==", "'f'", ":", "datamax", "=", "data", ".", "max", "(", ")", "if", "isrgb", "and", "datamax", ">", "1.0", ":", "if", "data", ".", "dtype", ".", "char", "==", "'d'", ":", "data", "=", "data", ".", "astype", "(", "'f'", ")", "data", "/=", "datamax", "else", ":", "data", "=", "data", "/", "datamax", "elif", "data", ".", "dtype", ".", "kind", "==", "'b'", ":", "datamax", "=", "1", "elif", "data", ".", "dtype", ".", "kind", "==", "'c'", ":", "data", "=", "numpy", ".", "absolute", "(", "data", ")", "datamax", "=", "data", ".", "max", "(", ")", "if", "isrgb", ":", "vmin", "=", "0", "else", ":", "if", "vmax", "is", "None", ":", "vmax", "=", "datamax", "if", "vmin", "is", "None", ":", "if", "data", ".", "dtype", ".", "kind", "==", "'i'", ":", "dtmin", "=", "numpy", ".", "iinfo", "(", "data", ".", "dtype", ")", ".", "min", "vmin", "=", "numpy", ".", "min", "(", "data", ")", "if", "vmin", "==", "dtmin", ":", "vmin", "=", "numpy", ".", "min", "(", "data", "[", "data", ">", "dtmin", "]", ")", "elif", "data", ".", "dtype", ".", "kind", "==", "'f'", ":", "dtmin", "=", "numpy", ".", "finfo", "(", "data", ".", "dtype", ")", ".", "min", "vmin", "=", "numpy", ".", "min", "(", "data", ")", "if", 
"vmin", "==", "dtmin", ":", "vmin", "=", "numpy", ".", "min", "(", "data", "[", "data", ">", "dtmin", "]", ")", "else", ":", "vmin", "=", "0", "pyplot", "=", "sys", ".", "modules", "[", "'matplotlib.pyplot'", "]", "if", "figure", "is", "None", ":", "pyplot", ".", "rc", "(", "'font'", ",", "family", "=", "'sans-serif'", ",", "weight", "=", "'normal'", ",", "size", "=", "8", ")", "figure", "=", "pyplot", ".", "figure", "(", "dpi", "=", "dpi", ",", "figsize", "=", "(", "10.3", ",", "6.3", ")", ",", "frameon", "=", "True", ",", "facecolor", "=", "'1.0'", ",", "edgecolor", "=", "'w'", ")", "try", ":", "figure", ".", "canvas", ".", "manager", ".", "window", ".", "title", "(", "title", ")", "except", "Exception", ":", "pass", "size", "=", "len", "(", "title", ".", "splitlines", "(", ")", ")", "if", "title", "else", "1", "pyplot", ".", "subplots_adjust", "(", "bottom", "=", "0.03", "*", "(", "dims", "+", "2", ")", ",", "top", "=", "0.98", "-", "size", "*", "0.03", ",", "left", "=", "0.1", ",", "right", "=", "0.95", ",", "hspace", "=", "0.05", ",", "wspace", "=", "0.0", ")", "if", "subplot", "is", "None", ":", "subplot", "=", "111", "subplot", "=", "pyplot", ".", "subplot", "(", "subplot", ")", "subplot", ".", "set_facecolor", "(", "(", "0", ",", "0", ",", "0", ")", ")", "if", "title", ":", "try", ":", "title", "=", "unicode", "(", "title", ",", "'Windows-1252'", ")", "except", "TypeError", ":", "pass", "pyplot", ".", "title", "(", "title", ",", "size", "=", "11", ")", "if", "cmap", "is", "None", ":", "if", "data", ".", "dtype", ".", "char", "==", "'?'", ":", "cmap", "=", "'gray'", "elif", "data", ".", "dtype", ".", "kind", "in", "'buf'", "or", "vmin", "==", "0", ":", "cmap", "=", "'viridis'", "else", ":", "cmap", "=", "'coolwarm'", "if", "photometric", "==", "'MINISWHITE'", ":", "cmap", "+=", "'_r'", "image", "=", "pyplot", ".", "imshow", "(", "numpy", ".", "atleast_2d", "(", "data", "[", "(", "0", ",", ")", "*", "dims", "]", ".", "squeeze", "(", ")", ")", ",", "vmin", 
"=", "vmin", ",", "vmax", "=", "vmax", ",", "cmap", "=", "cmap", ",", "interpolation", "=", "interpolation", ",", "*", "*", "kwargs", ")", "if", "not", "isrgb", ":", "pyplot", ".", "colorbar", "(", ")", "# panchor=(0.55, 0.5), fraction=0.05", "def", "format_coord", "(", "x", ",", "y", ")", ":", "# callback function to format coordinate display in toolbar", "x", "=", "int", "(", "x", "+", "0.5", ")", "y", "=", "int", "(", "y", "+", "0.5", ")", "try", ":", "if", "dims", ":", "return", "'%s @ %s [%4i, %4i]'", "%", "(", "curaxdat", "[", "1", "]", "[", "y", ",", "x", "]", ",", "current", ",", "y", ",", "x", ")", "return", "'%s @ [%4i, %4i]'", "%", "(", "data", "[", "y", ",", "x", "]", ",", "y", ",", "x", ")", "except", "IndexError", ":", "return", "''", "def", "none", "(", "event", ")", ":", "return", "''", "subplot", ".", "format_coord", "=", "format_coord", "image", ".", "get_cursor_data", "=", "none", "image", ".", "format_cursor_data", "=", "none", "if", "dims", ":", "current", "=", "list", "(", "(", "0", ",", ")", "*", "dims", ")", "curaxdat", "=", "[", "0", ",", "data", "[", "tuple", "(", "current", ")", "]", ".", "squeeze", "(", ")", "]", "sliders", "=", "[", "pyplot", ".", "Slider", "(", "pyplot", ".", "axes", "(", "[", "0.125", ",", "0.03", "*", "(", "axis", "+", "1", ")", ",", "0.725", ",", "0.025", "]", ")", ",", "'Dimension %i'", "%", "axis", ",", "0", ",", "data", ".", "shape", "[", "axis", "]", "-", "1", ",", "0", ",", "facecolor", "=", "'0.5'", ",", "valfmt", "=", "'%%.0f [%i]'", "%", "data", ".", "shape", "[", "axis", "]", ")", "for", "axis", "in", "range", "(", "dims", ")", "]", "for", "slider", "in", "sliders", ":", "slider", ".", "drawon", "=", "False", "def", "set_image", "(", "current", ",", "sliders", "=", "sliders", ",", "data", "=", "data", ")", ":", "# change image and redraw canvas", "curaxdat", "[", "1", "]", "=", "data", "[", "tuple", "(", "current", ")", "]", ".", "squeeze", "(", ")", "image", ".", "set_data", "(", "curaxdat", "[", "1", "]", 
")", "for", "ctrl", ",", "index", "in", "zip", "(", "sliders", ",", "current", ")", ":", "ctrl", ".", "eventson", "=", "False", "ctrl", ".", "set_val", "(", "index", ")", "ctrl", ".", "eventson", "=", "True", "figure", ".", "canvas", ".", "draw", "(", ")", "def", "on_changed", "(", "index", ",", "axis", ",", "data", "=", "data", ",", "current", "=", "current", ")", ":", "# callback function for slider change event", "index", "=", "int", "(", "round", "(", "index", ")", ")", "curaxdat", "[", "0", "]", "=", "axis", "if", "index", "==", "current", "[", "axis", "]", ":", "return", "if", "index", ">=", "data", ".", "shape", "[", "axis", "]", ":", "index", "=", "0", "elif", "index", "<", "0", ":", "index", "=", "data", ".", "shape", "[", "axis", "]", "-", "1", "current", "[", "axis", "]", "=", "index", "set_image", "(", "current", ")", "def", "on_keypressed", "(", "event", ",", "data", "=", "data", ",", "current", "=", "current", ")", ":", "# callback function for key press event", "key", "=", "event", ".", "key", "axis", "=", "curaxdat", "[", "0", "]", "if", "str", "(", "key", ")", "in", "'0123456789'", ":", "on_changed", "(", "key", ",", "axis", ")", "elif", "key", "==", "'right'", ":", "on_changed", "(", "current", "[", "axis", "]", "+", "1", ",", "axis", ")", "elif", "key", "==", "'left'", ":", "on_changed", "(", "current", "[", "axis", "]", "-", "1", ",", "axis", ")", "elif", "key", "==", "'up'", ":", "curaxdat", "[", "0", "]", "=", "0", "if", "axis", "==", "len", "(", "data", ".", "shape", ")", "-", "1", "else", "axis", "+", "1", "elif", "key", "==", "'down'", ":", "curaxdat", "[", "0", "]", "=", "len", "(", "data", ".", "shape", ")", "-", "1", "if", "axis", "==", "0", "else", "axis", "-", "1", "elif", "key", "==", "'end'", ":", "on_changed", "(", "data", ".", "shape", "[", "axis", "]", "-", "1", ",", "axis", ")", "elif", "key", "==", "'home'", ":", "on_changed", "(", "0", ",", "axis", ")", "figure", ".", "canvas", ".", "mpl_connect", "(", "'key_press_event'", ",", 
"on_keypressed", ")", "for", "axis", ",", "ctrl", "in", "enumerate", "(", "sliders", ")", ":", "ctrl", ".", "on_changed", "(", "lambda", "k", ",", "a", "=", "axis", ":", "on_changed", "(", "k", ",", "a", ")", ")", "return", "figure", ",", "subplot", ",", "image" ]
Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported C{from matplotlib import pyplot}. Parameters ---------- data : nd array The image data. photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'} The color space of the image data. planarconfig : {'CONTIG' or 'SEPARATE'} Defines how components of each pixel are stored. bitspersample : int Number of bits per channel in integer RGB images. interpolation : str The image interpolation method used in matplotlib.imshow. By default, 'nearest' will be used for image dimensions <= 512, else 'bilinear'. cmap : str or matplotlib.colors.Colormap The colormap maps non-RGBA scalar data to colors. vmin, vmax : scalar Data range covered by the colormap. By default, the complete range of the data is covered. figure : matplotlib.figure.Figure Matplotlib figure to use for plotting. title : str Window and subplot title. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int Maximum image width and length. kwargs : dict Additional arguments for matplotlib.pyplot.imshow.
[ "Plot", "n", "-", "dimensional", "images", "using", "matplotlib", ".", "pyplot", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10782-L11057
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
askopenfilename
def askopenfilename(**kwargs): """Return file name(s) from Tkinter's file open dialog.""" try: from Tkinter import Tk import tkFileDialog as filedialog except ImportError: from tkinter import Tk, filedialog root = Tk() root.withdraw() root.update() filenames = filedialog.askopenfilename(**kwargs) root.destroy() return filenames
python
def askopenfilename(**kwargs): """Return file name(s) from Tkinter's file open dialog.""" try: from Tkinter import Tk import tkFileDialog as filedialog except ImportError: from tkinter import Tk, filedialog root = Tk() root.withdraw() root.update() filenames = filedialog.askopenfilename(**kwargs) root.destroy() return filenames
[ "def", "askopenfilename", "(", "*", "*", "kwargs", ")", ":", "try", ":", "from", "Tkinter", "import", "Tk", "import", "tkFileDialog", "as", "filedialog", "except", "ImportError", ":", "from", "tkinter", "import", "Tk", ",", "filedialog", "root", "=", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "root", ".", "update", "(", ")", "filenames", "=", "filedialog", ".", "askopenfilename", "(", "*", "*", "kwargs", ")", "root", ".", "destroy", "(", ")", "return", "filenames" ]
Return file name(s) from Tkinter's file open dialog.
[ "Return", "file", "name", "(", "s", ")", "from", "Tkinter", "s", "file", "open", "dialog", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L11066-L11078
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
main
def main(argv=None): """Tifffile command line usage main function.""" if argv is None: argv = sys.argv log.setLevel(logging.INFO) import optparse # TODO: use argparse parser = optparse.OptionParser( usage='usage: %prog [options] path', description='Display image data in TIFF files.', version='%%prog %s' % __version__, prog='tifffile') opt = parser.add_option opt('-p', '--page', dest='page', type='int', default=-1, help='display single page') opt('-s', '--series', dest='series', type='int', default=-1, help='display series of pages of same shape') opt('--nomultifile', dest='nomultifile', action='store_true', default=False, help='do not read OME series from multiple files') opt('--noplots', dest='noplots', type='int', default=10, help='maximum number of plots') opt('--interpol', dest='interpol', metavar='INTERPOL', default=None, help='image interpolation method') opt('--dpi', dest='dpi', type='int', default=96, help='plot resolution') opt('--vmin', dest='vmin', type='int', default=None, help='minimum value for colormapping') opt('--vmax', dest='vmax', type='int', default=None, help='maximum value for colormapping') opt('--debug', dest='debug', action='store_true', default=False, help='raise exception on failures') opt('--doctest', dest='doctest', action='store_true', default=False, help='runs the docstring examples') opt('-v', '--detail', dest='detail', type='int', default=2) opt('-q', '--quiet', dest='quiet', action='store_true') settings, path = parser.parse_args() path = ' '.join(path) if settings.doctest: import doctest if sys.version_info < (3, 6): print('Doctests work with Python >=3.6 only') return 0 doctest.testmod(optionflags=doctest.ELLIPSIS) return 0 if not path: path = askopenfilename(title='Select a TIFF file', filetypes=TIFF.FILEOPEN_FILTER) if not path: parser.error('No file specified') if any(i in path for i in '?*'): path = glob.glob(path) if not path: print('No files match the pattern') return 0 # TODO: handle image sequences path = path[0] if not 
settings.quiet: print_('\nReading TIFF header:', end=' ', flush=True) start = time.time() try: tif = TiffFile(path, multifile=not settings.nomultifile) except Exception as exc: if settings.debug: raise print('\n\n%s: %s' % (exc.__class__.__name__, exc)) sys.exit(0) if not settings.quiet: print('%.3f ms' % ((time.time()-start) * 1e3)) if tif.is_ome: settings.norgb = True images = [] if settings.noplots > 0: if not settings.quiet: print_('Reading image data: ', end=' ', flush=True) def notnone(x): return next(i for i in x if i is not None) start = time.time() try: if settings.page >= 0: images = [(tif.asarray(key=settings.page), tif[settings.page], None)] elif settings.series >= 0: images = [(tif.asarray(series=settings.series), notnone(tif.series[settings.series]._pages), tif.series[settings.series])] else: for i, s in enumerate(tif.series[:settings.noplots]): try: images.append((tif.asarray(series=i), notnone(s._pages), tif.series[i])) except Exception as exc: images.append((None, notnone(s.pages), None)) if settings.debug: raise print('\nSeries %i failed with %s: %s... 
' % (i, exc.__class__.__name__, exc), end='') except Exception as exc: if settings.debug: raise print('%s: %s' % (exc.__class__.__name__, exc)) if not settings.quiet: print('%.3f ms' % ((time.time()-start) * 1e3)) if not settings.quiet: print_('Generating printout:', end=' ', flush=True) start = time.time() info = TiffFile.__str__(tif, detail=int(settings.detail)) print('%.3f ms' % ((time.time()-start) * 1e3)) print() print(info) print() tif.close() if images and settings.noplots > 0: try: import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot except ImportError as exc: log.warning('tifffile.main: %s: %s', exc.__class__.__name__, exc) else: for img, page, series in images: if img is None: continue vmin, vmax = settings.vmin, settings.vmax if 'GDAL_NODATA' in page.tags: try: vmin = numpy.min( img[img > float(page.tags['GDAL_NODATA'].value)]) except ValueError: pass if tif.is_stk: try: vmin = tif.stk_metadata['MinScale'] vmax = tif.stk_metadata['MaxScale'] except KeyError: pass else: if vmax <= vmin: vmin, vmax = settings.vmin, settings.vmax if series: title = '%s\n%s\n%s' % (str(tif), str(page), str(series)) else: title = '%s\n %s' % (str(tif), str(page)) photometric = 'MINISBLACK' if page.photometric not in (3,): photometric = TIFF.PHOTOMETRIC(page.photometric).name imshow(img, title=title, vmin=vmin, vmax=vmax, bitspersample=page.bitspersample, photometric=photometric, interpolation=settings.interpol, dpi=settings.dpi) pyplot.show() return 0
python
def main(argv=None): """Tifffile command line usage main function.""" if argv is None: argv = sys.argv log.setLevel(logging.INFO) import optparse # TODO: use argparse parser = optparse.OptionParser( usage='usage: %prog [options] path', description='Display image data in TIFF files.', version='%%prog %s' % __version__, prog='tifffile') opt = parser.add_option opt('-p', '--page', dest='page', type='int', default=-1, help='display single page') opt('-s', '--series', dest='series', type='int', default=-1, help='display series of pages of same shape') opt('--nomultifile', dest='nomultifile', action='store_true', default=False, help='do not read OME series from multiple files') opt('--noplots', dest='noplots', type='int', default=10, help='maximum number of plots') opt('--interpol', dest='interpol', metavar='INTERPOL', default=None, help='image interpolation method') opt('--dpi', dest='dpi', type='int', default=96, help='plot resolution') opt('--vmin', dest='vmin', type='int', default=None, help='minimum value for colormapping') opt('--vmax', dest='vmax', type='int', default=None, help='maximum value for colormapping') opt('--debug', dest='debug', action='store_true', default=False, help='raise exception on failures') opt('--doctest', dest='doctest', action='store_true', default=False, help='runs the docstring examples') opt('-v', '--detail', dest='detail', type='int', default=2) opt('-q', '--quiet', dest='quiet', action='store_true') settings, path = parser.parse_args() path = ' '.join(path) if settings.doctest: import doctest if sys.version_info < (3, 6): print('Doctests work with Python >=3.6 only') return 0 doctest.testmod(optionflags=doctest.ELLIPSIS) return 0 if not path: path = askopenfilename(title='Select a TIFF file', filetypes=TIFF.FILEOPEN_FILTER) if not path: parser.error('No file specified') if any(i in path for i in '?*'): path = glob.glob(path) if not path: print('No files match the pattern') return 0 # TODO: handle image sequences path = path[0] if not 
settings.quiet: print_('\nReading TIFF header:', end=' ', flush=True) start = time.time() try: tif = TiffFile(path, multifile=not settings.nomultifile) except Exception as exc: if settings.debug: raise print('\n\n%s: %s' % (exc.__class__.__name__, exc)) sys.exit(0) if not settings.quiet: print('%.3f ms' % ((time.time()-start) * 1e3)) if tif.is_ome: settings.norgb = True images = [] if settings.noplots > 0: if not settings.quiet: print_('Reading image data: ', end=' ', flush=True) def notnone(x): return next(i for i in x if i is not None) start = time.time() try: if settings.page >= 0: images = [(tif.asarray(key=settings.page), tif[settings.page], None)] elif settings.series >= 0: images = [(tif.asarray(series=settings.series), notnone(tif.series[settings.series]._pages), tif.series[settings.series])] else: for i, s in enumerate(tif.series[:settings.noplots]): try: images.append((tif.asarray(series=i), notnone(s._pages), tif.series[i])) except Exception as exc: images.append((None, notnone(s.pages), None)) if settings.debug: raise print('\nSeries %i failed with %s: %s... 
' % (i, exc.__class__.__name__, exc), end='') except Exception as exc: if settings.debug: raise print('%s: %s' % (exc.__class__.__name__, exc)) if not settings.quiet: print('%.3f ms' % ((time.time()-start) * 1e3)) if not settings.quiet: print_('Generating printout:', end=' ', flush=True) start = time.time() info = TiffFile.__str__(tif, detail=int(settings.detail)) print('%.3f ms' % ((time.time()-start) * 1e3)) print() print(info) print() tif.close() if images and settings.noplots > 0: try: import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot except ImportError as exc: log.warning('tifffile.main: %s: %s', exc.__class__.__name__, exc) else: for img, page, series in images: if img is None: continue vmin, vmax = settings.vmin, settings.vmax if 'GDAL_NODATA' in page.tags: try: vmin = numpy.min( img[img > float(page.tags['GDAL_NODATA'].value)]) except ValueError: pass if tif.is_stk: try: vmin = tif.stk_metadata['MinScale'] vmax = tif.stk_metadata['MaxScale'] except KeyError: pass else: if vmax <= vmin: vmin, vmax = settings.vmin, settings.vmax if series: title = '%s\n%s\n%s' % (str(tif), str(page), str(series)) else: title = '%s\n %s' % (str(tif), str(page)) photometric = 'MINISBLACK' if page.photometric not in (3,): photometric = TIFF.PHOTOMETRIC(page.photometric).name imshow(img, title=title, vmin=vmin, vmax=vmax, bitspersample=page.bitspersample, photometric=photometric, interpolation=settings.interpol, dpi=settings.dpi) pyplot.show() return 0
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "log", ".", "setLevel", "(", "logging", ".", "INFO", ")", "import", "optparse", "# TODO: use argparse", "parser", "=", "optparse", ".", "OptionParser", "(", "usage", "=", "'usage: %prog [options] path'", ",", "description", "=", "'Display image data in TIFF files.'", ",", "version", "=", "'%%prog %s'", "%", "__version__", ",", "prog", "=", "'tifffile'", ")", "opt", "=", "parser", ".", "add_option", "opt", "(", "'-p'", ",", "'--page'", ",", "dest", "=", "'page'", ",", "type", "=", "'int'", ",", "default", "=", "-", "1", ",", "help", "=", "'display single page'", ")", "opt", "(", "'-s'", ",", "'--series'", ",", "dest", "=", "'series'", ",", "type", "=", "'int'", ",", "default", "=", "-", "1", ",", "help", "=", "'display series of pages of same shape'", ")", "opt", "(", "'--nomultifile'", ",", "dest", "=", "'nomultifile'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'do not read OME series from multiple files'", ")", "opt", "(", "'--noplots'", ",", "dest", "=", "'noplots'", ",", "type", "=", "'int'", ",", "default", "=", "10", ",", "help", "=", "'maximum number of plots'", ")", "opt", "(", "'--interpol'", ",", "dest", "=", "'interpol'", ",", "metavar", "=", "'INTERPOL'", ",", "default", "=", "None", ",", "help", "=", "'image interpolation method'", ")", "opt", "(", "'--dpi'", ",", "dest", "=", "'dpi'", ",", "type", "=", "'int'", ",", "default", "=", "96", ",", "help", "=", "'plot resolution'", ")", "opt", "(", "'--vmin'", ",", "dest", "=", "'vmin'", ",", "type", "=", "'int'", ",", "default", "=", "None", ",", "help", "=", "'minimum value for colormapping'", ")", "opt", "(", "'--vmax'", ",", "dest", "=", "'vmax'", ",", "type", "=", "'int'", ",", "default", "=", "None", ",", "help", "=", "'maximum value for colormapping'", ")", "opt", "(", "'--debug'", ",", "dest", "=", "'debug'", ",", "action", "=", 
"'store_true'", ",", "default", "=", "False", ",", "help", "=", "'raise exception on failures'", ")", "opt", "(", "'--doctest'", ",", "dest", "=", "'doctest'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'runs the docstring examples'", ")", "opt", "(", "'-v'", ",", "'--detail'", ",", "dest", "=", "'detail'", ",", "type", "=", "'int'", ",", "default", "=", "2", ")", "opt", "(", "'-q'", ",", "'--quiet'", ",", "dest", "=", "'quiet'", ",", "action", "=", "'store_true'", ")", "settings", ",", "path", "=", "parser", ".", "parse_args", "(", ")", "path", "=", "' '", ".", "join", "(", "path", ")", "if", "settings", ".", "doctest", ":", "import", "doctest", "if", "sys", ".", "version_info", "<", "(", "3", ",", "6", ")", ":", "print", "(", "'Doctests work with Python >=3.6 only'", ")", "return", "0", "doctest", ".", "testmod", "(", "optionflags", "=", "doctest", ".", "ELLIPSIS", ")", "return", "0", "if", "not", "path", ":", "path", "=", "askopenfilename", "(", "title", "=", "'Select a TIFF file'", ",", "filetypes", "=", "TIFF", ".", "FILEOPEN_FILTER", ")", "if", "not", "path", ":", "parser", ".", "error", "(", "'No file specified'", ")", "if", "any", "(", "i", "in", "path", "for", "i", "in", "'?*'", ")", ":", "path", "=", "glob", ".", "glob", "(", "path", ")", "if", "not", "path", ":", "print", "(", "'No files match the pattern'", ")", "return", "0", "# TODO: handle image sequences", "path", "=", "path", "[", "0", "]", "if", "not", "settings", ".", "quiet", ":", "print_", "(", "'\\nReading TIFF header:'", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "start", "=", "time", ".", "time", "(", ")", "try", ":", "tif", "=", "TiffFile", "(", "path", ",", "multifile", "=", "not", "settings", ".", "nomultifile", ")", "except", "Exception", "as", "exc", ":", "if", "settings", ".", "debug", ":", "raise", "print", "(", "'\\n\\n%s: %s'", "%", "(", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", ")", "sys", ".", "exit", "(", 
"0", ")", "if", "not", "settings", ".", "quiet", ":", "print", "(", "'%.3f ms'", "%", "(", "(", "time", ".", "time", "(", ")", "-", "start", ")", "*", "1e3", ")", ")", "if", "tif", ".", "is_ome", ":", "settings", ".", "norgb", "=", "True", "images", "=", "[", "]", "if", "settings", ".", "noplots", ">", "0", ":", "if", "not", "settings", ".", "quiet", ":", "print_", "(", "'Reading image data: '", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "def", "notnone", "(", "x", ")", ":", "return", "next", "(", "i", "for", "i", "in", "x", "if", "i", "is", "not", "None", ")", "start", "=", "time", ".", "time", "(", ")", "try", ":", "if", "settings", ".", "page", ">=", "0", ":", "images", "=", "[", "(", "tif", ".", "asarray", "(", "key", "=", "settings", ".", "page", ")", ",", "tif", "[", "settings", ".", "page", "]", ",", "None", ")", "]", "elif", "settings", ".", "series", ">=", "0", ":", "images", "=", "[", "(", "tif", ".", "asarray", "(", "series", "=", "settings", ".", "series", ")", ",", "notnone", "(", "tif", ".", "series", "[", "settings", ".", "series", "]", ".", "_pages", ")", ",", "tif", ".", "series", "[", "settings", ".", "series", "]", ")", "]", "else", ":", "for", "i", ",", "s", "in", "enumerate", "(", "tif", ".", "series", "[", ":", "settings", ".", "noplots", "]", ")", ":", "try", ":", "images", ".", "append", "(", "(", "tif", ".", "asarray", "(", "series", "=", "i", ")", ",", "notnone", "(", "s", ".", "_pages", ")", ",", "tif", ".", "series", "[", "i", "]", ")", ")", "except", "Exception", "as", "exc", ":", "images", ".", "append", "(", "(", "None", ",", "notnone", "(", "s", ".", "pages", ")", ",", "None", ")", ")", "if", "settings", ".", "debug", ":", "raise", "print", "(", "'\\nSeries %i failed with %s: %s... 
'", "%", "(", "i", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", ",", "end", "=", "''", ")", "except", "Exception", "as", "exc", ":", "if", "settings", ".", "debug", ":", "raise", "print", "(", "'%s: %s'", "%", "(", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", ")", "if", "not", "settings", ".", "quiet", ":", "print", "(", "'%.3f ms'", "%", "(", "(", "time", ".", "time", "(", ")", "-", "start", ")", "*", "1e3", ")", ")", "if", "not", "settings", ".", "quiet", ":", "print_", "(", "'Generating printout:'", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "start", "=", "time", ".", "time", "(", ")", "info", "=", "TiffFile", ".", "__str__", "(", "tif", ",", "detail", "=", "int", "(", "settings", ".", "detail", ")", ")", "print", "(", "'%.3f ms'", "%", "(", "(", "time", ".", "time", "(", ")", "-", "start", ")", "*", "1e3", ")", ")", "print", "(", ")", "print", "(", "info", ")", "print", "(", ")", "tif", ".", "close", "(", ")", "if", "images", "and", "settings", ".", "noplots", ">", "0", ":", "try", ":", "import", "matplotlib", "matplotlib", ".", "use", "(", "'TkAgg'", ")", "from", "matplotlib", "import", "pyplot", "except", "ImportError", "as", "exc", ":", "log", ".", "warning", "(", "'tifffile.main: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "else", ":", "for", "img", ",", "page", ",", "series", "in", "images", ":", "if", "img", "is", "None", ":", "continue", "vmin", ",", "vmax", "=", "settings", ".", "vmin", ",", "settings", ".", "vmax", "if", "'GDAL_NODATA'", "in", "page", ".", "tags", ":", "try", ":", "vmin", "=", "numpy", ".", "min", "(", "img", "[", "img", ">", "float", "(", "page", ".", "tags", "[", "'GDAL_NODATA'", "]", ".", "value", ")", "]", ")", "except", "ValueError", ":", "pass", "if", "tif", ".", "is_stk", ":", "try", ":", "vmin", "=", "tif", ".", "stk_metadata", "[", "'MinScale'", "]", "vmax", "=", "tif", ".", "stk_metadata", "[", "'MaxScale'", "]", "except", "KeyError", ":", "pass", 
"else", ":", "if", "vmax", "<=", "vmin", ":", "vmin", ",", "vmax", "=", "settings", ".", "vmin", ",", "settings", ".", "vmax", "if", "series", ":", "title", "=", "'%s\\n%s\\n%s'", "%", "(", "str", "(", "tif", ")", ",", "str", "(", "page", ")", ",", "str", "(", "series", ")", ")", "else", ":", "title", "=", "'%s\\n %s'", "%", "(", "str", "(", "tif", ")", ",", "str", "(", "page", ")", ")", "photometric", "=", "'MINISBLACK'", "if", "page", ".", "photometric", "not", "in", "(", "3", ",", ")", ":", "photometric", "=", "TIFF", ".", "PHOTOMETRIC", "(", "page", ".", "photometric", ")", ".", "name", "imshow", "(", "img", ",", "title", "=", "title", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ",", "bitspersample", "=", "page", ".", "bitspersample", ",", "photometric", "=", "photometric", ",", "interpolation", "=", "settings", ".", "interpol", ",", "dpi", "=", "settings", ".", "dpi", ")", "pyplot", ".", "show", "(", ")", "return", "0" ]
Tifffile command line usage main function.
[ "Tifffile", "command", "line", "usage", "main", "function", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L11081-L11246
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffWriter.save
def save(self, data=None, shape=None, dtype=None, returnoffset=False, photometric=None, planarconfig=None, extrasamples=None, tile=None, contiguous=True, align=16, truncate=False, compress=0, rowsperstrip=None, predictor=False, colormap=None, description=None, datetime=None, resolution=None, subfiletype=0, software='tifffile.py', metadata={}, ijmetadata=None, extratags=()): """Write numpy array and tags to TIFF file. The data shape's last dimensions are assumed to be image depth, height (length), width, and samples. If a colormap is provided, the data's dtype must be uint8 or uint16 and the data values are indices into the last dimension of the colormap. If 'shape' and 'dtype' are specified, an empty array is saved. This option cannot be used with compression or multiple tiles. Image data are written uncompressed in one strip per plane by default. Dimensions larger than 2 to 4 (depending on photometric mode, planar configuration, and SGI mode) are flattened and saved as separate pages. The SampleFormat and BitsPerSample tags are derived from the data type. Parameters ---------- data : numpy.ndarray or None Input image array. shape : tuple or None Shape of the empty array to save. Used only if 'data' is None. dtype : numpy.dtype or None Data-type of the empty array to save. Used only if 'data' is None. returnoffset : bool If True and the image data in the file is memory-mappable, return the offset and number of bytes of the image data in the file. photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'} The color space of the image data. By default, this setting is inferred from the data shape and the value of colormap. For CFA images, DNG tags must be specified in 'extratags'. planarconfig : {'CONTIG', 'SEPARATE'} Specifies if samples are stored interleaved or in separate planes. By default, this setting is inferred from the data shape. If this parameter is set, extra samples are used to store grayscale images. 'CONTIG': last dimension contains samples. 
'SEPARATE': third last dimension contains samples. extrasamples : tuple of {'UNSPECIFIED', 'ASSOCALPHA', 'UNASSALPHA'} Defines the interpretation of extra components in pixels. 'UNSPECIFIED': no transparency information (default). 'ASSOCALPHA': single, true transparency with pre-multiplied color. 'UNASSALPHA': independent transparency masks. tile : tuple of int The shape (depth, length, width) of image tiles to write. If None (default), image data are written in strips. The tile length and width must be a multiple of 16. If the tile depth is provided, the SGI ImageDepth and TileDepth tags are used to save volume data. Unless a single tile is used, tiles cannot be used to write contiguous files. Few software can read the SGI format, e.g. MeVisLab. contiguous : bool If True (default) and the data and parameters are compatible with previous ones, if any, the image data are stored contiguously after the previous one. In that case, 'photometric', 'planarconfig', 'rowsperstrip', are ignored. Metadata such as 'description', 'metadata', 'datetime', and 'extratags' are written to the first page of a contiguous series only. align : int Byte boundary on which to align the image data in the file. Default 16. Use mmap.ALLOCATIONGRANULARITY for memory-mapped data. Following contiguous writes are not aligned. truncate : bool If True, only write the first page including shape metadata if possible (uncompressed, contiguous, not tiled). Other TIFF readers will only be able to read part of the data. compress : int or str or (str, int) If 0 (default), data are written uncompressed. If 0-9, the level of ADOBE_DEFLATE compression. If a str, one of TIFF.COMPRESSION, e.g. 'LZMA' or 'ZSTD'. If a tuple, first item is one of TIFF.COMPRESSION and second item is compression level. Compression cannot be used to write contiguous files. rowsperstrip : int The number of rows per strip. By default strips will be ~64 KB if compression is enabled, else rowsperstrip is set to the image length. 
Bilevel images are always stored in one strip per plane. predictor : bool If True, apply horizontal differencing or floating-point predictor before compression. colormap : numpy.ndarray RGB color values for the corresponding data value. Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16. description : str The subject of the image. Must be 7-bit ASCII. Cannot be used with the ImageJ format. Saved with the first page only. datetime : datetime, str, or bool Date and time of image creation in '%Y:%m:%d %H:%M:%S' format or datetime object. Else if True, the current date and time is used. Saved with the first page only. resolution : (float, float[, str]) or ((int, int), (int, int)[, str]) X and Y resolutions in pixels per resolution unit as float or rational numbers. A third, optional parameter specifies the resolution unit, which must be None (default for ImageJ), 'INCH' (default), or 'CENTIMETER'. subfiletype : int Bitfield to indicate the kind of data. Set bit 0 if the image is a reduced-resolution version of another image. Set bit 1 if the image is part of a multi-page image. Set bit 2 if the image is transparency mask for another image (photometric must be MASK, SamplesPerPixel and BitsPerSample must be 1). software : str Name of the software used to create the file. Must be 7-bit ASCII. Saved with the first page only. metadata : dict Additional metadata to be saved along with shape information in JSON or ImageJ formats in an ImageDescription tag. If None, do not write a second ImageDescription tag. Strings must be 7-bit ASCII. Saved with the first page only. ijmetadata : dict Additional metadata to be saved in application specific IJMetadata and IJMetadataByteCounts tags. Refer to the imagej_metadata_tag function for valid keys and values. Saved with the first page only. extratags : sequence of tuples Additional tags as [(code, dtype, count, value, writeonce)]. code : int The TIFF tag Id. dtype : str Data type of items in 'value' in Python struct format. 
One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q. count : int Number of data values. Not used for string or byte string values. value : sequence 'Count' values compatible with 'dtype'. Byte strings must contain count values of dtype packed as binary data. writeonce : bool If True, the tag is written to the first page only. """ # TODO: refactor this function fh = self._fh byteorder = self._byteorder if data is None: if compress: raise ValueError('cannot save compressed empty file') datashape = shape datadtype = numpy.dtype(dtype).newbyteorder(byteorder) datadtypechar = datadtype.char else: data = numpy.asarray(data, byteorder+data.dtype.char, 'C') if data.size == 0: raise ValueError('cannot save empty array') datashape = data.shape datadtype = data.dtype datadtypechar = data.dtype.char returnoffset = returnoffset and datadtype.isnative bilevel = datadtypechar == '?' if bilevel: index = -1 if datashape[-1] > 1 else -2 datasize = product(datashape[:index]) if datashape[index] % 8: datasize *= datashape[index] // 8 + 1 else: datasize *= datashape[index] // 8 else: datasize = product(datashape) * datadtype.itemsize # just append contiguous data if possible self._truncate = bool(truncate) if self._datashape: if (not contiguous or self._datashape[1:] != datashape or self._datadtype != datadtype or (compress and self._tags) or tile or not numpy.array_equal(colormap, self._colormap)): # incompatible shape, dtype, compression mode, or colormap self._write_remaining_pages() self._write_image_description() self._truncate = False self._descriptionoffset = 0 self._descriptionlenoffset = 0 self._datashape = None self._colormap = None if self._imagej: raise ValueError( 'ImageJ does not support non-contiguous data') else: # consecutive mode self._datashape = (self._datashape[0] + 1,) + datashape if not compress: # write contiguous data, write IFDs/tags later offset = fh.tell() if data is None: fh.write_empty(datasize) else: fh.write_array(data) if returnoffset: return offset, 
datasize return None input_shape = datashape tagnoformat = self._tagnoformat valueformat = self._valueformat offsetformat = self._offsetformat offsetsize = self._offsetsize tagsize = self._tagsize MINISBLACK = TIFF.PHOTOMETRIC.MINISBLACK MINISWHITE = TIFF.PHOTOMETRIC.MINISWHITE RGB = TIFF.PHOTOMETRIC.RGB CFA = TIFF.PHOTOMETRIC.CFA PALETTE = TIFF.PHOTOMETRIC.PALETTE CONTIG = TIFF.PLANARCONFIG.CONTIG SEPARATE = TIFF.PLANARCONFIG.SEPARATE # parse input if photometric is not None: photometric = enumarg(TIFF.PHOTOMETRIC, photometric) if planarconfig: planarconfig = enumarg(TIFF.PLANARCONFIG, planarconfig) if extrasamples is None: extrasamples_ = None else: extrasamples_ = tuple(enumarg(TIFF.EXTRASAMPLE, es) for es in sequence(extrasamples)) if not compress: compress = False compresstag = 1 # TODO: support predictors without compression predictor = False predictortag = 1 else: if isinstance(compress, (tuple, list)): compress, compresslevel = compress elif isinstance(compress, int): compress, compresslevel = 'ADOBE_DEFLATE', int(compress) if not 0 <= compresslevel <= 9: raise ValueError('invalid compression level %s' % compress) else: compresslevel = None compress = compress.upper() compresstag = enumarg(TIFF.COMPRESSION, compress) if predictor: if datadtype.kind in 'iu': predictortag = 2 predictor = TIFF.PREDICTORS[2] elif datadtype.kind == 'f': predictortag = 3 predictor = TIFF.PREDICTORS[3] else: raise ValueError('cannot apply predictor to %s' % datadtype) # prepare ImageJ format if self._imagej: # if predictor or compress: # warnings.warn( # 'ImageJ cannot handle predictors or compression') if description: warnings.warn('not writing description to ImageJ file') description = None volume = False if datadtypechar not in 'BHhf': raise ValueError( 'ImageJ does not support data type %s' % datadtypechar) ijrgb = photometric == RGB if photometric else None if datadtypechar not in 'B': ijrgb = False ijshape = imagej_shape(datashape, ijrgb) if ijshape[-1] in (3, 4): 
photometric = RGB if datadtypechar not in 'B': raise ValueError('ImageJ does not support data type %s ' 'for RGB' % datadtypechar) elif photometric is None: photometric = MINISBLACK planarconfig = None if planarconfig == SEPARATE: raise ValueError('ImageJ does not support planar images') planarconfig = CONTIG if ijrgb else None # define compress function if compress: compressor = TIFF.COMPESSORS[compresstag] if predictor: def compress(data, level=compresslevel): data = predictor(data, axis=-2) return compressor(data, level) else: def compress(data, level=compresslevel): return compressor(data, level) # verify colormap and indices if colormap is not None: if datadtypechar not in 'BH': raise ValueError('invalid data dtype for palette mode') colormap = numpy.asarray(colormap, dtype=byteorder+'H') if colormap.shape != (3, 2**(datadtype.itemsize * 8)): raise ValueError('invalid color map shape') self._colormap = colormap # verify tile shape if tile: tile = tuple(int(i) for i in tile[:3]) volume = len(tile) == 3 if (len(tile) < 2 or tile[-1] % 16 or tile[-2] % 16 or any(i < 1 for i in tile)): raise ValueError('invalid tile shape') else: tile = () volume = False # normalize data shape to 5D or 6D, depending on volume: # (pages, planar_samples, [depth,] height, width, contig_samples) datashape = reshape_nd(datashape, 3 if photometric == RGB else 2) shape = datashape ndim = len(datashape) samplesperpixel = 1 extrasamples = 0 if volume and ndim < 3: volume = False if colormap is not None: photometric = PALETTE planarconfig = None if photometric is None: photometric = MINISBLACK if bilevel: photometric = MINISWHITE elif planarconfig == CONTIG: if ndim > 2 and shape[-1] in (3, 4): photometric = RGB elif planarconfig == SEPARATE: if volume and ndim > 3 and shape[-4] in (3, 4): photometric = RGB elif ndim > 2 and shape[-3] in (3, 4): photometric = RGB elif ndim > 2 and shape[-1] in (3, 4): photometric = RGB elif self._imagej: photometric = MINISBLACK elif volume and ndim > 3 and 
shape[-4] in (3, 4): photometric = RGB elif ndim > 2 and shape[-3] in (3, 4): photometric = RGB if planarconfig and len(shape) <= (3 if volume else 2): planarconfig = None if photometric not in (0, 1, 3, 4): photometric = MINISBLACK if photometric == RGB: if len(shape) < 3: raise ValueError('not a RGB(A) image') if len(shape) < 4: volume = False if planarconfig is None: if shape[-1] in (3, 4): planarconfig = CONTIG elif shape[-4 if volume else -3] in (3, 4): planarconfig = SEPARATE elif shape[-1] > shape[-4 if volume else -3]: planarconfig = SEPARATE else: planarconfig = CONTIG if planarconfig == CONTIG: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] else: datashape = (-1,) + shape[(-4 if volume else -3):] + (1,) samplesperpixel = datashape[1] if samplesperpixel > 3: extrasamples = samplesperpixel - 3 elif photometric == CFA: if len(shape) != 2: raise ValueError('invalid CFA image') volume = False planarconfig = None datashape = (-1, 1) + shape[-2:] + (1,) if 50706 not in (et[0] for et in extratags): raise ValueError('must specify DNG tags for CFA image') elif planarconfig and len(shape) > (3 if volume else 2): if planarconfig == CONTIG: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] else: datashape = (-1,) + shape[(-4 if volume else -3):] + (1,) samplesperpixel = datashape[1] extrasamples = samplesperpixel - 1 else: planarconfig = None while len(shape) > 2 and shape[-1] == 1: shape = shape[:-1] # remove trailing 1s if len(shape) < 3: volume = False if extrasamples_ is None: datashape = (-1, 1) + shape[(-3 if volume else -2):] + (1,) else: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] extrasamples = samplesperpixel - 1 if subfiletype & 0b100: # FILETYPE_MASK if not (bilevel and samplesperpixel == 1 and photometric in (0, 1, 4)): raise ValueError('invalid SubfileType MASK') photometric = TIFF.PHOTOMETRIC.MASK # normalize shape to 6D assert 
len(datashape) in (5, 6) if len(datashape) == 5: datashape = datashape[:2] + (1,) + datashape[2:] if datashape[0] == -1: s0 = product(input_shape) // product(datashape[1:]) datashape = (s0,) + datashape[1:] shape = datashape if data is not None: data = data.reshape(shape) if tile and not volume: tile = (1, tile[-2], tile[-1]) if photometric == PALETTE: if (samplesperpixel != 1 or extrasamples or shape[1] != 1 or shape[-1] != 1): raise ValueError('invalid data shape for palette mode') if photometric == RGB and samplesperpixel == 2: raise ValueError('not a RGB image (samplesperpixel=2)') if bilevel: if compresstag not in (1, 32773): raise ValueError('cannot compress bilevel image') if tile: raise ValueError('cannot save tiled bilevel image') if photometric not in (0, 1, 4): raise ValueError('cannot save bilevel image as %s' % str(photometric)) datashape = list(datashape) if datashape[-2] % 8: datashape[-2] = datashape[-2] // 8 + 1 else: datashape[-2] = datashape[-2] // 8 datashape = tuple(datashape) assert datasize == product(datashape) if data is not None: data = numpy.packbits(data, axis=-2) assert datashape[-2] == data.shape[-2] bytestr = bytes if sys.version[0] == '2' else ( lambda x: bytes(x, 'ascii') if isinstance(x, str) else x) tags = [] # list of (code, ifdentry, ifdvalue, writeonce) strip_or_tile = 'Tile' if tile else 'Strip' tagbytecounts = TIFF.TAG_NAMES[strip_or_tile + 'ByteCounts'] tagoffsets = TIFF.TAG_NAMES[strip_or_tile + 'Offsets'] self._tagoffsets = tagoffsets def pack(fmt, *val): return struct.pack(byteorder+fmt, *val) def addtag(code, dtype, count, value, writeonce=False): # Compute ifdentry & ifdvalue bytes from code, dtype, count, value # Append (code, ifdentry, ifdvalue, writeonce) to tags list code = int(TIFF.TAG_NAMES.get(code, code)) try: tifftype = TIFF.DATA_DTYPES[dtype] except KeyError: raise ValueError('unknown dtype %s' % dtype) rawcount = count if dtype == 's': # strings value = bytestr(value) + b'\0' count = rawcount = len(value) 
rawcount = value.find(b'\0\0') if rawcount < 0: rawcount = count else: rawcount += 1 # length of string without buffer value = (value,) elif isinstance(value, bytes): # packed binary data dtsize = struct.calcsize(dtype) if len(value) % dtsize: raise ValueError('invalid packed binary data') count = len(value) // dtsize if len(dtype) > 1: count *= int(dtype[:-1]) dtype = dtype[-1] ifdentry = [pack('HH', code, tifftype), pack(offsetformat, rawcount)] ifdvalue = None if struct.calcsize(dtype) * count <= offsetsize: # value(s) can be written directly if isinstance(value, bytes): ifdentry.append(pack(valueformat, value)) elif count == 1: if isinstance(value, (tuple, list, numpy.ndarray)): value = value[0] ifdentry.append(pack(valueformat, pack(dtype, value))) else: ifdentry.append(pack(valueformat, pack(str(count)+dtype, *value))) else: # use offset to value(s) ifdentry.append(pack(offsetformat, 0)) if isinstance(value, bytes): ifdvalue = value elif isinstance(value, numpy.ndarray): assert value.size == count assert value.dtype.char == dtype ifdvalue = value.tostring() elif isinstance(value, (tuple, list)): ifdvalue = pack(str(count)+dtype, *value) else: ifdvalue = pack(dtype, value) tags.append((code, b''.join(ifdentry), ifdvalue, writeonce)) def rational(arg, max_denominator=1000000): """"Return nominator and denominator from float or two integers.""" from fractions import Fraction # delayed import try: f = Fraction.from_float(arg) except TypeError: f = Fraction(arg[0], arg[1]) f = f.limit_denominator(max_denominator) return f.numerator, f.denominator if description: # user provided description addtag('ImageDescription', 's', 0, description, writeonce=True) # write shape and metadata to ImageDescription self._metadata = {} if not metadata else metadata.copy() if self._imagej: description = imagej_description( input_shape, shape[-1] in (3, 4), self._colormap is not None, **self._metadata) elif metadata or metadata == {}: if self._truncate: 
self._metadata.update(truncated=True) description = json_description(input_shape, **self._metadata) # elif metadata is None and self._truncate: # raise ValueError('cannot truncate without writing metadata') else: description = None if description: # add 64 bytes buffer # the image description might be updated later with the final shape description = str2bytes(description, 'ascii') description += b'\0' * 64 self._descriptionlen = len(description) addtag('ImageDescription', 's', 0, description, writeonce=True) if software: addtag('Software', 's', 0, software, writeonce=True) if datetime: if isinstance(datetime, str): if len(datetime) != 19 or datetime[16] != ':': raise ValueError('invalid datetime string') else: try: datetime = datetime.strftime('%Y:%m:%d %H:%M:%S') except AttributeError: datetime = self._now().strftime('%Y:%m:%d %H:%M:%S') addtag('DateTime', 's', 0, datetime, writeonce=True) addtag('Compression', 'H', 1, compresstag) if predictor: addtag('Predictor', 'H', 1, predictortag) addtag('ImageWidth', 'I', 1, shape[-2]) addtag('ImageLength', 'I', 1, shape[-3]) if tile: addtag('TileWidth', 'I', 1, tile[-1]) addtag('TileLength', 'I', 1, tile[-2]) if tile[0] > 1: addtag('ImageDepth', 'I', 1, shape[-4]) addtag('TileDepth', 'I', 1, tile[0]) addtag('NewSubfileType', 'I', 1, subfiletype) if not bilevel: sampleformat = {'u': 1, 'i': 2, 'f': 3, 'c': 6}[datadtype.kind] addtag('SampleFormat', 'H', samplesperpixel, (sampleformat,) * samplesperpixel) addtag('PhotometricInterpretation', 'H', 1, photometric.value) if colormap is not None: addtag('ColorMap', 'H', colormap.size, colormap) addtag('SamplesPerPixel', 'H', 1, samplesperpixel) if bilevel: pass elif planarconfig and samplesperpixel > 1: addtag('PlanarConfiguration', 'H', 1, planarconfig.value) addtag('BitsPerSample', 'H', samplesperpixel, (datadtype.itemsize * 8,) * samplesperpixel) else: addtag('BitsPerSample', 'H', 1, datadtype.itemsize * 8) if extrasamples: if extrasamples_ is not None: if extrasamples != 
len(extrasamples_): raise ValueError('wrong number of extrasamples specified') addtag('ExtraSamples', 'H', extrasamples, extrasamples_) elif photometric == RGB and extrasamples == 1: # Unassociated alpha channel addtag('ExtraSamples', 'H', 1, 2) else: # Unspecified alpha channel addtag('ExtraSamples', 'H', extrasamples, (0,) * extrasamples) if resolution is not None: addtag('XResolution', '2I', 1, rational(resolution[0])) addtag('YResolution', '2I', 1, rational(resolution[1])) if len(resolution) > 2: unit = resolution[2] unit = 1 if unit is None else enumarg(TIFF.RESUNIT, unit) elif self._imagej: unit = 1 else: unit = 2 addtag('ResolutionUnit', 'H', 1, unit) elif not self._imagej: addtag('XResolution', '2I', 1, (1, 1)) addtag('YResolution', '2I', 1, (1, 1)) addtag('ResolutionUnit', 'H', 1, 1) if ijmetadata: for t in imagej_metadata_tag(ijmetadata, byteorder): addtag(*t) contiguous = not compress if tile: # one chunk per tile per plane tiles = ((shape[2] + tile[0] - 1) // tile[0], (shape[3] + tile[1] - 1) // tile[1], (shape[4] + tile[2] - 1) // tile[2]) numtiles = product(tiles) * shape[1] databytecounts = [ product(tile) * shape[-1] * datadtype.itemsize] * numtiles addtag(tagbytecounts, offsetformat, numtiles, databytecounts) addtag(tagoffsets, offsetformat, numtiles, [0] * numtiles) contiguous = contiguous and product(tiles) == 1 if not contiguous: # allocate tile buffer chunk = numpy.empty(tile + (shape[-1],), dtype=datadtype) elif contiguous and (bilevel or rowsperstrip is None): # one strip per plane if bilevel: databytecounts = [product(datashape[2:])] * shape[1] else: databytecounts = [ product(datashape[2:]) * datadtype.itemsize] * shape[1] addtag(tagbytecounts, offsetformat, shape[1], databytecounts) addtag(tagoffsets, offsetformat, shape[1], [0] * shape[1]) addtag('RowsPerStrip', 'I', 1, shape[-3]) else: # use rowsperstrip rowsize = product(shape[-2:]) * datadtype.itemsize if rowsperstrip is None: # compress ~64 KB chunks by default rowsperstrip = 65536 // 
rowsize if compress else shape[-3] if rowsperstrip < 1: rowsperstrip = 1 elif rowsperstrip > shape[-3]: rowsperstrip = shape[-3] addtag('RowsPerStrip', 'I', 1, rowsperstrip) numstrips1 = (shape[-3] + rowsperstrip - 1) // rowsperstrip numstrips = numstrips1 * shape[1] if compress: databytecounts = [0] * numstrips else: # TODO: save bilevel data with rowsperstrip stripsize = rowsperstrip * rowsize databytecounts = [stripsize] * numstrips stripsize -= rowsize * (numstrips1 * rowsperstrip - shape[-3]) for i in range(numstrips1-1, numstrips, numstrips1): databytecounts[i] = stripsize addtag(tagbytecounts, offsetformat, numstrips, databytecounts) addtag(tagoffsets, offsetformat, numstrips, [0] * numstrips) if data is None and not contiguous: raise ValueError('cannot write non-contiguous empty file') # add extra tags from user for t in extratags: addtag(*t) # TODO: check TIFFReadDirectoryCheckOrder warning in files containing # multiple tags of same code # the entries in an IFD must be sorted in ascending order by tag code tags = sorted(tags, key=lambda x: x[0]) fhpos = fh.tell() if not (self._bigtiff or self._imagej) and fhpos + datasize > 2**32-1: raise ValueError('data too large for standard TIFF file') # if not compressed or multi-tiled, write the first IFD and then # all data contiguously; else, write all IFDs and data interleaved for pageindex in range(1 if contiguous else shape[0]): ifdpos = fhpos if ifdpos % 2: # location of IFD must begin on a word boundary fh.write(b'\0') ifdpos += 1 # update pointer at ifdoffset fh.seek(self._ifdoffset) fh.write(pack(offsetformat, ifdpos)) fh.seek(ifdpos) # create IFD in memory if pageindex < 2: ifd = io.BytesIO() ifd.write(pack(tagnoformat, len(tags))) tagoffset = ifd.tell() ifd.write(b''.join(t[1] for t in tags)) ifdoffset = ifd.tell() ifd.write(pack(offsetformat, 0)) # offset to next IFD # write tag values and patch offsets in ifdentries for tagindex, tag in enumerate(tags): offset = tagoffset + tagindex * tagsize + 
offsetsize + 4 code = tag[0] value = tag[2] if value: pos = ifd.tell() if pos % 2: # tag value is expected to begin on word boundary ifd.write(b'\0') pos += 1 ifd.seek(offset) ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) ifd.write(value) if code == tagoffsets: dataoffsetsoffset = offset, pos elif code == tagbytecounts: databytecountsoffset = offset, pos elif code == 270 and value.endswith(b'\0\0\0\0'): # image description buffer self._descriptionoffset = ifdpos + pos self._descriptionlenoffset = ( ifdpos + tagoffset + tagindex*tagsize + 4) elif code == tagoffsets: dataoffsetsoffset = offset, None elif code == tagbytecounts: databytecountsoffset = offset, None ifdsize = ifd.tell() if ifdsize % 2: ifd.write(b'\0') ifdsize += 1 # write IFD later when strip/tile bytecounts and offsets are known fh.seek(ifdsize, 1) # write image data dataoffset = fh.tell() skip = align - dataoffset % align fh.seek(skip, 1) dataoffset += skip if contiguous: if data is None: fh.write_empty(datasize) else: fh.write_array(data) elif tile: if data is None: fh.write_empty(numtiles * databytecounts[0]) else: stripindex = 0 for plane in data[pageindex]: for tz in range(tiles[0]): for ty in range(tiles[1]): for tx in range(tiles[2]): c0 = min(tile[0], shape[2] - tz*tile[0]) c1 = min(tile[1], shape[3] - ty*tile[1]) c2 = min(tile[2], shape[4] - tx*tile[2]) chunk[c0:, c1:, c2:] = 0 chunk[:c0, :c1, :c2] = plane[ tz*tile[0]:tz*tile[0]+c0, ty*tile[1]:ty*tile[1]+c1, tx*tile[2]:tx*tile[2]+c2] if compress: t = compress(chunk) fh.write(t) databytecounts[stripindex] = len(t) stripindex += 1 else: fh.write_array(chunk) # fh.flush() elif compress: # write one strip per rowsperstrip assert data.shape[2] == 1 # not handling depth numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip stripindex = 0 for plane in data[pageindex]: for i in range(numstrips): strip = plane[0, i*rowsperstrip: (i+1)*rowsperstrip] strip = compress(strip) fh.write(strip) databytecounts[stripindex] = len(strip) 
stripindex += 1 else: fh.write_array(data[pageindex]) # update strip/tile offsets offset, pos = dataoffsetsoffset ifd.seek(offset) if pos: ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) offset = dataoffset for size in databytecounts: ifd.write(pack(offsetformat, offset)) offset += size else: ifd.write(pack(offsetformat, dataoffset)) if compress: # update strip/tile bytecounts offset, pos = databytecountsoffset ifd.seek(offset) if pos: ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) for size in databytecounts: ifd.write(pack(offsetformat, size)) else: ifd.write(pack(offsetformat, databytecounts[0])) fhpos = fh.tell() fh.seek(ifdpos) fh.write(iogetbuffer(ifd)) fh.flush() fh.seek(fhpos) self._ifdoffset = ifdpos + ifdoffset # remove tags that should be written only once if pageindex == 0: tags = [tag for tag in tags if not tag[-1]] self._shape = shape self._datashape = (1,) + input_shape self._datadtype = datadtype self._dataoffset = dataoffset self._databytecounts = databytecounts if contiguous: # write remaining IFDs/tags later self._tags = tags # return offset and size of image data if returnoffset: return dataoffset, sum(databytecounts) return None
python
def save(self, data=None, shape=None, dtype=None, returnoffset=False, photometric=None, planarconfig=None, extrasamples=None, tile=None, contiguous=True, align=16, truncate=False, compress=0, rowsperstrip=None, predictor=False, colormap=None, description=None, datetime=None, resolution=None, subfiletype=0, software='tifffile.py', metadata={}, ijmetadata=None, extratags=()): """Write numpy array and tags to TIFF file. The data shape's last dimensions are assumed to be image depth, height (length), width, and samples. If a colormap is provided, the data's dtype must be uint8 or uint16 and the data values are indices into the last dimension of the colormap. If 'shape' and 'dtype' are specified, an empty array is saved. This option cannot be used with compression or multiple tiles. Image data are written uncompressed in one strip per plane by default. Dimensions larger than 2 to 4 (depending on photometric mode, planar configuration, and SGI mode) are flattened and saved as separate pages. The SampleFormat and BitsPerSample tags are derived from the data type. Parameters ---------- data : numpy.ndarray or None Input image array. shape : tuple or None Shape of the empty array to save. Used only if 'data' is None. dtype : numpy.dtype or None Data-type of the empty array to save. Used only if 'data' is None. returnoffset : bool If True and the image data in the file is memory-mappable, return the offset and number of bytes of the image data in the file. photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'} The color space of the image data. By default, this setting is inferred from the data shape and the value of colormap. For CFA images, DNG tags must be specified in 'extratags'. planarconfig : {'CONTIG', 'SEPARATE'} Specifies if samples are stored interleaved or in separate planes. By default, this setting is inferred from the data shape. If this parameter is set, extra samples are used to store grayscale images. 'CONTIG': last dimension contains samples. 
'SEPARATE': third last dimension contains samples. extrasamples : tuple of {'UNSPECIFIED', 'ASSOCALPHA', 'UNASSALPHA'} Defines the interpretation of extra components in pixels. 'UNSPECIFIED': no transparency information (default). 'ASSOCALPHA': single, true transparency with pre-multiplied color. 'UNASSALPHA': independent transparency masks. tile : tuple of int The shape (depth, length, width) of image tiles to write. If None (default), image data are written in strips. The tile length and width must be a multiple of 16. If the tile depth is provided, the SGI ImageDepth and TileDepth tags are used to save volume data. Unless a single tile is used, tiles cannot be used to write contiguous files. Few software can read the SGI format, e.g. MeVisLab. contiguous : bool If True (default) and the data and parameters are compatible with previous ones, if any, the image data are stored contiguously after the previous one. In that case, 'photometric', 'planarconfig', 'rowsperstrip', are ignored. Metadata such as 'description', 'metadata', 'datetime', and 'extratags' are written to the first page of a contiguous series only. align : int Byte boundary on which to align the image data in the file. Default 16. Use mmap.ALLOCATIONGRANULARITY for memory-mapped data. Following contiguous writes are not aligned. truncate : bool If True, only write the first page including shape metadata if possible (uncompressed, contiguous, not tiled). Other TIFF readers will only be able to read part of the data. compress : int or str or (str, int) If 0 (default), data are written uncompressed. If 0-9, the level of ADOBE_DEFLATE compression. If a str, one of TIFF.COMPRESSION, e.g. 'LZMA' or 'ZSTD'. If a tuple, first item is one of TIFF.COMPRESSION and second item is compression level. Compression cannot be used to write contiguous files. rowsperstrip : int The number of rows per strip. By default strips will be ~64 KB if compression is enabled, else rowsperstrip is set to the image length. 
Bilevel images are always stored in one strip per plane. predictor : bool If True, apply horizontal differencing or floating-point predictor before compression. colormap : numpy.ndarray RGB color values for the corresponding data value. Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16. description : str The subject of the image. Must be 7-bit ASCII. Cannot be used with the ImageJ format. Saved with the first page only. datetime : datetime, str, or bool Date and time of image creation in '%Y:%m:%d %H:%M:%S' format or datetime object. Else if True, the current date and time is used. Saved with the first page only. resolution : (float, float[, str]) or ((int, int), (int, int)[, str]) X and Y resolutions in pixels per resolution unit as float or rational numbers. A third, optional parameter specifies the resolution unit, which must be None (default for ImageJ), 'INCH' (default), or 'CENTIMETER'. subfiletype : int Bitfield to indicate the kind of data. Set bit 0 if the image is a reduced-resolution version of another image. Set bit 1 if the image is part of a multi-page image. Set bit 2 if the image is transparency mask for another image (photometric must be MASK, SamplesPerPixel and BitsPerSample must be 1). software : str Name of the software used to create the file. Must be 7-bit ASCII. Saved with the first page only. metadata : dict Additional metadata to be saved along with shape information in JSON or ImageJ formats in an ImageDescription tag. If None, do not write a second ImageDescription tag. Strings must be 7-bit ASCII. Saved with the first page only. ijmetadata : dict Additional metadata to be saved in application specific IJMetadata and IJMetadataByteCounts tags. Refer to the imagej_metadata_tag function for valid keys and values. Saved with the first page only. extratags : sequence of tuples Additional tags as [(code, dtype, count, value, writeonce)]. code : int The TIFF tag Id. dtype : str Data type of items in 'value' in Python struct format. 
One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q. count : int Number of data values. Not used for string or byte string values. value : sequence 'Count' values compatible with 'dtype'. Byte strings must contain count values of dtype packed as binary data. writeonce : bool If True, the tag is written to the first page only. """ # TODO: refactor this function fh = self._fh byteorder = self._byteorder if data is None: if compress: raise ValueError('cannot save compressed empty file') datashape = shape datadtype = numpy.dtype(dtype).newbyteorder(byteorder) datadtypechar = datadtype.char else: data = numpy.asarray(data, byteorder+data.dtype.char, 'C') if data.size == 0: raise ValueError('cannot save empty array') datashape = data.shape datadtype = data.dtype datadtypechar = data.dtype.char returnoffset = returnoffset and datadtype.isnative bilevel = datadtypechar == '?' if bilevel: index = -1 if datashape[-1] > 1 else -2 datasize = product(datashape[:index]) if datashape[index] % 8: datasize *= datashape[index] // 8 + 1 else: datasize *= datashape[index] // 8 else: datasize = product(datashape) * datadtype.itemsize # just append contiguous data if possible self._truncate = bool(truncate) if self._datashape: if (not contiguous or self._datashape[1:] != datashape or self._datadtype != datadtype or (compress and self._tags) or tile or not numpy.array_equal(colormap, self._colormap)): # incompatible shape, dtype, compression mode, or colormap self._write_remaining_pages() self._write_image_description() self._truncate = False self._descriptionoffset = 0 self._descriptionlenoffset = 0 self._datashape = None self._colormap = None if self._imagej: raise ValueError( 'ImageJ does not support non-contiguous data') else: # consecutive mode self._datashape = (self._datashape[0] + 1,) + datashape if not compress: # write contiguous data, write IFDs/tags later offset = fh.tell() if data is None: fh.write_empty(datasize) else: fh.write_array(data) if returnoffset: return offset, 
datasize return None input_shape = datashape tagnoformat = self._tagnoformat valueformat = self._valueformat offsetformat = self._offsetformat offsetsize = self._offsetsize tagsize = self._tagsize MINISBLACK = TIFF.PHOTOMETRIC.MINISBLACK MINISWHITE = TIFF.PHOTOMETRIC.MINISWHITE RGB = TIFF.PHOTOMETRIC.RGB CFA = TIFF.PHOTOMETRIC.CFA PALETTE = TIFF.PHOTOMETRIC.PALETTE CONTIG = TIFF.PLANARCONFIG.CONTIG SEPARATE = TIFF.PLANARCONFIG.SEPARATE # parse input if photometric is not None: photometric = enumarg(TIFF.PHOTOMETRIC, photometric) if planarconfig: planarconfig = enumarg(TIFF.PLANARCONFIG, planarconfig) if extrasamples is None: extrasamples_ = None else: extrasamples_ = tuple(enumarg(TIFF.EXTRASAMPLE, es) for es in sequence(extrasamples)) if not compress: compress = False compresstag = 1 # TODO: support predictors without compression predictor = False predictortag = 1 else: if isinstance(compress, (tuple, list)): compress, compresslevel = compress elif isinstance(compress, int): compress, compresslevel = 'ADOBE_DEFLATE', int(compress) if not 0 <= compresslevel <= 9: raise ValueError('invalid compression level %s' % compress) else: compresslevel = None compress = compress.upper() compresstag = enumarg(TIFF.COMPRESSION, compress) if predictor: if datadtype.kind in 'iu': predictortag = 2 predictor = TIFF.PREDICTORS[2] elif datadtype.kind == 'f': predictortag = 3 predictor = TIFF.PREDICTORS[3] else: raise ValueError('cannot apply predictor to %s' % datadtype) # prepare ImageJ format if self._imagej: # if predictor or compress: # warnings.warn( # 'ImageJ cannot handle predictors or compression') if description: warnings.warn('not writing description to ImageJ file') description = None volume = False if datadtypechar not in 'BHhf': raise ValueError( 'ImageJ does not support data type %s' % datadtypechar) ijrgb = photometric == RGB if photometric else None if datadtypechar not in 'B': ijrgb = False ijshape = imagej_shape(datashape, ijrgb) if ijshape[-1] in (3, 4): 
photometric = RGB if datadtypechar not in 'B': raise ValueError('ImageJ does not support data type %s ' 'for RGB' % datadtypechar) elif photometric is None: photometric = MINISBLACK planarconfig = None if planarconfig == SEPARATE: raise ValueError('ImageJ does not support planar images') planarconfig = CONTIG if ijrgb else None # define compress function if compress: compressor = TIFF.COMPESSORS[compresstag] if predictor: def compress(data, level=compresslevel): data = predictor(data, axis=-2) return compressor(data, level) else: def compress(data, level=compresslevel): return compressor(data, level) # verify colormap and indices if colormap is not None: if datadtypechar not in 'BH': raise ValueError('invalid data dtype for palette mode') colormap = numpy.asarray(colormap, dtype=byteorder+'H') if colormap.shape != (3, 2**(datadtype.itemsize * 8)): raise ValueError('invalid color map shape') self._colormap = colormap # verify tile shape if tile: tile = tuple(int(i) for i in tile[:3]) volume = len(tile) == 3 if (len(tile) < 2 or tile[-1] % 16 or tile[-2] % 16 or any(i < 1 for i in tile)): raise ValueError('invalid tile shape') else: tile = () volume = False # normalize data shape to 5D or 6D, depending on volume: # (pages, planar_samples, [depth,] height, width, contig_samples) datashape = reshape_nd(datashape, 3 if photometric == RGB else 2) shape = datashape ndim = len(datashape) samplesperpixel = 1 extrasamples = 0 if volume and ndim < 3: volume = False if colormap is not None: photometric = PALETTE planarconfig = None if photometric is None: photometric = MINISBLACK if bilevel: photometric = MINISWHITE elif planarconfig == CONTIG: if ndim > 2 and shape[-1] in (3, 4): photometric = RGB elif planarconfig == SEPARATE: if volume and ndim > 3 and shape[-4] in (3, 4): photometric = RGB elif ndim > 2 and shape[-3] in (3, 4): photometric = RGB elif ndim > 2 and shape[-1] in (3, 4): photometric = RGB elif self._imagej: photometric = MINISBLACK elif volume and ndim > 3 and 
shape[-4] in (3, 4): photometric = RGB elif ndim > 2 and shape[-3] in (3, 4): photometric = RGB if planarconfig and len(shape) <= (3 if volume else 2): planarconfig = None if photometric not in (0, 1, 3, 4): photometric = MINISBLACK if photometric == RGB: if len(shape) < 3: raise ValueError('not a RGB(A) image') if len(shape) < 4: volume = False if planarconfig is None: if shape[-1] in (3, 4): planarconfig = CONTIG elif shape[-4 if volume else -3] in (3, 4): planarconfig = SEPARATE elif shape[-1] > shape[-4 if volume else -3]: planarconfig = SEPARATE else: planarconfig = CONTIG if planarconfig == CONTIG: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] else: datashape = (-1,) + shape[(-4 if volume else -3):] + (1,) samplesperpixel = datashape[1] if samplesperpixel > 3: extrasamples = samplesperpixel - 3 elif photometric == CFA: if len(shape) != 2: raise ValueError('invalid CFA image') volume = False planarconfig = None datashape = (-1, 1) + shape[-2:] + (1,) if 50706 not in (et[0] for et in extratags): raise ValueError('must specify DNG tags for CFA image') elif planarconfig and len(shape) > (3 if volume else 2): if planarconfig == CONTIG: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] else: datashape = (-1,) + shape[(-4 if volume else -3):] + (1,) samplesperpixel = datashape[1] extrasamples = samplesperpixel - 1 else: planarconfig = None while len(shape) > 2 and shape[-1] == 1: shape = shape[:-1] # remove trailing 1s if len(shape) < 3: volume = False if extrasamples_ is None: datashape = (-1, 1) + shape[(-3 if volume else -2):] + (1,) else: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] extrasamples = samplesperpixel - 1 if subfiletype & 0b100: # FILETYPE_MASK if not (bilevel and samplesperpixel == 1 and photometric in (0, 1, 4)): raise ValueError('invalid SubfileType MASK') photometric = TIFF.PHOTOMETRIC.MASK # normalize shape to 6D assert 
len(datashape) in (5, 6) if len(datashape) == 5: datashape = datashape[:2] + (1,) + datashape[2:] if datashape[0] == -1: s0 = product(input_shape) // product(datashape[1:]) datashape = (s0,) + datashape[1:] shape = datashape if data is not None: data = data.reshape(shape) if tile and not volume: tile = (1, tile[-2], tile[-1]) if photometric == PALETTE: if (samplesperpixel != 1 or extrasamples or shape[1] != 1 or shape[-1] != 1): raise ValueError('invalid data shape for palette mode') if photometric == RGB and samplesperpixel == 2: raise ValueError('not a RGB image (samplesperpixel=2)') if bilevel: if compresstag not in (1, 32773): raise ValueError('cannot compress bilevel image') if tile: raise ValueError('cannot save tiled bilevel image') if photometric not in (0, 1, 4): raise ValueError('cannot save bilevel image as %s' % str(photometric)) datashape = list(datashape) if datashape[-2] % 8: datashape[-2] = datashape[-2] // 8 + 1 else: datashape[-2] = datashape[-2] // 8 datashape = tuple(datashape) assert datasize == product(datashape) if data is not None: data = numpy.packbits(data, axis=-2) assert datashape[-2] == data.shape[-2] bytestr = bytes if sys.version[0] == '2' else ( lambda x: bytes(x, 'ascii') if isinstance(x, str) else x) tags = [] # list of (code, ifdentry, ifdvalue, writeonce) strip_or_tile = 'Tile' if tile else 'Strip' tagbytecounts = TIFF.TAG_NAMES[strip_or_tile + 'ByteCounts'] tagoffsets = TIFF.TAG_NAMES[strip_or_tile + 'Offsets'] self._tagoffsets = tagoffsets def pack(fmt, *val): return struct.pack(byteorder+fmt, *val) def addtag(code, dtype, count, value, writeonce=False): # Compute ifdentry & ifdvalue bytes from code, dtype, count, value # Append (code, ifdentry, ifdvalue, writeonce) to tags list code = int(TIFF.TAG_NAMES.get(code, code)) try: tifftype = TIFF.DATA_DTYPES[dtype] except KeyError: raise ValueError('unknown dtype %s' % dtype) rawcount = count if dtype == 's': # strings value = bytestr(value) + b'\0' count = rawcount = len(value) 
rawcount = value.find(b'\0\0') if rawcount < 0: rawcount = count else: rawcount += 1 # length of string without buffer value = (value,) elif isinstance(value, bytes): # packed binary data dtsize = struct.calcsize(dtype) if len(value) % dtsize: raise ValueError('invalid packed binary data') count = len(value) // dtsize if len(dtype) > 1: count *= int(dtype[:-1]) dtype = dtype[-1] ifdentry = [pack('HH', code, tifftype), pack(offsetformat, rawcount)] ifdvalue = None if struct.calcsize(dtype) * count <= offsetsize: # value(s) can be written directly if isinstance(value, bytes): ifdentry.append(pack(valueformat, value)) elif count == 1: if isinstance(value, (tuple, list, numpy.ndarray)): value = value[0] ifdentry.append(pack(valueformat, pack(dtype, value))) else: ifdentry.append(pack(valueformat, pack(str(count)+dtype, *value))) else: # use offset to value(s) ifdentry.append(pack(offsetformat, 0)) if isinstance(value, bytes): ifdvalue = value elif isinstance(value, numpy.ndarray): assert value.size == count assert value.dtype.char == dtype ifdvalue = value.tostring() elif isinstance(value, (tuple, list)): ifdvalue = pack(str(count)+dtype, *value) else: ifdvalue = pack(dtype, value) tags.append((code, b''.join(ifdentry), ifdvalue, writeonce)) def rational(arg, max_denominator=1000000): """"Return nominator and denominator from float or two integers.""" from fractions import Fraction # delayed import try: f = Fraction.from_float(arg) except TypeError: f = Fraction(arg[0], arg[1]) f = f.limit_denominator(max_denominator) return f.numerator, f.denominator if description: # user provided description addtag('ImageDescription', 's', 0, description, writeonce=True) # write shape and metadata to ImageDescription self._metadata = {} if not metadata else metadata.copy() if self._imagej: description = imagej_description( input_shape, shape[-1] in (3, 4), self._colormap is not None, **self._metadata) elif metadata or metadata == {}: if self._truncate: 
self._metadata.update(truncated=True) description = json_description(input_shape, **self._metadata) # elif metadata is None and self._truncate: # raise ValueError('cannot truncate without writing metadata') else: description = None if description: # add 64 bytes buffer # the image description might be updated later with the final shape description = str2bytes(description, 'ascii') description += b'\0' * 64 self._descriptionlen = len(description) addtag('ImageDescription', 's', 0, description, writeonce=True) if software: addtag('Software', 's', 0, software, writeonce=True) if datetime: if isinstance(datetime, str): if len(datetime) != 19 or datetime[16] != ':': raise ValueError('invalid datetime string') else: try: datetime = datetime.strftime('%Y:%m:%d %H:%M:%S') except AttributeError: datetime = self._now().strftime('%Y:%m:%d %H:%M:%S') addtag('DateTime', 's', 0, datetime, writeonce=True) addtag('Compression', 'H', 1, compresstag) if predictor: addtag('Predictor', 'H', 1, predictortag) addtag('ImageWidth', 'I', 1, shape[-2]) addtag('ImageLength', 'I', 1, shape[-3]) if tile: addtag('TileWidth', 'I', 1, tile[-1]) addtag('TileLength', 'I', 1, tile[-2]) if tile[0] > 1: addtag('ImageDepth', 'I', 1, shape[-4]) addtag('TileDepth', 'I', 1, tile[0]) addtag('NewSubfileType', 'I', 1, subfiletype) if not bilevel: sampleformat = {'u': 1, 'i': 2, 'f': 3, 'c': 6}[datadtype.kind] addtag('SampleFormat', 'H', samplesperpixel, (sampleformat,) * samplesperpixel) addtag('PhotometricInterpretation', 'H', 1, photometric.value) if colormap is not None: addtag('ColorMap', 'H', colormap.size, colormap) addtag('SamplesPerPixel', 'H', 1, samplesperpixel) if bilevel: pass elif planarconfig and samplesperpixel > 1: addtag('PlanarConfiguration', 'H', 1, planarconfig.value) addtag('BitsPerSample', 'H', samplesperpixel, (datadtype.itemsize * 8,) * samplesperpixel) else: addtag('BitsPerSample', 'H', 1, datadtype.itemsize * 8) if extrasamples: if extrasamples_ is not None: if extrasamples != 
len(extrasamples_): raise ValueError('wrong number of extrasamples specified') addtag('ExtraSamples', 'H', extrasamples, extrasamples_) elif photometric == RGB and extrasamples == 1: # Unassociated alpha channel addtag('ExtraSamples', 'H', 1, 2) else: # Unspecified alpha channel addtag('ExtraSamples', 'H', extrasamples, (0,) * extrasamples) if resolution is not None: addtag('XResolution', '2I', 1, rational(resolution[0])) addtag('YResolution', '2I', 1, rational(resolution[1])) if len(resolution) > 2: unit = resolution[2] unit = 1 if unit is None else enumarg(TIFF.RESUNIT, unit) elif self._imagej: unit = 1 else: unit = 2 addtag('ResolutionUnit', 'H', 1, unit) elif not self._imagej: addtag('XResolution', '2I', 1, (1, 1)) addtag('YResolution', '2I', 1, (1, 1)) addtag('ResolutionUnit', 'H', 1, 1) if ijmetadata: for t in imagej_metadata_tag(ijmetadata, byteorder): addtag(*t) contiguous = not compress if tile: # one chunk per tile per plane tiles = ((shape[2] + tile[0] - 1) // tile[0], (shape[3] + tile[1] - 1) // tile[1], (shape[4] + tile[2] - 1) // tile[2]) numtiles = product(tiles) * shape[1] databytecounts = [ product(tile) * shape[-1] * datadtype.itemsize] * numtiles addtag(tagbytecounts, offsetformat, numtiles, databytecounts) addtag(tagoffsets, offsetformat, numtiles, [0] * numtiles) contiguous = contiguous and product(tiles) == 1 if not contiguous: # allocate tile buffer chunk = numpy.empty(tile + (shape[-1],), dtype=datadtype) elif contiguous and (bilevel or rowsperstrip is None): # one strip per plane if bilevel: databytecounts = [product(datashape[2:])] * shape[1] else: databytecounts = [ product(datashape[2:]) * datadtype.itemsize] * shape[1] addtag(tagbytecounts, offsetformat, shape[1], databytecounts) addtag(tagoffsets, offsetformat, shape[1], [0] * shape[1]) addtag('RowsPerStrip', 'I', 1, shape[-3]) else: # use rowsperstrip rowsize = product(shape[-2:]) * datadtype.itemsize if rowsperstrip is None: # compress ~64 KB chunks by default rowsperstrip = 65536 // 
rowsize if compress else shape[-3] if rowsperstrip < 1: rowsperstrip = 1 elif rowsperstrip > shape[-3]: rowsperstrip = shape[-3] addtag('RowsPerStrip', 'I', 1, rowsperstrip) numstrips1 = (shape[-3] + rowsperstrip - 1) // rowsperstrip numstrips = numstrips1 * shape[1] if compress: databytecounts = [0] * numstrips else: # TODO: save bilevel data with rowsperstrip stripsize = rowsperstrip * rowsize databytecounts = [stripsize] * numstrips stripsize -= rowsize * (numstrips1 * rowsperstrip - shape[-3]) for i in range(numstrips1-1, numstrips, numstrips1): databytecounts[i] = stripsize addtag(tagbytecounts, offsetformat, numstrips, databytecounts) addtag(tagoffsets, offsetformat, numstrips, [0] * numstrips) if data is None and not contiguous: raise ValueError('cannot write non-contiguous empty file') # add extra tags from user for t in extratags: addtag(*t) # TODO: check TIFFReadDirectoryCheckOrder warning in files containing # multiple tags of same code # the entries in an IFD must be sorted in ascending order by tag code tags = sorted(tags, key=lambda x: x[0]) fhpos = fh.tell() if not (self._bigtiff or self._imagej) and fhpos + datasize > 2**32-1: raise ValueError('data too large for standard TIFF file') # if not compressed or multi-tiled, write the first IFD and then # all data contiguously; else, write all IFDs and data interleaved for pageindex in range(1 if contiguous else shape[0]): ifdpos = fhpos if ifdpos % 2: # location of IFD must begin on a word boundary fh.write(b'\0') ifdpos += 1 # update pointer at ifdoffset fh.seek(self._ifdoffset) fh.write(pack(offsetformat, ifdpos)) fh.seek(ifdpos) # create IFD in memory if pageindex < 2: ifd = io.BytesIO() ifd.write(pack(tagnoformat, len(tags))) tagoffset = ifd.tell() ifd.write(b''.join(t[1] for t in tags)) ifdoffset = ifd.tell() ifd.write(pack(offsetformat, 0)) # offset to next IFD # write tag values and patch offsets in ifdentries for tagindex, tag in enumerate(tags): offset = tagoffset + tagindex * tagsize + 
offsetsize + 4 code = tag[0] value = tag[2] if value: pos = ifd.tell() if pos % 2: # tag value is expected to begin on word boundary ifd.write(b'\0') pos += 1 ifd.seek(offset) ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) ifd.write(value) if code == tagoffsets: dataoffsetsoffset = offset, pos elif code == tagbytecounts: databytecountsoffset = offset, pos elif code == 270 and value.endswith(b'\0\0\0\0'): # image description buffer self._descriptionoffset = ifdpos + pos self._descriptionlenoffset = ( ifdpos + tagoffset + tagindex*tagsize + 4) elif code == tagoffsets: dataoffsetsoffset = offset, None elif code == tagbytecounts: databytecountsoffset = offset, None ifdsize = ifd.tell() if ifdsize % 2: ifd.write(b'\0') ifdsize += 1 # write IFD later when strip/tile bytecounts and offsets are known fh.seek(ifdsize, 1) # write image data dataoffset = fh.tell() skip = align - dataoffset % align fh.seek(skip, 1) dataoffset += skip if contiguous: if data is None: fh.write_empty(datasize) else: fh.write_array(data) elif tile: if data is None: fh.write_empty(numtiles * databytecounts[0]) else: stripindex = 0 for plane in data[pageindex]: for tz in range(tiles[0]): for ty in range(tiles[1]): for tx in range(tiles[2]): c0 = min(tile[0], shape[2] - tz*tile[0]) c1 = min(tile[1], shape[3] - ty*tile[1]) c2 = min(tile[2], shape[4] - tx*tile[2]) chunk[c0:, c1:, c2:] = 0 chunk[:c0, :c1, :c2] = plane[ tz*tile[0]:tz*tile[0]+c0, ty*tile[1]:ty*tile[1]+c1, tx*tile[2]:tx*tile[2]+c2] if compress: t = compress(chunk) fh.write(t) databytecounts[stripindex] = len(t) stripindex += 1 else: fh.write_array(chunk) # fh.flush() elif compress: # write one strip per rowsperstrip assert data.shape[2] == 1 # not handling depth numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip stripindex = 0 for plane in data[pageindex]: for i in range(numstrips): strip = plane[0, i*rowsperstrip: (i+1)*rowsperstrip] strip = compress(strip) fh.write(strip) databytecounts[stripindex] = len(strip) 
stripindex += 1 else: fh.write_array(data[pageindex]) # update strip/tile offsets offset, pos = dataoffsetsoffset ifd.seek(offset) if pos: ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) offset = dataoffset for size in databytecounts: ifd.write(pack(offsetformat, offset)) offset += size else: ifd.write(pack(offsetformat, dataoffset)) if compress: # update strip/tile bytecounts offset, pos = databytecountsoffset ifd.seek(offset) if pos: ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) for size in databytecounts: ifd.write(pack(offsetformat, size)) else: ifd.write(pack(offsetformat, databytecounts[0])) fhpos = fh.tell() fh.seek(ifdpos) fh.write(iogetbuffer(ifd)) fh.flush() fh.seek(fhpos) self._ifdoffset = ifdpos + ifdoffset # remove tags that should be written only once if pageindex == 0: tags = [tag for tag in tags if not tag[-1]] self._shape = shape self._datashape = (1,) + input_shape self._datadtype = datadtype self._dataoffset = dataoffset self._databytecounts = databytecounts if contiguous: # write remaining IFDs/tags later self._tags = tags # return offset and size of image data if returnoffset: return dataoffset, sum(databytecounts) return None
[ "def", "save", "(", "self", ",", "data", "=", "None", ",", "shape", "=", "None", ",", "dtype", "=", "None", ",", "returnoffset", "=", "False", ",", "photometric", "=", "None", ",", "planarconfig", "=", "None", ",", "extrasamples", "=", "None", ",", "tile", "=", "None", ",", "contiguous", "=", "True", ",", "align", "=", "16", ",", "truncate", "=", "False", ",", "compress", "=", "0", ",", "rowsperstrip", "=", "None", ",", "predictor", "=", "False", ",", "colormap", "=", "None", ",", "description", "=", "None", ",", "datetime", "=", "None", ",", "resolution", "=", "None", ",", "subfiletype", "=", "0", ",", "software", "=", "'tifffile.py'", ",", "metadata", "=", "{", "}", ",", "ijmetadata", "=", "None", ",", "extratags", "=", "(", ")", ")", ":", "# TODO: refactor this function", "fh", "=", "self", ".", "_fh", "byteorder", "=", "self", ".", "_byteorder", "if", "data", "is", "None", ":", "if", "compress", ":", "raise", "ValueError", "(", "'cannot save compressed empty file'", ")", "datashape", "=", "shape", "datadtype", "=", "numpy", ".", "dtype", "(", "dtype", ")", ".", "newbyteorder", "(", "byteorder", ")", "datadtypechar", "=", "datadtype", ".", "char", "else", ":", "data", "=", "numpy", ".", "asarray", "(", "data", ",", "byteorder", "+", "data", ".", "dtype", ".", "char", ",", "'C'", ")", "if", "data", ".", "size", "==", "0", ":", "raise", "ValueError", "(", "'cannot save empty array'", ")", "datashape", "=", "data", ".", "shape", "datadtype", "=", "data", ".", "dtype", "datadtypechar", "=", "data", ".", "dtype", ".", "char", "returnoffset", "=", "returnoffset", "and", "datadtype", ".", "isnative", "bilevel", "=", "datadtypechar", "==", "'?'", "if", "bilevel", ":", "index", "=", "-", "1", "if", "datashape", "[", "-", "1", "]", ">", "1", "else", "-", "2", "datasize", "=", "product", "(", "datashape", "[", ":", "index", "]", ")", "if", "datashape", "[", "index", "]", "%", "8", ":", "datasize", "*=", "datashape", "[", "index", "]", "//", "8", "+", "1", "else", ":", 
"datasize", "*=", "datashape", "[", "index", "]", "//", "8", "else", ":", "datasize", "=", "product", "(", "datashape", ")", "*", "datadtype", ".", "itemsize", "# just append contiguous data if possible", "self", ".", "_truncate", "=", "bool", "(", "truncate", ")", "if", "self", ".", "_datashape", ":", "if", "(", "not", "contiguous", "or", "self", ".", "_datashape", "[", "1", ":", "]", "!=", "datashape", "or", "self", ".", "_datadtype", "!=", "datadtype", "or", "(", "compress", "and", "self", ".", "_tags", ")", "or", "tile", "or", "not", "numpy", ".", "array_equal", "(", "colormap", ",", "self", ".", "_colormap", ")", ")", ":", "# incompatible shape, dtype, compression mode, or colormap", "self", ".", "_write_remaining_pages", "(", ")", "self", ".", "_write_image_description", "(", ")", "self", ".", "_truncate", "=", "False", "self", ".", "_descriptionoffset", "=", "0", "self", ".", "_descriptionlenoffset", "=", "0", "self", ".", "_datashape", "=", "None", "self", ".", "_colormap", "=", "None", "if", "self", ".", "_imagej", ":", "raise", "ValueError", "(", "'ImageJ does not support non-contiguous data'", ")", "else", ":", "# consecutive mode", "self", ".", "_datashape", "=", "(", "self", ".", "_datashape", "[", "0", "]", "+", "1", ",", ")", "+", "datashape", "if", "not", "compress", ":", "# write contiguous data, write IFDs/tags later", "offset", "=", "fh", ".", "tell", "(", ")", "if", "data", "is", "None", ":", "fh", ".", "write_empty", "(", "datasize", ")", "else", ":", "fh", ".", "write_array", "(", "data", ")", "if", "returnoffset", ":", "return", "offset", ",", "datasize", "return", "None", "input_shape", "=", "datashape", "tagnoformat", "=", "self", ".", "_tagnoformat", "valueformat", "=", "self", ".", "_valueformat", "offsetformat", "=", "self", ".", "_offsetformat", "offsetsize", "=", "self", ".", "_offsetsize", "tagsize", "=", "self", ".", "_tagsize", "MINISBLACK", "=", "TIFF", ".", "PHOTOMETRIC", ".", "MINISBLACK", "MINISWHITE", "=", "TIFF", ".", 
"PHOTOMETRIC", ".", "MINISWHITE", "RGB", "=", "TIFF", ".", "PHOTOMETRIC", ".", "RGB", "CFA", "=", "TIFF", ".", "PHOTOMETRIC", ".", "CFA", "PALETTE", "=", "TIFF", ".", "PHOTOMETRIC", ".", "PALETTE", "CONTIG", "=", "TIFF", ".", "PLANARCONFIG", ".", "CONTIG", "SEPARATE", "=", "TIFF", ".", "PLANARCONFIG", ".", "SEPARATE", "# parse input", "if", "photometric", "is", "not", "None", ":", "photometric", "=", "enumarg", "(", "TIFF", ".", "PHOTOMETRIC", ",", "photometric", ")", "if", "planarconfig", ":", "planarconfig", "=", "enumarg", "(", "TIFF", ".", "PLANARCONFIG", ",", "planarconfig", ")", "if", "extrasamples", "is", "None", ":", "extrasamples_", "=", "None", "else", ":", "extrasamples_", "=", "tuple", "(", "enumarg", "(", "TIFF", ".", "EXTRASAMPLE", ",", "es", ")", "for", "es", "in", "sequence", "(", "extrasamples", ")", ")", "if", "not", "compress", ":", "compress", "=", "False", "compresstag", "=", "1", "# TODO: support predictors without compression", "predictor", "=", "False", "predictortag", "=", "1", "else", ":", "if", "isinstance", "(", "compress", ",", "(", "tuple", ",", "list", ")", ")", ":", "compress", ",", "compresslevel", "=", "compress", "elif", "isinstance", "(", "compress", ",", "int", ")", ":", "compress", ",", "compresslevel", "=", "'ADOBE_DEFLATE'", ",", "int", "(", "compress", ")", "if", "not", "0", "<=", "compresslevel", "<=", "9", ":", "raise", "ValueError", "(", "'invalid compression level %s'", "%", "compress", ")", "else", ":", "compresslevel", "=", "None", "compress", "=", "compress", ".", "upper", "(", ")", "compresstag", "=", "enumarg", "(", "TIFF", ".", "COMPRESSION", ",", "compress", ")", "if", "predictor", ":", "if", "datadtype", ".", "kind", "in", "'iu'", ":", "predictortag", "=", "2", "predictor", "=", "TIFF", ".", "PREDICTORS", "[", "2", "]", "elif", "datadtype", ".", "kind", "==", "'f'", ":", "predictortag", "=", "3", "predictor", "=", "TIFF", ".", "PREDICTORS", "[", "3", "]", "else", ":", "raise", "ValueError", "(", "'cannot apply 
predictor to %s'", "%", "datadtype", ")", "# prepare ImageJ format", "if", "self", ".", "_imagej", ":", "# if predictor or compress:", "# warnings.warn(", "# 'ImageJ cannot handle predictors or compression')", "if", "description", ":", "warnings", ".", "warn", "(", "'not writing description to ImageJ file'", ")", "description", "=", "None", "volume", "=", "False", "if", "datadtypechar", "not", "in", "'BHhf'", ":", "raise", "ValueError", "(", "'ImageJ does not support data type %s'", "%", "datadtypechar", ")", "ijrgb", "=", "photometric", "==", "RGB", "if", "photometric", "else", "None", "if", "datadtypechar", "not", "in", "'B'", ":", "ijrgb", "=", "False", "ijshape", "=", "imagej_shape", "(", "datashape", ",", "ijrgb", ")", "if", "ijshape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", ":", "photometric", "=", "RGB", "if", "datadtypechar", "not", "in", "'B'", ":", "raise", "ValueError", "(", "'ImageJ does not support data type %s '", "'for RGB'", "%", "datadtypechar", ")", "elif", "photometric", "is", "None", ":", "photometric", "=", "MINISBLACK", "planarconfig", "=", "None", "if", "planarconfig", "==", "SEPARATE", ":", "raise", "ValueError", "(", "'ImageJ does not support planar images'", ")", "planarconfig", "=", "CONTIG", "if", "ijrgb", "else", "None", "# define compress function", "if", "compress", ":", "compressor", "=", "TIFF", ".", "COMPESSORS", "[", "compresstag", "]", "if", "predictor", ":", "def", "compress", "(", "data", ",", "level", "=", "compresslevel", ")", ":", "data", "=", "predictor", "(", "data", ",", "axis", "=", "-", "2", ")", "return", "compressor", "(", "data", ",", "level", ")", "else", ":", "def", "compress", "(", "data", ",", "level", "=", "compresslevel", ")", ":", "return", "compressor", "(", "data", ",", "level", ")", "# verify colormap and indices", "if", "colormap", "is", "not", "None", ":", "if", "datadtypechar", "not", "in", "'BH'", ":", "raise", "ValueError", "(", "'invalid data dtype for palette mode'", ")", "colormap", "=", 
"numpy", ".", "asarray", "(", "colormap", ",", "dtype", "=", "byteorder", "+", "'H'", ")", "if", "colormap", ".", "shape", "!=", "(", "3", ",", "2", "**", "(", "datadtype", ".", "itemsize", "*", "8", ")", ")", ":", "raise", "ValueError", "(", "'invalid color map shape'", ")", "self", ".", "_colormap", "=", "colormap", "# verify tile shape", "if", "tile", ":", "tile", "=", "tuple", "(", "int", "(", "i", ")", "for", "i", "in", "tile", "[", ":", "3", "]", ")", "volume", "=", "len", "(", "tile", ")", "==", "3", "if", "(", "len", "(", "tile", ")", "<", "2", "or", "tile", "[", "-", "1", "]", "%", "16", "or", "tile", "[", "-", "2", "]", "%", "16", "or", "any", "(", "i", "<", "1", "for", "i", "in", "tile", ")", ")", ":", "raise", "ValueError", "(", "'invalid tile shape'", ")", "else", ":", "tile", "=", "(", ")", "volume", "=", "False", "# normalize data shape to 5D or 6D, depending on volume:", "# (pages, planar_samples, [depth,] height, width, contig_samples)", "datashape", "=", "reshape_nd", "(", "datashape", ",", "3", "if", "photometric", "==", "RGB", "else", "2", ")", "shape", "=", "datashape", "ndim", "=", "len", "(", "datashape", ")", "samplesperpixel", "=", "1", "extrasamples", "=", "0", "if", "volume", "and", "ndim", "<", "3", ":", "volume", "=", "False", "if", "colormap", "is", "not", "None", ":", "photometric", "=", "PALETTE", "planarconfig", "=", "None", "if", "photometric", "is", "None", ":", "photometric", "=", "MINISBLACK", "if", "bilevel", ":", "photometric", "=", "MINISWHITE", "elif", "planarconfig", "==", "CONTIG", ":", "if", "ndim", ">", "2", "and", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", ":", "photometric", "=", "RGB", "elif", "planarconfig", "==", "SEPARATE", ":", "if", "volume", "and", "ndim", ">", "3", "and", "shape", "[", "-", "4", "]", "in", "(", "3", ",", "4", ")", ":", "photometric", "=", "RGB", "elif", "ndim", ">", "2", "and", "shape", "[", "-", "3", "]", "in", "(", "3", ",", "4", ")", ":", "photometric", "=", "RGB", "elif", 
"ndim", ">", "2", "and", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", ":", "photometric", "=", "RGB", "elif", "self", ".", "_imagej", ":", "photometric", "=", "MINISBLACK", "elif", "volume", "and", "ndim", ">", "3", "and", "shape", "[", "-", "4", "]", "in", "(", "3", ",", "4", ")", ":", "photometric", "=", "RGB", "elif", "ndim", ">", "2", "and", "shape", "[", "-", "3", "]", "in", "(", "3", ",", "4", ")", ":", "photometric", "=", "RGB", "if", "planarconfig", "and", "len", "(", "shape", ")", "<=", "(", "3", "if", "volume", "else", "2", ")", ":", "planarconfig", "=", "None", "if", "photometric", "not", "in", "(", "0", ",", "1", ",", "3", ",", "4", ")", ":", "photometric", "=", "MINISBLACK", "if", "photometric", "==", "RGB", ":", "if", "len", "(", "shape", ")", "<", "3", ":", "raise", "ValueError", "(", "'not a RGB(A) image'", ")", "if", "len", "(", "shape", ")", "<", "4", ":", "volume", "=", "False", "if", "planarconfig", "is", "None", ":", "if", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", ":", "planarconfig", "=", "CONTIG", "elif", "shape", "[", "-", "4", "if", "volume", "else", "-", "3", "]", "in", "(", "3", ",", "4", ")", ":", "planarconfig", "=", "SEPARATE", "elif", "shape", "[", "-", "1", "]", ">", "shape", "[", "-", "4", "if", "volume", "else", "-", "3", "]", ":", "planarconfig", "=", "SEPARATE", "else", ":", "planarconfig", "=", "CONTIG", "if", "planarconfig", "==", "CONTIG", ":", "datashape", "=", "(", "-", "1", ",", "1", ")", "+", "shape", "[", "(", "-", "4", "if", "volume", "else", "-", "3", ")", ":", "]", "samplesperpixel", "=", "datashape", "[", "-", "1", "]", "else", ":", "datashape", "=", "(", "-", "1", ",", ")", "+", "shape", "[", "(", "-", "4", "if", "volume", "else", "-", "3", ")", ":", "]", "+", "(", "1", ",", ")", "samplesperpixel", "=", "datashape", "[", "1", "]", "if", "samplesperpixel", ">", "3", ":", "extrasamples", "=", "samplesperpixel", "-", "3", "elif", "photometric", "==", "CFA", ":", "if", "len", "(", 
"shape", ")", "!=", "2", ":", "raise", "ValueError", "(", "'invalid CFA image'", ")", "volume", "=", "False", "planarconfig", "=", "None", "datashape", "=", "(", "-", "1", ",", "1", ")", "+", "shape", "[", "-", "2", ":", "]", "+", "(", "1", ",", ")", "if", "50706", "not", "in", "(", "et", "[", "0", "]", "for", "et", "in", "extratags", ")", ":", "raise", "ValueError", "(", "'must specify DNG tags for CFA image'", ")", "elif", "planarconfig", "and", "len", "(", "shape", ")", ">", "(", "3", "if", "volume", "else", "2", ")", ":", "if", "planarconfig", "==", "CONTIG", ":", "datashape", "=", "(", "-", "1", ",", "1", ")", "+", "shape", "[", "(", "-", "4", "if", "volume", "else", "-", "3", ")", ":", "]", "samplesperpixel", "=", "datashape", "[", "-", "1", "]", "else", ":", "datashape", "=", "(", "-", "1", ",", ")", "+", "shape", "[", "(", "-", "4", "if", "volume", "else", "-", "3", ")", ":", "]", "+", "(", "1", ",", ")", "samplesperpixel", "=", "datashape", "[", "1", "]", "extrasamples", "=", "samplesperpixel", "-", "1", "else", ":", "planarconfig", "=", "None", "while", "len", "(", "shape", ")", ">", "2", "and", "shape", "[", "-", "1", "]", "==", "1", ":", "shape", "=", "shape", "[", ":", "-", "1", "]", "# remove trailing 1s", "if", "len", "(", "shape", ")", "<", "3", ":", "volume", "=", "False", "if", "extrasamples_", "is", "None", ":", "datashape", "=", "(", "-", "1", ",", "1", ")", "+", "shape", "[", "(", "-", "3", "if", "volume", "else", "-", "2", ")", ":", "]", "+", "(", "1", ",", ")", "else", ":", "datashape", "=", "(", "-", "1", ",", "1", ")", "+", "shape", "[", "(", "-", "4", "if", "volume", "else", "-", "3", ")", ":", "]", "samplesperpixel", "=", "datashape", "[", "-", "1", "]", "extrasamples", "=", "samplesperpixel", "-", "1", "if", "subfiletype", "&", "0b100", ":", "# FILETYPE_MASK", "if", "not", "(", "bilevel", "and", "samplesperpixel", "==", "1", "and", "photometric", "in", "(", "0", ",", "1", ",", "4", ")", ")", ":", "raise", "ValueError", "(", "'invalid 
SubfileType MASK'", ")", "photometric", "=", "TIFF", ".", "PHOTOMETRIC", ".", "MASK", "# normalize shape to 6D", "assert", "len", "(", "datashape", ")", "in", "(", "5", ",", "6", ")", "if", "len", "(", "datashape", ")", "==", "5", ":", "datashape", "=", "datashape", "[", ":", "2", "]", "+", "(", "1", ",", ")", "+", "datashape", "[", "2", ":", "]", "if", "datashape", "[", "0", "]", "==", "-", "1", ":", "s0", "=", "product", "(", "input_shape", ")", "//", "product", "(", "datashape", "[", "1", ":", "]", ")", "datashape", "=", "(", "s0", ",", ")", "+", "datashape", "[", "1", ":", "]", "shape", "=", "datashape", "if", "data", "is", "not", "None", ":", "data", "=", "data", ".", "reshape", "(", "shape", ")", "if", "tile", "and", "not", "volume", ":", "tile", "=", "(", "1", ",", "tile", "[", "-", "2", "]", ",", "tile", "[", "-", "1", "]", ")", "if", "photometric", "==", "PALETTE", ":", "if", "(", "samplesperpixel", "!=", "1", "or", "extrasamples", "or", "shape", "[", "1", "]", "!=", "1", "or", "shape", "[", "-", "1", "]", "!=", "1", ")", ":", "raise", "ValueError", "(", "'invalid data shape for palette mode'", ")", "if", "photometric", "==", "RGB", "and", "samplesperpixel", "==", "2", ":", "raise", "ValueError", "(", "'not a RGB image (samplesperpixel=2)'", ")", "if", "bilevel", ":", "if", "compresstag", "not", "in", "(", "1", ",", "32773", ")", ":", "raise", "ValueError", "(", "'cannot compress bilevel image'", ")", "if", "tile", ":", "raise", "ValueError", "(", "'cannot save tiled bilevel image'", ")", "if", "photometric", "not", "in", "(", "0", ",", "1", ",", "4", ")", ":", "raise", "ValueError", "(", "'cannot save bilevel image as %s'", "%", "str", "(", "photometric", ")", ")", "datashape", "=", "list", "(", "datashape", ")", "if", "datashape", "[", "-", "2", "]", "%", "8", ":", "datashape", "[", "-", "2", "]", "=", "datashape", "[", "-", "2", "]", "//", "8", "+", "1", "else", ":", "datashape", "[", "-", "2", "]", "=", "datashape", "[", "-", "2", "]", "//", "8", 
"datashape", "=", "tuple", "(", "datashape", ")", "assert", "datasize", "==", "product", "(", "datashape", ")", "if", "data", "is", "not", "None", ":", "data", "=", "numpy", ".", "packbits", "(", "data", ",", "axis", "=", "-", "2", ")", "assert", "datashape", "[", "-", "2", "]", "==", "data", ".", "shape", "[", "-", "2", "]", "bytestr", "=", "bytes", "if", "sys", ".", "version", "[", "0", "]", "==", "'2'", "else", "(", "lambda", "x", ":", "bytes", "(", "x", ",", "'ascii'", ")", "if", "isinstance", "(", "x", ",", "str", ")", "else", "x", ")", "tags", "=", "[", "]", "# list of (code, ifdentry, ifdvalue, writeonce)", "strip_or_tile", "=", "'Tile'", "if", "tile", "else", "'Strip'", "tagbytecounts", "=", "TIFF", ".", "TAG_NAMES", "[", "strip_or_tile", "+", "'ByteCounts'", "]", "tagoffsets", "=", "TIFF", ".", "TAG_NAMES", "[", "strip_or_tile", "+", "'Offsets'", "]", "self", ".", "_tagoffsets", "=", "tagoffsets", "def", "pack", "(", "fmt", ",", "*", "val", ")", ":", "return", "struct", ".", "pack", "(", "byteorder", "+", "fmt", ",", "*", "val", ")", "def", "addtag", "(", "code", ",", "dtype", ",", "count", ",", "value", ",", "writeonce", "=", "False", ")", ":", "# Compute ifdentry & ifdvalue bytes from code, dtype, count, value", "# Append (code, ifdentry, ifdvalue, writeonce) to tags list", "code", "=", "int", "(", "TIFF", ".", "TAG_NAMES", ".", "get", "(", "code", ",", "code", ")", ")", "try", ":", "tifftype", "=", "TIFF", ".", "DATA_DTYPES", "[", "dtype", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'unknown dtype %s'", "%", "dtype", ")", "rawcount", "=", "count", "if", "dtype", "==", "'s'", ":", "# strings", "value", "=", "bytestr", "(", "value", ")", "+", "b'\\0'", "count", "=", "rawcount", "=", "len", "(", "value", ")", "rawcount", "=", "value", ".", "find", "(", "b'\\0\\0'", ")", "if", "rawcount", "<", "0", ":", "rawcount", "=", "count", "else", ":", "rawcount", "+=", "1", "# length of string without buffer", "value", "=", "(", "value", ",", ")", 
"elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "# packed binary data", "dtsize", "=", "struct", ".", "calcsize", "(", "dtype", ")", "if", "len", "(", "value", ")", "%", "dtsize", ":", "raise", "ValueError", "(", "'invalid packed binary data'", ")", "count", "=", "len", "(", "value", ")", "//", "dtsize", "if", "len", "(", "dtype", ")", ">", "1", ":", "count", "*=", "int", "(", "dtype", "[", ":", "-", "1", "]", ")", "dtype", "=", "dtype", "[", "-", "1", "]", "ifdentry", "=", "[", "pack", "(", "'HH'", ",", "code", ",", "tifftype", ")", ",", "pack", "(", "offsetformat", ",", "rawcount", ")", "]", "ifdvalue", "=", "None", "if", "struct", ".", "calcsize", "(", "dtype", ")", "*", "count", "<=", "offsetsize", ":", "# value(s) can be written directly", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "ifdentry", ".", "append", "(", "pack", "(", "valueformat", ",", "value", ")", ")", "elif", "count", "==", "1", ":", "if", "isinstance", "(", "value", ",", "(", "tuple", ",", "list", ",", "numpy", ".", "ndarray", ")", ")", ":", "value", "=", "value", "[", "0", "]", "ifdentry", ".", "append", "(", "pack", "(", "valueformat", ",", "pack", "(", "dtype", ",", "value", ")", ")", ")", "else", ":", "ifdentry", ".", "append", "(", "pack", "(", "valueformat", ",", "pack", "(", "str", "(", "count", ")", "+", "dtype", ",", "*", "value", ")", ")", ")", "else", ":", "# use offset to value(s)", "ifdentry", ".", "append", "(", "pack", "(", "offsetformat", ",", "0", ")", ")", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "ifdvalue", "=", "value", "elif", "isinstance", "(", "value", ",", "numpy", ".", "ndarray", ")", ":", "assert", "value", ".", "size", "==", "count", "assert", "value", ".", "dtype", ".", "char", "==", "dtype", "ifdvalue", "=", "value", ".", "tostring", "(", ")", "elif", "isinstance", "(", "value", ",", "(", "tuple", ",", "list", ")", ")", ":", "ifdvalue", "=", "pack", "(", "str", "(", "count", ")", "+", "dtype", ",", "*", "value", ")", "else", 
":", "ifdvalue", "=", "pack", "(", "dtype", ",", "value", ")", "tags", ".", "append", "(", "(", "code", ",", "b''", ".", "join", "(", "ifdentry", ")", ",", "ifdvalue", ",", "writeonce", ")", ")", "def", "rational", "(", "arg", ",", "max_denominator", "=", "1000000", ")", ":", "\"\"\"\"Return nominator and denominator from float or two integers.\"\"\"", "from", "fractions", "import", "Fraction", "# delayed import", "try", ":", "f", "=", "Fraction", ".", "from_float", "(", "arg", ")", "except", "TypeError", ":", "f", "=", "Fraction", "(", "arg", "[", "0", "]", ",", "arg", "[", "1", "]", ")", "f", "=", "f", ".", "limit_denominator", "(", "max_denominator", ")", "return", "f", ".", "numerator", ",", "f", ".", "denominator", "if", "description", ":", "# user provided description", "addtag", "(", "'ImageDescription'", ",", "'s'", ",", "0", ",", "description", ",", "writeonce", "=", "True", ")", "# write shape and metadata to ImageDescription", "self", ".", "_metadata", "=", "{", "}", "if", "not", "metadata", "else", "metadata", ".", "copy", "(", ")", "if", "self", ".", "_imagej", ":", "description", "=", "imagej_description", "(", "input_shape", ",", "shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", ",", "self", ".", "_colormap", "is", "not", "None", ",", "*", "*", "self", ".", "_metadata", ")", "elif", "metadata", "or", "metadata", "==", "{", "}", ":", "if", "self", ".", "_truncate", ":", "self", ".", "_metadata", ".", "update", "(", "truncated", "=", "True", ")", "description", "=", "json_description", "(", "input_shape", ",", "*", "*", "self", ".", "_metadata", ")", "# elif metadata is None and self._truncate:", "# raise ValueError('cannot truncate without writing metadata')", "else", ":", "description", "=", "None", "if", "description", ":", "# add 64 bytes buffer", "# the image description might be updated later with the final shape", "description", "=", "str2bytes", "(", "description", ",", "'ascii'", ")", "description", "+=", "b'\\0'", "*", "64", "self", 
".", "_descriptionlen", "=", "len", "(", "description", ")", "addtag", "(", "'ImageDescription'", ",", "'s'", ",", "0", ",", "description", ",", "writeonce", "=", "True", ")", "if", "software", ":", "addtag", "(", "'Software'", ",", "'s'", ",", "0", ",", "software", ",", "writeonce", "=", "True", ")", "if", "datetime", ":", "if", "isinstance", "(", "datetime", ",", "str", ")", ":", "if", "len", "(", "datetime", ")", "!=", "19", "or", "datetime", "[", "16", "]", "!=", "':'", ":", "raise", "ValueError", "(", "'invalid datetime string'", ")", "else", ":", "try", ":", "datetime", "=", "datetime", ".", "strftime", "(", "'%Y:%m:%d %H:%M:%S'", ")", "except", "AttributeError", ":", "datetime", "=", "self", ".", "_now", "(", ")", ".", "strftime", "(", "'%Y:%m:%d %H:%M:%S'", ")", "addtag", "(", "'DateTime'", ",", "'s'", ",", "0", ",", "datetime", ",", "writeonce", "=", "True", ")", "addtag", "(", "'Compression'", ",", "'H'", ",", "1", ",", "compresstag", ")", "if", "predictor", ":", "addtag", "(", "'Predictor'", ",", "'H'", ",", "1", ",", "predictortag", ")", "addtag", "(", "'ImageWidth'", ",", "'I'", ",", "1", ",", "shape", "[", "-", "2", "]", ")", "addtag", "(", "'ImageLength'", ",", "'I'", ",", "1", ",", "shape", "[", "-", "3", "]", ")", "if", "tile", ":", "addtag", "(", "'TileWidth'", ",", "'I'", ",", "1", ",", "tile", "[", "-", "1", "]", ")", "addtag", "(", "'TileLength'", ",", "'I'", ",", "1", ",", "tile", "[", "-", "2", "]", ")", "if", "tile", "[", "0", "]", ">", "1", ":", "addtag", "(", "'ImageDepth'", ",", "'I'", ",", "1", ",", "shape", "[", "-", "4", "]", ")", "addtag", "(", "'TileDepth'", ",", "'I'", ",", "1", ",", "tile", "[", "0", "]", ")", "addtag", "(", "'NewSubfileType'", ",", "'I'", ",", "1", ",", "subfiletype", ")", "if", "not", "bilevel", ":", "sampleformat", "=", "{", "'u'", ":", "1", ",", "'i'", ":", "2", ",", "'f'", ":", "3", ",", "'c'", ":", "6", "}", "[", "datadtype", ".", "kind", "]", "addtag", "(", "'SampleFormat'", ",", "'H'", ",", 
"samplesperpixel", ",", "(", "sampleformat", ",", ")", "*", "samplesperpixel", ")", "addtag", "(", "'PhotometricInterpretation'", ",", "'H'", ",", "1", ",", "photometric", ".", "value", ")", "if", "colormap", "is", "not", "None", ":", "addtag", "(", "'ColorMap'", ",", "'H'", ",", "colormap", ".", "size", ",", "colormap", ")", "addtag", "(", "'SamplesPerPixel'", ",", "'H'", ",", "1", ",", "samplesperpixel", ")", "if", "bilevel", ":", "pass", "elif", "planarconfig", "and", "samplesperpixel", ">", "1", ":", "addtag", "(", "'PlanarConfiguration'", ",", "'H'", ",", "1", ",", "planarconfig", ".", "value", ")", "addtag", "(", "'BitsPerSample'", ",", "'H'", ",", "samplesperpixel", ",", "(", "datadtype", ".", "itemsize", "*", "8", ",", ")", "*", "samplesperpixel", ")", "else", ":", "addtag", "(", "'BitsPerSample'", ",", "'H'", ",", "1", ",", "datadtype", ".", "itemsize", "*", "8", ")", "if", "extrasamples", ":", "if", "extrasamples_", "is", "not", "None", ":", "if", "extrasamples", "!=", "len", "(", "extrasamples_", ")", ":", "raise", "ValueError", "(", "'wrong number of extrasamples specified'", ")", "addtag", "(", "'ExtraSamples'", ",", "'H'", ",", "extrasamples", ",", "extrasamples_", ")", "elif", "photometric", "==", "RGB", "and", "extrasamples", "==", "1", ":", "# Unassociated alpha channel", "addtag", "(", "'ExtraSamples'", ",", "'H'", ",", "1", ",", "2", ")", "else", ":", "# Unspecified alpha channel", "addtag", "(", "'ExtraSamples'", ",", "'H'", ",", "extrasamples", ",", "(", "0", ",", ")", "*", "extrasamples", ")", "if", "resolution", "is", "not", "None", ":", "addtag", "(", "'XResolution'", ",", "'2I'", ",", "1", ",", "rational", "(", "resolution", "[", "0", "]", ")", ")", "addtag", "(", "'YResolution'", ",", "'2I'", ",", "1", ",", "rational", "(", "resolution", "[", "1", "]", ")", ")", "if", "len", "(", "resolution", ")", ">", "2", ":", "unit", "=", "resolution", "[", "2", "]", "unit", "=", "1", "if", "unit", "is", "None", "else", "enumarg", "(", "TIFF", ".", 
"RESUNIT", ",", "unit", ")", "elif", "self", ".", "_imagej", ":", "unit", "=", "1", "else", ":", "unit", "=", "2", "addtag", "(", "'ResolutionUnit'", ",", "'H'", ",", "1", ",", "unit", ")", "elif", "not", "self", ".", "_imagej", ":", "addtag", "(", "'XResolution'", ",", "'2I'", ",", "1", ",", "(", "1", ",", "1", ")", ")", "addtag", "(", "'YResolution'", ",", "'2I'", ",", "1", ",", "(", "1", ",", "1", ")", ")", "addtag", "(", "'ResolutionUnit'", ",", "'H'", ",", "1", ",", "1", ")", "if", "ijmetadata", ":", "for", "t", "in", "imagej_metadata_tag", "(", "ijmetadata", ",", "byteorder", ")", ":", "addtag", "(", "*", "t", ")", "contiguous", "=", "not", "compress", "if", "tile", ":", "# one chunk per tile per plane", "tiles", "=", "(", "(", "shape", "[", "2", "]", "+", "tile", "[", "0", "]", "-", "1", ")", "//", "tile", "[", "0", "]", ",", "(", "shape", "[", "3", "]", "+", "tile", "[", "1", "]", "-", "1", ")", "//", "tile", "[", "1", "]", ",", "(", "shape", "[", "4", "]", "+", "tile", "[", "2", "]", "-", "1", ")", "//", "tile", "[", "2", "]", ")", "numtiles", "=", "product", "(", "tiles", ")", "*", "shape", "[", "1", "]", "databytecounts", "=", "[", "product", "(", "tile", ")", "*", "shape", "[", "-", "1", "]", "*", "datadtype", ".", "itemsize", "]", "*", "numtiles", "addtag", "(", "tagbytecounts", ",", "offsetformat", ",", "numtiles", ",", "databytecounts", ")", "addtag", "(", "tagoffsets", ",", "offsetformat", ",", "numtiles", ",", "[", "0", "]", "*", "numtiles", ")", "contiguous", "=", "contiguous", "and", "product", "(", "tiles", ")", "==", "1", "if", "not", "contiguous", ":", "# allocate tile buffer", "chunk", "=", "numpy", ".", "empty", "(", "tile", "+", "(", "shape", "[", "-", "1", "]", ",", ")", ",", "dtype", "=", "datadtype", ")", "elif", "contiguous", "and", "(", "bilevel", "or", "rowsperstrip", "is", "None", ")", ":", "# one strip per plane", "if", "bilevel", ":", "databytecounts", "=", "[", "product", "(", "datashape", "[", "2", ":", "]", ")", "]", "*", 
"shape", "[", "1", "]", "else", ":", "databytecounts", "=", "[", "product", "(", "datashape", "[", "2", ":", "]", ")", "*", "datadtype", ".", "itemsize", "]", "*", "shape", "[", "1", "]", "addtag", "(", "tagbytecounts", ",", "offsetformat", ",", "shape", "[", "1", "]", ",", "databytecounts", ")", "addtag", "(", "tagoffsets", ",", "offsetformat", ",", "shape", "[", "1", "]", ",", "[", "0", "]", "*", "shape", "[", "1", "]", ")", "addtag", "(", "'RowsPerStrip'", ",", "'I'", ",", "1", ",", "shape", "[", "-", "3", "]", ")", "else", ":", "# use rowsperstrip", "rowsize", "=", "product", "(", "shape", "[", "-", "2", ":", "]", ")", "*", "datadtype", ".", "itemsize", "if", "rowsperstrip", "is", "None", ":", "# compress ~64 KB chunks by default", "rowsperstrip", "=", "65536", "//", "rowsize", "if", "compress", "else", "shape", "[", "-", "3", "]", "if", "rowsperstrip", "<", "1", ":", "rowsperstrip", "=", "1", "elif", "rowsperstrip", ">", "shape", "[", "-", "3", "]", ":", "rowsperstrip", "=", "shape", "[", "-", "3", "]", "addtag", "(", "'RowsPerStrip'", ",", "'I'", ",", "1", ",", "rowsperstrip", ")", "numstrips1", "=", "(", "shape", "[", "-", "3", "]", "+", "rowsperstrip", "-", "1", ")", "//", "rowsperstrip", "numstrips", "=", "numstrips1", "*", "shape", "[", "1", "]", "if", "compress", ":", "databytecounts", "=", "[", "0", "]", "*", "numstrips", "else", ":", "# TODO: save bilevel data with rowsperstrip", "stripsize", "=", "rowsperstrip", "*", "rowsize", "databytecounts", "=", "[", "stripsize", "]", "*", "numstrips", "stripsize", "-=", "rowsize", "*", "(", "numstrips1", "*", "rowsperstrip", "-", "shape", "[", "-", "3", "]", ")", "for", "i", "in", "range", "(", "numstrips1", "-", "1", ",", "numstrips", ",", "numstrips1", ")", ":", "databytecounts", "[", "i", "]", "=", "stripsize", "addtag", "(", "tagbytecounts", ",", "offsetformat", ",", "numstrips", ",", "databytecounts", ")", "addtag", "(", "tagoffsets", ",", "offsetformat", ",", "numstrips", ",", "[", "0", "]", "*", 
"numstrips", ")", "if", "data", "is", "None", "and", "not", "contiguous", ":", "raise", "ValueError", "(", "'cannot write non-contiguous empty file'", ")", "# add extra tags from user", "for", "t", "in", "extratags", ":", "addtag", "(", "*", "t", ")", "# TODO: check TIFFReadDirectoryCheckOrder warning in files containing", "# multiple tags of same code", "# the entries in an IFD must be sorted in ascending order by tag code", "tags", "=", "sorted", "(", "tags", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "fhpos", "=", "fh", ".", "tell", "(", ")", "if", "not", "(", "self", ".", "_bigtiff", "or", "self", ".", "_imagej", ")", "and", "fhpos", "+", "datasize", ">", "2", "**", "32", "-", "1", ":", "raise", "ValueError", "(", "'data too large for standard TIFF file'", ")", "# if not compressed or multi-tiled, write the first IFD and then", "# all data contiguously; else, write all IFDs and data interleaved", "for", "pageindex", "in", "range", "(", "1", "if", "contiguous", "else", "shape", "[", "0", "]", ")", ":", "ifdpos", "=", "fhpos", "if", "ifdpos", "%", "2", ":", "# location of IFD must begin on a word boundary", "fh", ".", "write", "(", "b'\\0'", ")", "ifdpos", "+=", "1", "# update pointer at ifdoffset", "fh", ".", "seek", "(", "self", ".", "_ifdoffset", ")", "fh", ".", "write", "(", "pack", "(", "offsetformat", ",", "ifdpos", ")", ")", "fh", ".", "seek", "(", "ifdpos", ")", "# create IFD in memory", "if", "pageindex", "<", "2", ":", "ifd", "=", "io", ".", "BytesIO", "(", ")", "ifd", ".", "write", "(", "pack", "(", "tagnoformat", ",", "len", "(", "tags", ")", ")", ")", "tagoffset", "=", "ifd", ".", "tell", "(", ")", "ifd", ".", "write", "(", "b''", ".", "join", "(", "t", "[", "1", "]", "for", "t", "in", "tags", ")", ")", "ifdoffset", "=", "ifd", ".", "tell", "(", ")", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "0", ")", ")", "# offset to next IFD", "# write tag values and patch offsets in ifdentries", "for", "tagindex", ",", "tag", 
"in", "enumerate", "(", "tags", ")", ":", "offset", "=", "tagoffset", "+", "tagindex", "*", "tagsize", "+", "offsetsize", "+", "4", "code", "=", "tag", "[", "0", "]", "value", "=", "tag", "[", "2", "]", "if", "value", ":", "pos", "=", "ifd", ".", "tell", "(", ")", "if", "pos", "%", "2", ":", "# tag value is expected to begin on word boundary", "ifd", ".", "write", "(", "b'\\0'", ")", "pos", "+=", "1", "ifd", ".", "seek", "(", "offset", ")", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "ifdpos", "+", "pos", ")", ")", "ifd", ".", "seek", "(", "pos", ")", "ifd", ".", "write", "(", "value", ")", "if", "code", "==", "tagoffsets", ":", "dataoffsetsoffset", "=", "offset", ",", "pos", "elif", "code", "==", "tagbytecounts", ":", "databytecountsoffset", "=", "offset", ",", "pos", "elif", "code", "==", "270", "and", "value", ".", "endswith", "(", "b'\\0\\0\\0\\0'", ")", ":", "# image description buffer", "self", ".", "_descriptionoffset", "=", "ifdpos", "+", "pos", "self", ".", "_descriptionlenoffset", "=", "(", "ifdpos", "+", "tagoffset", "+", "tagindex", "*", "tagsize", "+", "4", ")", "elif", "code", "==", "tagoffsets", ":", "dataoffsetsoffset", "=", "offset", ",", "None", "elif", "code", "==", "tagbytecounts", ":", "databytecountsoffset", "=", "offset", ",", "None", "ifdsize", "=", "ifd", ".", "tell", "(", ")", "if", "ifdsize", "%", "2", ":", "ifd", ".", "write", "(", "b'\\0'", ")", "ifdsize", "+=", "1", "# write IFD later when strip/tile bytecounts and offsets are known", "fh", ".", "seek", "(", "ifdsize", ",", "1", ")", "# write image data", "dataoffset", "=", "fh", ".", "tell", "(", ")", "skip", "=", "align", "-", "dataoffset", "%", "align", "fh", ".", "seek", "(", "skip", ",", "1", ")", "dataoffset", "+=", "skip", "if", "contiguous", ":", "if", "data", "is", "None", ":", "fh", ".", "write_empty", "(", "datasize", ")", "else", ":", "fh", ".", "write_array", "(", "data", ")", "elif", "tile", ":", "if", "data", "is", "None", ":", "fh", ".", "write_empty", 
"(", "numtiles", "*", "databytecounts", "[", "0", "]", ")", "else", ":", "stripindex", "=", "0", "for", "plane", "in", "data", "[", "pageindex", "]", ":", "for", "tz", "in", "range", "(", "tiles", "[", "0", "]", ")", ":", "for", "ty", "in", "range", "(", "tiles", "[", "1", "]", ")", ":", "for", "tx", "in", "range", "(", "tiles", "[", "2", "]", ")", ":", "c0", "=", "min", "(", "tile", "[", "0", "]", ",", "shape", "[", "2", "]", "-", "tz", "*", "tile", "[", "0", "]", ")", "c1", "=", "min", "(", "tile", "[", "1", "]", ",", "shape", "[", "3", "]", "-", "ty", "*", "tile", "[", "1", "]", ")", "c2", "=", "min", "(", "tile", "[", "2", "]", ",", "shape", "[", "4", "]", "-", "tx", "*", "tile", "[", "2", "]", ")", "chunk", "[", "c0", ":", ",", "c1", ":", ",", "c2", ":", "]", "=", "0", "chunk", "[", ":", "c0", ",", ":", "c1", ",", ":", "c2", "]", "=", "plane", "[", "tz", "*", "tile", "[", "0", "]", ":", "tz", "*", "tile", "[", "0", "]", "+", "c0", ",", "ty", "*", "tile", "[", "1", "]", ":", "ty", "*", "tile", "[", "1", "]", "+", "c1", ",", "tx", "*", "tile", "[", "2", "]", ":", "tx", "*", "tile", "[", "2", "]", "+", "c2", "]", "if", "compress", ":", "t", "=", "compress", "(", "chunk", ")", "fh", ".", "write", "(", "t", ")", "databytecounts", "[", "stripindex", "]", "=", "len", "(", "t", ")", "stripindex", "+=", "1", "else", ":", "fh", ".", "write_array", "(", "chunk", ")", "# fh.flush()", "elif", "compress", ":", "# write one strip per rowsperstrip", "assert", "data", ".", "shape", "[", "2", "]", "==", "1", "# not handling depth", "numstrips", "=", "(", "shape", "[", "-", "3", "]", "+", "rowsperstrip", "-", "1", ")", "//", "rowsperstrip", "stripindex", "=", "0", "for", "plane", "in", "data", "[", "pageindex", "]", ":", "for", "i", "in", "range", "(", "numstrips", ")", ":", "strip", "=", "plane", "[", "0", ",", "i", "*", "rowsperstrip", ":", "(", "i", "+", "1", ")", "*", "rowsperstrip", "]", "strip", "=", "compress", "(", "strip", ")", "fh", ".", "write", "(", "strip", ")", 
"databytecounts", "[", "stripindex", "]", "=", "len", "(", "strip", ")", "stripindex", "+=", "1", "else", ":", "fh", ".", "write_array", "(", "data", "[", "pageindex", "]", ")", "# update strip/tile offsets", "offset", ",", "pos", "=", "dataoffsetsoffset", "ifd", ".", "seek", "(", "offset", ")", "if", "pos", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "ifdpos", "+", "pos", ")", ")", "ifd", ".", "seek", "(", "pos", ")", "offset", "=", "dataoffset", "for", "size", "in", "databytecounts", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "offset", ")", ")", "offset", "+=", "size", "else", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "dataoffset", ")", ")", "if", "compress", ":", "# update strip/tile bytecounts", "offset", ",", "pos", "=", "databytecountsoffset", "ifd", ".", "seek", "(", "offset", ")", "if", "pos", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "ifdpos", "+", "pos", ")", ")", "ifd", ".", "seek", "(", "pos", ")", "for", "size", "in", "databytecounts", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "size", ")", ")", "else", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "databytecounts", "[", "0", "]", ")", ")", "fhpos", "=", "fh", ".", "tell", "(", ")", "fh", ".", "seek", "(", "ifdpos", ")", "fh", ".", "write", "(", "iogetbuffer", "(", "ifd", ")", ")", "fh", ".", "flush", "(", ")", "fh", ".", "seek", "(", "fhpos", ")", "self", ".", "_ifdoffset", "=", "ifdpos", "+", "ifdoffset", "# remove tags that should be written only once", "if", "pageindex", "==", "0", ":", "tags", "=", "[", "tag", "for", "tag", "in", "tags", "if", "not", "tag", "[", "-", "1", "]", "]", "self", ".", "_shape", "=", "shape", "self", ".", "_datashape", "=", "(", "1", ",", ")", "+", "input_shape", "self", ".", "_datadtype", "=", "datadtype", "self", ".", "_dataoffset", "=", "dataoffset", "self", ".", "_databytecounts", "=", "databytecounts", "if", "contiguous", ":", "# write 
remaining IFDs/tags later", "self", ".", "_tags", "=", "tags", "# return offset and size of image data", "if", "returnoffset", ":", "return", "dataoffset", ",", "sum", "(", "databytecounts", ")", "return", "None" ]
Write numpy array and tags to TIFF file. The data shape's last dimensions are assumed to be image depth, height (length), width, and samples. If a colormap is provided, the data's dtype must be uint8 or uint16 and the data values are indices into the last dimension of the colormap. If 'shape' and 'dtype' are specified, an empty array is saved. This option cannot be used with compression or multiple tiles. Image data are written uncompressed in one strip per plane by default. Dimensions larger than 2 to 4 (depending on photometric mode, planar configuration, and SGI mode) are flattened and saved as separate pages. The SampleFormat and BitsPerSample tags are derived from the data type. Parameters ---------- data : numpy.ndarray or None Input image array. shape : tuple or None Shape of the empty array to save. Used only if 'data' is None. dtype : numpy.dtype or None Data-type of the empty array to save. Used only if 'data' is None. returnoffset : bool If True and the image data in the file is memory-mappable, return the offset and number of bytes of the image data in the file. photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'} The color space of the image data. By default, this setting is inferred from the data shape and the value of colormap. For CFA images, DNG tags must be specified in 'extratags'. planarconfig : {'CONTIG', 'SEPARATE'} Specifies if samples are stored interleaved or in separate planes. By default, this setting is inferred from the data shape. If this parameter is set, extra samples are used to store grayscale images. 'CONTIG': last dimension contains samples. 'SEPARATE': third last dimension contains samples. extrasamples : tuple of {'UNSPECIFIED', 'ASSOCALPHA', 'UNASSALPHA'} Defines the interpretation of extra components in pixels. 'UNSPECIFIED': no transparency information (default). 'ASSOCALPHA': single, true transparency with pre-multiplied color. 'UNASSALPHA': independent transparency masks. 
tile : tuple of int The shape (depth, length, width) of image tiles to write. If None (default), image data are written in strips. The tile length and width must be a multiple of 16. If the tile depth is provided, the SGI ImageDepth and TileDepth tags are used to save volume data. Unless a single tile is used, tiles cannot be used to write contiguous files. Few software can read the SGI format, e.g. MeVisLab. contiguous : bool If True (default) and the data and parameters are compatible with previous ones, if any, the image data are stored contiguously after the previous one. In that case, 'photometric', 'planarconfig', 'rowsperstrip', are ignored. Metadata such as 'description', 'metadata', 'datetime', and 'extratags' are written to the first page of a contiguous series only. align : int Byte boundary on which to align the image data in the file. Default 16. Use mmap.ALLOCATIONGRANULARITY for memory-mapped data. Following contiguous writes are not aligned. truncate : bool If True, only write the first page including shape metadata if possible (uncompressed, contiguous, not tiled). Other TIFF readers will only be able to read part of the data. compress : int or str or (str, int) If 0 (default), data are written uncompressed. If 0-9, the level of ADOBE_DEFLATE compression. If a str, one of TIFF.COMPRESSION, e.g. 'LZMA' or 'ZSTD'. If a tuple, first item is one of TIFF.COMPRESSION and second item is compression level. Compression cannot be used to write contiguous files. rowsperstrip : int The number of rows per strip. By default strips will be ~64 KB if compression is enabled, else rowsperstrip is set to the image length. Bilevel images are always stored in one strip per plane. predictor : bool If True, apply horizontal differencing or floating-point predictor before compression. colormap : numpy.ndarray RGB color values for the corresponding data value. Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16. description : str The subject of the image. 
Must be 7-bit ASCII. Cannot be used with the ImageJ format. Saved with the first page only. datetime : datetime, str, or bool Date and time of image creation in '%Y:%m:%d %H:%M:%S' format or datetime object. Else if True, the current date and time is used. Saved with the first page only. resolution : (float, float[, str]) or ((int, int), (int, int)[, str]) X and Y resolutions in pixels per resolution unit as float or rational numbers. A third, optional parameter specifies the resolution unit, which must be None (default for ImageJ), 'INCH' (default), or 'CENTIMETER'. subfiletype : int Bitfield to indicate the kind of data. Set bit 0 if the image is a reduced-resolution version of another image. Set bit 1 if the image is part of a multi-page image. Set bit 2 if the image is transparency mask for another image (photometric must be MASK, SamplesPerPixel and BitsPerSample must be 1). software : str Name of the software used to create the file. Must be 7-bit ASCII. Saved with the first page only. metadata : dict Additional metadata to be saved along with shape information in JSON or ImageJ formats in an ImageDescription tag. If None, do not write a second ImageDescription tag. Strings must be 7-bit ASCII. Saved with the first page only. ijmetadata : dict Additional metadata to be saved in application specific IJMetadata and IJMetadataByteCounts tags. Refer to the imagej_metadata_tag function for valid keys and values. Saved with the first page only. extratags : sequence of tuples Additional tags as [(code, dtype, count, value, writeonce)]. code : int The TIFF tag Id. dtype : str Data type of items in 'value' in Python struct format. One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q. count : int Number of data values. Not used for string or byte string values. value : sequence 'Count' values compatible with 'dtype'. Byte strings must contain count values of dtype packed as binary data. writeonce : bool If True, the tag is written to the first page only.
[ "Write", "numpy", "array", "and", "tags", "to", "TIFF", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L890-L1743
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffWriter._write_remaining_pages
def _write_remaining_pages(self): """Write outstanding IFDs and tags to file.""" if not self._tags or self._truncate: return pageno = self._shape[0] * self._datashape[0] - 1 if pageno < 1: self._tags = None self._datadtype = None self._dataoffset = None self._databytecounts = None return fh = self._fh fhpos = fh.tell() if fhpos % 2: fh.write(b'\0') fhpos += 1 pack = struct.pack offsetformat = self._byteorder + self._offsetformat offsetsize = self._offsetsize tagnoformat = self._byteorder + self._tagnoformat tagsize = self._tagsize dataoffset = self._dataoffset pagedatasize = sum(self._databytecounts) # construct template IFD in memory # need to patch offsets to next IFD and data before writing to file ifd = io.BytesIO() ifd.write(pack(tagnoformat, len(self._tags))) tagoffset = ifd.tell() ifd.write(b''.join(t[1] for t in self._tags)) ifdoffset = ifd.tell() ifd.write(pack(offsetformat, 0)) # offset to next IFD # tag values for tagindex, tag in enumerate(self._tags): offset = tagoffset + tagindex * tagsize + offsetsize + 4 code = tag[0] value = tag[2] if value: pos = ifd.tell() if pos % 2: # tag value is expected to begin on word boundary ifd.write(b'\0') pos += 1 ifd.seek(offset) try: ifd.write(pack(offsetformat, fhpos + pos)) except Exception: # struct.error if self._imagej: warnings.warn('truncating ImageJ file') self._truncate = True return raise ValueError('data too large for non-BigTIFF file') ifd.seek(pos) ifd.write(value) if code == self._tagoffsets: # save strip/tile offsets for later updates dataoffsetsoffset = offset, pos elif code == self._tagoffsets: dataoffsetsoffset = offset, None ifdsize = ifd.tell() if ifdsize % 2: ifd.write(b'\0') ifdsize += 1 # check if all IFDs fit in file if not self._bigtiff and fhpos + ifdsize * pageno > 2**32 - 32: if self._imagej: warnings.warn('truncating ImageJ file') self._truncate = True return raise ValueError('data too large for non-BigTIFF file') # assemble IFD chain in memory from IFD template ifds = 
io.BytesIO(bytes(ifdsize * pageno)) ifdpos = fhpos for _ in range(pageno): # update strip/tile offsets in IFD dataoffset += pagedatasize # offset to image data offset, pos = dataoffsetsoffset ifd.seek(offset) if pos: ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) offset = dataoffset for size in self._databytecounts: ifd.write(pack(offsetformat, offset)) offset += size else: ifd.write(pack(offsetformat, dataoffset)) # update pointer at ifdoffset to point to next IFD in file ifdpos += ifdsize ifd.seek(ifdoffset) ifd.write(pack(offsetformat, ifdpos)) # write IFD entry ifds.write(iogetbuffer(ifd)) # terminate IFD chain ifdoffset += ifdsize * (pageno - 1) ifds.seek(ifdoffset) ifds.write(pack(offsetformat, 0)) # write IFD chain to file fh.write(iogetbuffer(ifds)) # update file to point to new IFD chain pos = fh.tell() fh.seek(self._ifdoffset) fh.write(pack(offsetformat, fhpos)) fh.flush() fh.seek(pos) self._ifdoffset = fhpos + ifdoffset self._tags = None self._datadtype = None self._dataoffset = None self._databytecounts = None
python
def _write_remaining_pages(self): """Write outstanding IFDs and tags to file.""" if not self._tags or self._truncate: return pageno = self._shape[0] * self._datashape[0] - 1 if pageno < 1: self._tags = None self._datadtype = None self._dataoffset = None self._databytecounts = None return fh = self._fh fhpos = fh.tell() if fhpos % 2: fh.write(b'\0') fhpos += 1 pack = struct.pack offsetformat = self._byteorder + self._offsetformat offsetsize = self._offsetsize tagnoformat = self._byteorder + self._tagnoformat tagsize = self._tagsize dataoffset = self._dataoffset pagedatasize = sum(self._databytecounts) # construct template IFD in memory # need to patch offsets to next IFD and data before writing to file ifd = io.BytesIO() ifd.write(pack(tagnoformat, len(self._tags))) tagoffset = ifd.tell() ifd.write(b''.join(t[1] for t in self._tags)) ifdoffset = ifd.tell() ifd.write(pack(offsetformat, 0)) # offset to next IFD # tag values for tagindex, tag in enumerate(self._tags): offset = tagoffset + tagindex * tagsize + offsetsize + 4 code = tag[0] value = tag[2] if value: pos = ifd.tell() if pos % 2: # tag value is expected to begin on word boundary ifd.write(b'\0') pos += 1 ifd.seek(offset) try: ifd.write(pack(offsetformat, fhpos + pos)) except Exception: # struct.error if self._imagej: warnings.warn('truncating ImageJ file') self._truncate = True return raise ValueError('data too large for non-BigTIFF file') ifd.seek(pos) ifd.write(value) if code == self._tagoffsets: # save strip/tile offsets for later updates dataoffsetsoffset = offset, pos elif code == self._tagoffsets: dataoffsetsoffset = offset, None ifdsize = ifd.tell() if ifdsize % 2: ifd.write(b'\0') ifdsize += 1 # check if all IFDs fit in file if not self._bigtiff and fhpos + ifdsize * pageno > 2**32 - 32: if self._imagej: warnings.warn('truncating ImageJ file') self._truncate = True return raise ValueError('data too large for non-BigTIFF file') # assemble IFD chain in memory from IFD template ifds = 
io.BytesIO(bytes(ifdsize * pageno)) ifdpos = fhpos for _ in range(pageno): # update strip/tile offsets in IFD dataoffset += pagedatasize # offset to image data offset, pos = dataoffsetsoffset ifd.seek(offset) if pos: ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) offset = dataoffset for size in self._databytecounts: ifd.write(pack(offsetformat, offset)) offset += size else: ifd.write(pack(offsetformat, dataoffset)) # update pointer at ifdoffset to point to next IFD in file ifdpos += ifdsize ifd.seek(ifdoffset) ifd.write(pack(offsetformat, ifdpos)) # write IFD entry ifds.write(iogetbuffer(ifd)) # terminate IFD chain ifdoffset += ifdsize * (pageno - 1) ifds.seek(ifdoffset) ifds.write(pack(offsetformat, 0)) # write IFD chain to file fh.write(iogetbuffer(ifds)) # update file to point to new IFD chain pos = fh.tell() fh.seek(self._ifdoffset) fh.write(pack(offsetformat, fhpos)) fh.flush() fh.seek(pos) self._ifdoffset = fhpos + ifdoffset self._tags = None self._datadtype = None self._dataoffset = None self._databytecounts = None
[ "def", "_write_remaining_pages", "(", "self", ")", ":", "if", "not", "self", ".", "_tags", "or", "self", ".", "_truncate", ":", "return", "pageno", "=", "self", ".", "_shape", "[", "0", "]", "*", "self", ".", "_datashape", "[", "0", "]", "-", "1", "if", "pageno", "<", "1", ":", "self", ".", "_tags", "=", "None", "self", ".", "_datadtype", "=", "None", "self", ".", "_dataoffset", "=", "None", "self", ".", "_databytecounts", "=", "None", "return", "fh", "=", "self", ".", "_fh", "fhpos", "=", "fh", ".", "tell", "(", ")", "if", "fhpos", "%", "2", ":", "fh", ".", "write", "(", "b'\\0'", ")", "fhpos", "+=", "1", "pack", "=", "struct", ".", "pack", "offsetformat", "=", "self", ".", "_byteorder", "+", "self", ".", "_offsetformat", "offsetsize", "=", "self", ".", "_offsetsize", "tagnoformat", "=", "self", ".", "_byteorder", "+", "self", ".", "_tagnoformat", "tagsize", "=", "self", ".", "_tagsize", "dataoffset", "=", "self", ".", "_dataoffset", "pagedatasize", "=", "sum", "(", "self", ".", "_databytecounts", ")", "# construct template IFD in memory", "# need to patch offsets to next IFD and data before writing to file", "ifd", "=", "io", ".", "BytesIO", "(", ")", "ifd", ".", "write", "(", "pack", "(", "tagnoformat", ",", "len", "(", "self", ".", "_tags", ")", ")", ")", "tagoffset", "=", "ifd", ".", "tell", "(", ")", "ifd", ".", "write", "(", "b''", ".", "join", "(", "t", "[", "1", "]", "for", "t", "in", "self", ".", "_tags", ")", ")", "ifdoffset", "=", "ifd", ".", "tell", "(", ")", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "0", ")", ")", "# offset to next IFD", "# tag values", "for", "tagindex", ",", "tag", "in", "enumerate", "(", "self", ".", "_tags", ")", ":", "offset", "=", "tagoffset", "+", "tagindex", "*", "tagsize", "+", "offsetsize", "+", "4", "code", "=", "tag", "[", "0", "]", "value", "=", "tag", "[", "2", "]", "if", "value", ":", "pos", "=", "ifd", ".", "tell", "(", ")", "if", "pos", "%", "2", ":", "# tag value is expected to begin on word 
boundary", "ifd", ".", "write", "(", "b'\\0'", ")", "pos", "+=", "1", "ifd", ".", "seek", "(", "offset", ")", "try", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "fhpos", "+", "pos", ")", ")", "except", "Exception", ":", "# struct.error", "if", "self", ".", "_imagej", ":", "warnings", ".", "warn", "(", "'truncating ImageJ file'", ")", "self", ".", "_truncate", "=", "True", "return", "raise", "ValueError", "(", "'data too large for non-BigTIFF file'", ")", "ifd", ".", "seek", "(", "pos", ")", "ifd", ".", "write", "(", "value", ")", "if", "code", "==", "self", ".", "_tagoffsets", ":", "# save strip/tile offsets for later updates", "dataoffsetsoffset", "=", "offset", ",", "pos", "elif", "code", "==", "self", ".", "_tagoffsets", ":", "dataoffsetsoffset", "=", "offset", ",", "None", "ifdsize", "=", "ifd", ".", "tell", "(", ")", "if", "ifdsize", "%", "2", ":", "ifd", ".", "write", "(", "b'\\0'", ")", "ifdsize", "+=", "1", "# check if all IFDs fit in file", "if", "not", "self", ".", "_bigtiff", "and", "fhpos", "+", "ifdsize", "*", "pageno", ">", "2", "**", "32", "-", "32", ":", "if", "self", ".", "_imagej", ":", "warnings", ".", "warn", "(", "'truncating ImageJ file'", ")", "self", ".", "_truncate", "=", "True", "return", "raise", "ValueError", "(", "'data too large for non-BigTIFF file'", ")", "# assemble IFD chain in memory from IFD template", "ifds", "=", "io", ".", "BytesIO", "(", "bytes", "(", "ifdsize", "*", "pageno", ")", ")", "ifdpos", "=", "fhpos", "for", "_", "in", "range", "(", "pageno", ")", ":", "# update strip/tile offsets in IFD", "dataoffset", "+=", "pagedatasize", "# offset to image data", "offset", ",", "pos", "=", "dataoffsetsoffset", "ifd", ".", "seek", "(", "offset", ")", "if", "pos", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "ifdpos", "+", "pos", ")", ")", "ifd", ".", "seek", "(", "pos", ")", "offset", "=", "dataoffset", "for", "size", "in", "self", ".", "_databytecounts", ":", "ifd", ".", "write", "(", 
"pack", "(", "offsetformat", ",", "offset", ")", ")", "offset", "+=", "size", "else", ":", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "dataoffset", ")", ")", "# update pointer at ifdoffset to point to next IFD in file", "ifdpos", "+=", "ifdsize", "ifd", ".", "seek", "(", "ifdoffset", ")", "ifd", ".", "write", "(", "pack", "(", "offsetformat", ",", "ifdpos", ")", ")", "# write IFD entry", "ifds", ".", "write", "(", "iogetbuffer", "(", "ifd", ")", ")", "# terminate IFD chain", "ifdoffset", "+=", "ifdsize", "*", "(", "pageno", "-", "1", ")", "ifds", ".", "seek", "(", "ifdoffset", ")", "ifds", ".", "write", "(", "pack", "(", "offsetformat", ",", "0", ")", ")", "# write IFD chain to file", "fh", ".", "write", "(", "iogetbuffer", "(", "ifds", ")", ")", "# update file to point to new IFD chain", "pos", "=", "fh", ".", "tell", "(", ")", "fh", ".", "seek", "(", "self", ".", "_ifdoffset", ")", "fh", ".", "write", "(", "pack", "(", "offsetformat", ",", "fhpos", ")", ")", "fh", ".", "flush", "(", ")", "fh", ".", "seek", "(", "pos", ")", "self", ".", "_ifdoffset", "=", "fhpos", "+", "ifdoffset", "self", ".", "_tags", "=", "None", "self", ".", "_datadtype", "=", "None", "self", ".", "_dataoffset", "=", "None", "self", ".", "_databytecounts", "=", "None" ]
Write outstanding IFDs and tags to file.
[ "Write", "outstanding", "IFDs", "and", "tags", "to", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L1745-L1862
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffWriter._write_image_description
def _write_image_description(self): """Write metadata to ImageDescription tag.""" if (not self._datashape or self._datashape[0] == 1 or self._descriptionoffset <= 0): return colormapped = self._colormap is not None if self._imagej: isrgb = self._shape[-1] in (3, 4) description = imagej_description( self._datashape, isrgb, colormapped, **self._metadata) else: description = json_description(self._datashape, **self._metadata) # rewrite description and its length to file description = description.encode('utf-8') description = description[:self._descriptionlen-1] pos = self._fh.tell() self._fh.seek(self._descriptionoffset) self._fh.write(description) self._fh.seek(self._descriptionlenoffset) self._fh.write(struct.pack(self._byteorder+self._offsetformat, len(description)+1)) self._fh.seek(pos) self._descriptionoffset = 0 self._descriptionlenoffset = 0 self._descriptionlen = 0
python
def _write_image_description(self): """Write metadata to ImageDescription tag.""" if (not self._datashape or self._datashape[0] == 1 or self._descriptionoffset <= 0): return colormapped = self._colormap is not None if self._imagej: isrgb = self._shape[-1] in (3, 4) description = imagej_description( self._datashape, isrgb, colormapped, **self._metadata) else: description = json_description(self._datashape, **self._metadata) # rewrite description and its length to file description = description.encode('utf-8') description = description[:self._descriptionlen-1] pos = self._fh.tell() self._fh.seek(self._descriptionoffset) self._fh.write(description) self._fh.seek(self._descriptionlenoffset) self._fh.write(struct.pack(self._byteorder+self._offsetformat, len(description)+1)) self._fh.seek(pos) self._descriptionoffset = 0 self._descriptionlenoffset = 0 self._descriptionlen = 0
[ "def", "_write_image_description", "(", "self", ")", ":", "if", "(", "not", "self", ".", "_datashape", "or", "self", ".", "_datashape", "[", "0", "]", "==", "1", "or", "self", ".", "_descriptionoffset", "<=", "0", ")", ":", "return", "colormapped", "=", "self", ".", "_colormap", "is", "not", "None", "if", "self", ".", "_imagej", ":", "isrgb", "=", "self", ".", "_shape", "[", "-", "1", "]", "in", "(", "3", ",", "4", ")", "description", "=", "imagej_description", "(", "self", ".", "_datashape", ",", "isrgb", ",", "colormapped", ",", "*", "*", "self", ".", "_metadata", ")", "else", ":", "description", "=", "json_description", "(", "self", ".", "_datashape", ",", "*", "*", "self", ".", "_metadata", ")", "# rewrite description and its length to file", "description", "=", "description", ".", "encode", "(", "'utf-8'", ")", "description", "=", "description", "[", ":", "self", ".", "_descriptionlen", "-", "1", "]", "pos", "=", "self", ".", "_fh", ".", "tell", "(", ")", "self", ".", "_fh", ".", "seek", "(", "self", ".", "_descriptionoffset", ")", "self", ".", "_fh", ".", "write", "(", "description", ")", "self", ".", "_fh", ".", "seek", "(", "self", ".", "_descriptionlenoffset", ")", "self", ".", "_fh", ".", "write", "(", "struct", ".", "pack", "(", "self", ".", "_byteorder", "+", "self", ".", "_offsetformat", ",", "len", "(", "description", ")", "+", "1", ")", ")", "self", ".", "_fh", ".", "seek", "(", "pos", ")", "self", ".", "_descriptionoffset", "=", "0", "self", ".", "_descriptionlenoffset", "=", "0", "self", ".", "_descriptionlen", "=", "0" ]
Write metadata to ImageDescription tag.
[ "Write", "metadata", "to", "ImageDescription", "tag", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L1865-L1892
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffWriter.close
def close(self): """Write remaining pages and close file handle.""" if not self._truncate: self._write_remaining_pages() self._write_image_description() self._fh.close()
python
def close(self): """Write remaining pages and close file handle.""" if not self._truncate: self._write_remaining_pages() self._write_image_description() self._fh.close()
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_truncate", ":", "self", ".", "_write_remaining_pages", "(", ")", "self", ".", "_write_image_description", "(", ")", "self", ".", "_fh", ".", "close", "(", ")" ]
Write remaining pages and close file handle.
[ "Write", "remaining", "pages", "and", "close", "file", "handle", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L1898-L1903
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.close
def close(self): """Close open file handle(s).""" for tif in self._files.values(): tif.filehandle.close() self._files = {}
python
def close(self): """Close open file handle(s).""" for tif in self._files.values(): tif.filehandle.close() self._files = {}
[ "def", "close", "(", "self", ")", ":", "for", "tif", "in", "self", ".", "_files", ".", "values", "(", ")", ":", "tif", ".", "filehandle", ".", "close", "(", ")", "self", ".", "_files", "=", "{", "}" ]
Close open file handle(s).
[ "Close", "open", "file", "handle", "(", "s", ")", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2055-L2059
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.asarray
def asarray(self, key=None, series=None, out=None, validate=True, maxworkers=None): """Return image data from selected TIFF page(s) as numpy array. By default, the data from the first series is returned. Parameters ---------- key : int, slice, or sequence of indices Defines which pages to return as array. If None (default), data from a series (default 0) is returned. If not None, data from the specified pages in the whole file (if 'series' is None) or a specified series are returned as a stacked array. Requesting an array from multiple pages that are not compatible wrt. shape, dtype, compression etc is undefined, i.e. may crash or return incorrect values. series : int or TiffPageSeries Defines which series of pages to return as array. out : numpy.ndarray, str, or file-like object Buffer where image data will be saved. If None (default), a new array will be created. If numpy.ndarray, a writable array of compatible dtype and shape. If 'memmap', directly memory-map the image data in the TIFF file if possible; else create a memory-mapped array in a temporary file. If str or open file, the file name or file object used to create a memory-map to an array stored in a binary file on disk. validate : bool If True (default), validate various tags. Passed to TiffPage.asarray(). maxworkers : int or None Maximum number of threads to concurrently get data from pages or tiles. If None (default), mutli-threading is enabled if data are compressed. If 0, up to half the CPU cores are used. If 1, mutli-threading is disabled. Reading data from file is limited to a single thread. Using multiple threads can significantly speed up this function if the bottleneck is decoding compressed data, e.g. in case of large LZW compressed LSM files or JPEG compressed tiled slides. If the bottleneck is I/O or pure Python code, using multiple threads might be detrimental. Returns ------- numpy.ndarray Image data from the specified pages. 
See the TiffPage.asarray function for operations that are applied (or not) to the raw data stored in the file. """ if not self.pages: return numpy.array([]) if key is None and series is None: series = 0 if series is None: pages = self.pages else: try: series = self.series[series] except (KeyError, TypeError): pass pages = series.pages if key is None: pass elif series is None: pages = self.pages._getlist(key) elif isinstance(key, inttypes): pages = [pages[key]] elif isinstance(key, slice): pages = pages[key] elif isinstance(key, Iterable): pages = [pages[k] for k in key] else: raise TypeError('key must be an int, slice, or sequence') if not pages: raise ValueError('no pages selected') if key is None and series and series.offset: typecode = self.byteorder + series.dtype.char if pages[0].is_memmappable and (isinstance(out, str) and out == 'memmap'): # direct mapping result = self.filehandle.memmap_array( typecode, series.shape, series.offset) else: # read into output if out is not None: out = create_output(out, series.shape, series.dtype) self.filehandle.seek(series.offset) result = self.filehandle.read_array( typecode, product(series.shape), out=out) elif len(pages) == 1: result = pages[0].asarray(out=out, validate=validate, maxworkers=maxworkers) else: result = stack_pages(pages, out=out, maxworkers=maxworkers) if result is None: return None if key is None: try: result.shape = series.shape except ValueError: try: log.warning('TiffFile.asarray: failed to reshape %s to %s', result.shape, series.shape) # try series of expected shapes result.shape = (-1,) + series.shape except ValueError: # revert to generic shape result.shape = (-1,) + pages[0].shape elif len(pages) == 1: result.shape = pages[0].shape else: result.shape = (-1,) + pages[0].shape return result
python
def asarray(self, key=None, series=None, out=None, validate=True, maxworkers=None): """Return image data from selected TIFF page(s) as numpy array. By default, the data from the first series is returned. Parameters ---------- key : int, slice, or sequence of indices Defines which pages to return as array. If None (default), data from a series (default 0) is returned. If not None, data from the specified pages in the whole file (if 'series' is None) or a specified series are returned as a stacked array. Requesting an array from multiple pages that are not compatible wrt. shape, dtype, compression etc is undefined, i.e. may crash or return incorrect values. series : int or TiffPageSeries Defines which series of pages to return as array. out : numpy.ndarray, str, or file-like object Buffer where image data will be saved. If None (default), a new array will be created. If numpy.ndarray, a writable array of compatible dtype and shape. If 'memmap', directly memory-map the image data in the TIFF file if possible; else create a memory-mapped array in a temporary file. If str or open file, the file name or file object used to create a memory-map to an array stored in a binary file on disk. validate : bool If True (default), validate various tags. Passed to TiffPage.asarray(). maxworkers : int or None Maximum number of threads to concurrently get data from pages or tiles. If None (default), mutli-threading is enabled if data are compressed. If 0, up to half the CPU cores are used. If 1, mutli-threading is disabled. Reading data from file is limited to a single thread. Using multiple threads can significantly speed up this function if the bottleneck is decoding compressed data, e.g. in case of large LZW compressed LSM files or JPEG compressed tiled slides. If the bottleneck is I/O or pure Python code, using multiple threads might be detrimental. Returns ------- numpy.ndarray Image data from the specified pages. 
See the TiffPage.asarray function for operations that are applied (or not) to the raw data stored in the file. """ if not self.pages: return numpy.array([]) if key is None and series is None: series = 0 if series is None: pages = self.pages else: try: series = self.series[series] except (KeyError, TypeError): pass pages = series.pages if key is None: pass elif series is None: pages = self.pages._getlist(key) elif isinstance(key, inttypes): pages = [pages[key]] elif isinstance(key, slice): pages = pages[key] elif isinstance(key, Iterable): pages = [pages[k] for k in key] else: raise TypeError('key must be an int, slice, or sequence') if not pages: raise ValueError('no pages selected') if key is None and series and series.offset: typecode = self.byteorder + series.dtype.char if pages[0].is_memmappable and (isinstance(out, str) and out == 'memmap'): # direct mapping result = self.filehandle.memmap_array( typecode, series.shape, series.offset) else: # read into output if out is not None: out = create_output(out, series.shape, series.dtype) self.filehandle.seek(series.offset) result = self.filehandle.read_array( typecode, product(series.shape), out=out) elif len(pages) == 1: result = pages[0].asarray(out=out, validate=validate, maxworkers=maxworkers) else: result = stack_pages(pages, out=out, maxworkers=maxworkers) if result is None: return None if key is None: try: result.shape = series.shape except ValueError: try: log.warning('TiffFile.asarray: failed to reshape %s to %s', result.shape, series.shape) # try series of expected shapes result.shape = (-1,) + series.shape except ValueError: # revert to generic shape result.shape = (-1,) + pages[0].shape elif len(pages) == 1: result.shape = pages[0].shape else: result.shape = (-1,) + pages[0].shape return result
[ "def", "asarray", "(", "self", ",", "key", "=", "None", ",", "series", "=", "None", ",", "out", "=", "None", ",", "validate", "=", "True", ",", "maxworkers", "=", "None", ")", ":", "if", "not", "self", ".", "pages", ":", "return", "numpy", ".", "array", "(", "[", "]", ")", "if", "key", "is", "None", "and", "series", "is", "None", ":", "series", "=", "0", "if", "series", "is", "None", ":", "pages", "=", "self", ".", "pages", "else", ":", "try", ":", "series", "=", "self", ".", "series", "[", "series", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "pass", "pages", "=", "series", ".", "pages", "if", "key", "is", "None", ":", "pass", "elif", "series", "is", "None", ":", "pages", "=", "self", ".", "pages", ".", "_getlist", "(", "key", ")", "elif", "isinstance", "(", "key", ",", "inttypes", ")", ":", "pages", "=", "[", "pages", "[", "key", "]", "]", "elif", "isinstance", "(", "key", ",", "slice", ")", ":", "pages", "=", "pages", "[", "key", "]", "elif", "isinstance", "(", "key", ",", "Iterable", ")", ":", "pages", "=", "[", "pages", "[", "k", "]", "for", "k", "in", "key", "]", "else", ":", "raise", "TypeError", "(", "'key must be an int, slice, or sequence'", ")", "if", "not", "pages", ":", "raise", "ValueError", "(", "'no pages selected'", ")", "if", "key", "is", "None", "and", "series", "and", "series", ".", "offset", ":", "typecode", "=", "self", ".", "byteorder", "+", "series", ".", "dtype", ".", "char", "if", "pages", "[", "0", "]", ".", "is_memmappable", "and", "(", "isinstance", "(", "out", ",", "str", ")", "and", "out", "==", "'memmap'", ")", ":", "# direct mapping", "result", "=", "self", ".", "filehandle", ".", "memmap_array", "(", "typecode", ",", "series", ".", "shape", ",", "series", ".", "offset", ")", "else", ":", "# read into output", "if", "out", "is", "not", "None", ":", "out", "=", "create_output", "(", "out", ",", "series", ".", "shape", ",", "series", ".", "dtype", ")", "self", ".", "filehandle", ".", "seek", "(", "series", ".", 
"offset", ")", "result", "=", "self", ".", "filehandle", ".", "read_array", "(", "typecode", ",", "product", "(", "series", ".", "shape", ")", ",", "out", "=", "out", ")", "elif", "len", "(", "pages", ")", "==", "1", ":", "result", "=", "pages", "[", "0", "]", ".", "asarray", "(", "out", "=", "out", ",", "validate", "=", "validate", ",", "maxworkers", "=", "maxworkers", ")", "else", ":", "result", "=", "stack_pages", "(", "pages", ",", "out", "=", "out", ",", "maxworkers", "=", "maxworkers", ")", "if", "result", "is", "None", ":", "return", "None", "if", "key", "is", "None", ":", "try", ":", "result", ".", "shape", "=", "series", ".", "shape", "except", "ValueError", ":", "try", ":", "log", ".", "warning", "(", "'TiffFile.asarray: failed to reshape %s to %s'", ",", "result", ".", "shape", ",", "series", ".", "shape", ")", "# try series of expected shapes", "result", ".", "shape", "=", "(", "-", "1", ",", ")", "+", "series", ".", "shape", "except", "ValueError", ":", "# revert to generic shape", "result", ".", "shape", "=", "(", "-", "1", ",", ")", "+", "pages", "[", "0", "]", ".", "shape", "elif", "len", "(", "pages", ")", "==", "1", ":", "result", ".", "shape", "=", "pages", "[", "0", "]", ".", "shape", "else", ":", "result", ".", "shape", "=", "(", "-", "1", ",", ")", "+", "pages", "[", "0", "]", ".", "shape", "return", "result" ]
Return image data from selected TIFF page(s) as numpy array. By default, the data from the first series is returned. Parameters ---------- key : int, slice, or sequence of indices Defines which pages to return as array. If None (default), data from a series (default 0) is returned. If not None, data from the specified pages in the whole file (if 'series' is None) or a specified series are returned as a stacked array. Requesting an array from multiple pages that are not compatible wrt. shape, dtype, compression etc is undefined, i.e. may crash or return incorrect values. series : int or TiffPageSeries Defines which series of pages to return as array. out : numpy.ndarray, str, or file-like object Buffer where image data will be saved. If None (default), a new array will be created. If numpy.ndarray, a writable array of compatible dtype and shape. If 'memmap', directly memory-map the image data in the TIFF file if possible; else create a memory-mapped array in a temporary file. If str or open file, the file name or file object used to create a memory-map to an array stored in a binary file on disk. validate : bool If True (default), validate various tags. Passed to TiffPage.asarray(). maxworkers : int or None Maximum number of threads to concurrently get data from pages or tiles. If None (default), mutli-threading is enabled if data are compressed. If 0, up to half the CPU cores are used. If 1, mutli-threading is disabled. Reading data from file is limited to a single thread. Using multiple threads can significantly speed up this function if the bottleneck is decoding compressed data, e.g. in case of large LZW compressed LSM files or JPEG compressed tiled slides. If the bottleneck is I/O or pure Python code, using multiple threads might be detrimental. Returns ------- numpy.ndarray Image data from the specified pages. See the TiffPage.asarray function for operations that are applied (or not) to the raw data stored in the file.
[ "Return", "image", "data", "from", "selected", "TIFF", "page", "(", "s", ")", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2061-L2179
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.series
def series(self): """Return related pages as TiffPageSeries. Side effect: after calling this function, TiffFile.pages might contain TiffPage and TiffFrame instances. """ if not self.pages: return [] useframes = self.pages.useframes keyframe = self.pages.keyframe.index series = [] for name in ('lsm', 'ome', 'imagej', 'shaped', 'fluoview', 'sis', 'uniform', 'mdgel'): if getattr(self, 'is_' + name, False): series = getattr(self, '_series_' + name)() break self.pages.useframes = useframes self.pages.keyframe = keyframe if not series: series = self._series_generic() # remove empty series, e.g. in MD Gel files series = [s for s in series if product(s.shape) > 0] for i, s in enumerate(series): s.index = i return series
python
def series(self): """Return related pages as TiffPageSeries. Side effect: after calling this function, TiffFile.pages might contain TiffPage and TiffFrame instances. """ if not self.pages: return [] useframes = self.pages.useframes keyframe = self.pages.keyframe.index series = [] for name in ('lsm', 'ome', 'imagej', 'shaped', 'fluoview', 'sis', 'uniform', 'mdgel'): if getattr(self, 'is_' + name, False): series = getattr(self, '_series_' + name)() break self.pages.useframes = useframes self.pages.keyframe = keyframe if not series: series = self._series_generic() # remove empty series, e.g. in MD Gel files series = [s for s in series if product(s.shape) > 0] for i, s in enumerate(series): s.index = i return series
[ "def", "series", "(", "self", ")", ":", "if", "not", "self", ".", "pages", ":", "return", "[", "]", "useframes", "=", "self", ".", "pages", ".", "useframes", "keyframe", "=", "self", ".", "pages", ".", "keyframe", ".", "index", "series", "=", "[", "]", "for", "name", "in", "(", "'lsm'", ",", "'ome'", ",", "'imagej'", ",", "'shaped'", ",", "'fluoview'", ",", "'sis'", ",", "'uniform'", ",", "'mdgel'", ")", ":", "if", "getattr", "(", "self", ",", "'is_'", "+", "name", ",", "False", ")", ":", "series", "=", "getattr", "(", "self", ",", "'_series_'", "+", "name", ")", "(", ")", "break", "self", ".", "pages", ".", "useframes", "=", "useframes", "self", ".", "pages", ".", "keyframe", "=", "keyframe", "if", "not", "series", ":", "series", "=", "self", ".", "_series_generic", "(", ")", "# remove empty series, e.g. in MD Gel files", "series", "=", "[", "s", "for", "s", "in", "series", "if", "product", "(", "s", ".", "shape", ")", ">", "0", "]", "for", "i", ",", "s", "in", "enumerate", "(", "series", ")", ":", "s", ".", "index", "=", "i", "return", "series" ]
Return related pages as TiffPageSeries. Side effect: after calling this function, TiffFile.pages might contain TiffPage and TiffFrame instances.
[ "Return", "related", "pages", "as", "TiffPageSeries", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2182-L2210
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_generic
def _series_generic(self): """Return image series in file. A series is a sequence of TiffPages with the same hash. """ pages = self.pages pages._clear(False) pages.useframes = False if pages.cache: pages._load() result = [] keys = [] series = {} for page in pages: if not page.shape or product(page.shape) == 0: continue key = page.hash if key in series: series[key].append(page) else: keys.append(key) series[key] = [page] for key in keys: pages = series[key] page = pages[0] shape = page.shape axes = page.axes if len(pages) > 1: shape = (len(pages),) + shape axes = 'I' + axes result.append(TiffPageSeries(pages, shape, page.dtype, axes, kind='Generic')) self.is_uniform = len(result) == 1 return result
python
def _series_generic(self): """Return image series in file. A series is a sequence of TiffPages with the same hash. """ pages = self.pages pages._clear(False) pages.useframes = False if pages.cache: pages._load() result = [] keys = [] series = {} for page in pages: if not page.shape or product(page.shape) == 0: continue key = page.hash if key in series: series[key].append(page) else: keys.append(key) series[key] = [page] for key in keys: pages = series[key] page = pages[0] shape = page.shape axes = page.axes if len(pages) > 1: shape = (len(pages),) + shape axes = 'I' + axes result.append(TiffPageSeries(pages, shape, page.dtype, axes, kind='Generic')) self.is_uniform = len(result) == 1 return result
[ "def", "_series_generic", "(", "self", ")", ":", "pages", "=", "self", ".", "pages", "pages", ".", "_clear", "(", "False", ")", "pages", ".", "useframes", "=", "False", "if", "pages", ".", "cache", ":", "pages", ".", "_load", "(", ")", "result", "=", "[", "]", "keys", "=", "[", "]", "series", "=", "{", "}", "for", "page", "in", "pages", ":", "if", "not", "page", ".", "shape", "or", "product", "(", "page", ".", "shape", ")", "==", "0", ":", "continue", "key", "=", "page", ".", "hash", "if", "key", "in", "series", ":", "series", "[", "key", "]", ".", "append", "(", "page", ")", "else", ":", "keys", ".", "append", "(", "key", ")", "series", "[", "key", "]", "=", "[", "page", "]", "for", "key", "in", "keys", ":", "pages", "=", "series", "[", "key", "]", "page", "=", "pages", "[", "0", "]", "shape", "=", "page", ".", "shape", "axes", "=", "page", ".", "axes", "if", "len", "(", "pages", ")", ">", "1", ":", "shape", "=", "(", "len", "(", "pages", ")", ",", ")", "+", "shape", "axes", "=", "'I'", "+", "axes", "result", ".", "append", "(", "TiffPageSeries", "(", "pages", ",", "shape", ",", "page", ".", "dtype", ",", "axes", ",", "kind", "=", "'Generic'", ")", ")", "self", ".", "is_uniform", "=", "len", "(", "result", ")", "==", "1", "return", "result" ]
Return image series in file. A series is a sequence of TiffPages with the same hash.
[ "Return", "image", "series", "in", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2212-L2249
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_uniform
def _series_uniform(self): """Return all images in file as single series.""" page = self.pages[0] shape = page.shape axes = page.axes dtype = page.dtype validate = not (page.is_scanimage or page.is_nih) pages = self.pages._getlist(validate=validate) lenpages = len(pages) if lenpages > 1: shape = (lenpages,) + shape axes = 'I' + axes if page.is_scanimage: kind = 'ScanImage' elif page.is_nih: kind = 'NIHImage' else: kind = 'Uniform' return [TiffPageSeries(pages, shape, dtype, axes, kind=kind)]
python
def _series_uniform(self): """Return all images in file as single series.""" page = self.pages[0] shape = page.shape axes = page.axes dtype = page.dtype validate = not (page.is_scanimage or page.is_nih) pages = self.pages._getlist(validate=validate) lenpages = len(pages) if lenpages > 1: shape = (lenpages,) + shape axes = 'I' + axes if page.is_scanimage: kind = 'ScanImage' elif page.is_nih: kind = 'NIHImage' else: kind = 'Uniform' return [TiffPageSeries(pages, shape, dtype, axes, kind=kind)]
[ "def", "_series_uniform", "(", "self", ")", ":", "page", "=", "self", ".", "pages", "[", "0", "]", "shape", "=", "page", ".", "shape", "axes", "=", "page", ".", "axes", "dtype", "=", "page", ".", "dtype", "validate", "=", "not", "(", "page", ".", "is_scanimage", "or", "page", ".", "is_nih", ")", "pages", "=", "self", ".", "pages", ".", "_getlist", "(", "validate", "=", "validate", ")", "lenpages", "=", "len", "(", "pages", ")", "if", "lenpages", ">", "1", ":", "shape", "=", "(", "lenpages", ",", ")", "+", "shape", "axes", "=", "'I'", "+", "axes", "if", "page", ".", "is_scanimage", ":", "kind", "=", "'ScanImage'", "elif", "page", ".", "is_nih", ":", "kind", "=", "'NIHImage'", "else", ":", "kind", "=", "'Uniform'", "return", "[", "TiffPageSeries", "(", "pages", ",", "shape", ",", "dtype", ",", "axes", ",", "kind", "=", "kind", ")", "]" ]
Return all images in file as single series.
[ "Return", "all", "images", "in", "file", "as", "single", "series", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2251-L2269
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_shaped
def _series_shaped(self): """Return image series in "shaped" file.""" pages = self.pages pages.useframes = True lenpages = len(pages) def append_series(series, pages, axes, shape, reshape, name, truncated): page = pages[0] if not axes: shape = page.shape axes = page.axes if len(pages) > 1: shape = (len(pages),) + shape axes = 'Q' + axes size = product(shape) resize = product(reshape) if page.is_contiguous and resize > size and resize % size == 0: if truncated is None: truncated = True axes = 'Q' + axes shape = (resize // size,) + shape try: axes = reshape_axes(axes, shape, reshape) shape = reshape except ValueError as exc: log.warning('Shaped series: %s: %s', exc.__class__.__name__, exc) series.append( TiffPageSeries(pages, shape, page.dtype, axes, name=name, kind='Shaped', truncated=truncated)) keyframe = axes = shape = reshape = name = None series = [] index = 0 while True: if index >= lenpages: break # new keyframe; start of new series pages.keyframe = index keyframe = pages.keyframe if not keyframe.is_shaped: log.warning( 'Shaped series: invalid metadata or corrupted file') return None # read metadata axes = None shape = None metadata = json_description_metadata(keyframe.is_shaped) name = metadata.get('name', '') reshape = metadata['shape'] truncated = metadata.get('truncated', None) if 'axes' in metadata: axes = metadata['axes'] if len(axes) == len(reshape): shape = reshape else: axes = '' log.warning('Shaped series: axes do not match shape') # skip pages if possible spages = [keyframe] size = product(reshape) npages, mod = divmod(size, product(keyframe.shape)) if mod: log.warning( 'Shaped series: series shape does not match page shape') return None if 1 < npages <= lenpages - index: size *= keyframe._dtype.itemsize if truncated: npages = 1 elif (keyframe.is_final and keyframe.offset + size < pages[index+1].offset): truncated = False else: # need to read all pages for series truncated = False for j in range(index+1, index+npages): page = pages[j] page.keyframe 
= keyframe spages.append(page) append_series(series, spages, axes, shape, reshape, name, truncated) index += npages self.is_uniform = len(series) == 1 return series
python
def _series_shaped(self): """Return image series in "shaped" file.""" pages = self.pages pages.useframes = True lenpages = len(pages) def append_series(series, pages, axes, shape, reshape, name, truncated): page = pages[0] if not axes: shape = page.shape axes = page.axes if len(pages) > 1: shape = (len(pages),) + shape axes = 'Q' + axes size = product(shape) resize = product(reshape) if page.is_contiguous and resize > size and resize % size == 0: if truncated is None: truncated = True axes = 'Q' + axes shape = (resize // size,) + shape try: axes = reshape_axes(axes, shape, reshape) shape = reshape except ValueError as exc: log.warning('Shaped series: %s: %s', exc.__class__.__name__, exc) series.append( TiffPageSeries(pages, shape, page.dtype, axes, name=name, kind='Shaped', truncated=truncated)) keyframe = axes = shape = reshape = name = None series = [] index = 0 while True: if index >= lenpages: break # new keyframe; start of new series pages.keyframe = index keyframe = pages.keyframe if not keyframe.is_shaped: log.warning( 'Shaped series: invalid metadata or corrupted file') return None # read metadata axes = None shape = None metadata = json_description_metadata(keyframe.is_shaped) name = metadata.get('name', '') reshape = metadata['shape'] truncated = metadata.get('truncated', None) if 'axes' in metadata: axes = metadata['axes'] if len(axes) == len(reshape): shape = reshape else: axes = '' log.warning('Shaped series: axes do not match shape') # skip pages if possible spages = [keyframe] size = product(reshape) npages, mod = divmod(size, product(keyframe.shape)) if mod: log.warning( 'Shaped series: series shape does not match page shape') return None if 1 < npages <= lenpages - index: size *= keyframe._dtype.itemsize if truncated: npages = 1 elif (keyframe.is_final and keyframe.offset + size < pages[index+1].offset): truncated = False else: # need to read all pages for series truncated = False for j in range(index+1, index+npages): page = pages[j] page.keyframe 
= keyframe spages.append(page) append_series(series, spages, axes, shape, reshape, name, truncated) index += npages self.is_uniform = len(series) == 1 return series
[ "def", "_series_shaped", "(", "self", ")", ":", "pages", "=", "self", ".", "pages", "pages", ".", "useframes", "=", "True", "lenpages", "=", "len", "(", "pages", ")", "def", "append_series", "(", "series", ",", "pages", ",", "axes", ",", "shape", ",", "reshape", ",", "name", ",", "truncated", ")", ":", "page", "=", "pages", "[", "0", "]", "if", "not", "axes", ":", "shape", "=", "page", ".", "shape", "axes", "=", "page", ".", "axes", "if", "len", "(", "pages", ")", ">", "1", ":", "shape", "=", "(", "len", "(", "pages", ")", ",", ")", "+", "shape", "axes", "=", "'Q'", "+", "axes", "size", "=", "product", "(", "shape", ")", "resize", "=", "product", "(", "reshape", ")", "if", "page", ".", "is_contiguous", "and", "resize", ">", "size", "and", "resize", "%", "size", "==", "0", ":", "if", "truncated", "is", "None", ":", "truncated", "=", "True", "axes", "=", "'Q'", "+", "axes", "shape", "=", "(", "resize", "//", "size", ",", ")", "+", "shape", "try", ":", "axes", "=", "reshape_axes", "(", "axes", ",", "shape", ",", "reshape", ")", "shape", "=", "reshape", "except", "ValueError", "as", "exc", ":", "log", ".", "warning", "(", "'Shaped series: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "series", ".", "append", "(", "TiffPageSeries", "(", "pages", ",", "shape", ",", "page", ".", "dtype", ",", "axes", ",", "name", "=", "name", ",", "kind", "=", "'Shaped'", ",", "truncated", "=", "truncated", ")", ")", "keyframe", "=", "axes", "=", "shape", "=", "reshape", "=", "name", "=", "None", "series", "=", "[", "]", "index", "=", "0", "while", "True", ":", "if", "index", ">=", "lenpages", ":", "break", "# new keyframe; start of new series", "pages", ".", "keyframe", "=", "index", "keyframe", "=", "pages", ".", "keyframe", "if", "not", "keyframe", ".", "is_shaped", ":", "log", ".", "warning", "(", "'Shaped series: invalid metadata or corrupted file'", ")", "return", "None", "# read metadata", "axes", "=", "None", "shape", "=", "None", "metadata", "=", 
"json_description_metadata", "(", "keyframe", ".", "is_shaped", ")", "name", "=", "metadata", ".", "get", "(", "'name'", ",", "''", ")", "reshape", "=", "metadata", "[", "'shape'", "]", "truncated", "=", "metadata", ".", "get", "(", "'truncated'", ",", "None", ")", "if", "'axes'", "in", "metadata", ":", "axes", "=", "metadata", "[", "'axes'", "]", "if", "len", "(", "axes", ")", "==", "len", "(", "reshape", ")", ":", "shape", "=", "reshape", "else", ":", "axes", "=", "''", "log", ".", "warning", "(", "'Shaped series: axes do not match shape'", ")", "# skip pages if possible", "spages", "=", "[", "keyframe", "]", "size", "=", "product", "(", "reshape", ")", "npages", ",", "mod", "=", "divmod", "(", "size", ",", "product", "(", "keyframe", ".", "shape", ")", ")", "if", "mod", ":", "log", ".", "warning", "(", "'Shaped series: series shape does not match page shape'", ")", "return", "None", "if", "1", "<", "npages", "<=", "lenpages", "-", "index", ":", "size", "*=", "keyframe", ".", "_dtype", ".", "itemsize", "if", "truncated", ":", "npages", "=", "1", "elif", "(", "keyframe", ".", "is_final", "and", "keyframe", ".", "offset", "+", "size", "<", "pages", "[", "index", "+", "1", "]", ".", "offset", ")", ":", "truncated", "=", "False", "else", ":", "# need to read all pages for series", "truncated", "=", "False", "for", "j", "in", "range", "(", "index", "+", "1", ",", "index", "+", "npages", ")", ":", "page", "=", "pages", "[", "j", "]", "page", ".", "keyframe", "=", "keyframe", "spages", ".", "append", "(", "page", ")", "append_series", "(", "series", ",", "spages", ",", "axes", ",", "shape", ",", "reshape", ",", "name", ",", "truncated", ")", "index", "+=", "npages", "self", ".", "is_uniform", "=", "len", "(", "series", ")", "==", "1", "return", "series" ]
Return image series in "shaped" file.
[ "Return", "image", "series", "in", "shaped", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2271-L2358
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_imagej
def _series_imagej(self): """Return image series in ImageJ file.""" # ImageJ's dimension order is always TZCYXS # TODO: fix loading of color, composite, or palette images pages = self.pages pages.useframes = True pages.keyframe = 0 page = pages[0] ij = self.imagej_metadata def is_hyperstack(): # ImageJ hyperstack store all image metadata in the first page and # image data are stored contiguously before the second page, if any if not page.is_final: return False images = ij.get('images', 0) if images <= 1: return False offset, count = page.is_contiguous if (count != product(page.shape) * page.bitspersample // 8 or offset + count*images > self.filehandle.size): raise ValueError() # check that next page is stored after data if len(pages) > 1 and offset + count*images > pages[1].offset: return False return True try: hyperstack = is_hyperstack() except ValueError: log.warning('ImageJ series: invalid metadata or corrupted file') return None if hyperstack: # no need to read other pages pages = [page] else: pages = pages[:] shape = [] axes = [] if 'frames' in ij: shape.append(ij['frames']) axes.append('T') if 'slices' in ij: shape.append(ij['slices']) axes.append('Z') if 'channels' in ij and not (page.photometric == 2 and not ij.get('hyperstack', False)): shape.append(ij['channels']) axes.append('C') remain = ij.get('images', len(pages))//(product(shape) if shape else 1) if remain > 1: shape.append(remain) axes.append('I') if page.axes[0] == 'I': # contiguous multiple images shape.extend(page.shape[1:]) axes.extend(page.axes[1:]) elif page.axes[:2] == 'SI': # color-mapped contiguous multiple images shape = page.shape[0:1] + tuple(shape) + page.shape[2:] axes = list(page.axes[0]) + axes + list(page.axes[2:]) else: shape.extend(page.shape) axes.extend(page.axes) truncated = ( hyperstack and len(self.pages) == 1 and page.is_contiguous[1] != product(shape) * page.bitspersample // 8) self.is_uniform = True return [TiffPageSeries(pages, shape, page.dtype, axes, kind='ImageJ', 
truncated=truncated)]
python
def _series_imagej(self): """Return image series in ImageJ file.""" # ImageJ's dimension order is always TZCYXS # TODO: fix loading of color, composite, or palette images pages = self.pages pages.useframes = True pages.keyframe = 0 page = pages[0] ij = self.imagej_metadata def is_hyperstack(): # ImageJ hyperstack store all image metadata in the first page and # image data are stored contiguously before the second page, if any if not page.is_final: return False images = ij.get('images', 0) if images <= 1: return False offset, count = page.is_contiguous if (count != product(page.shape) * page.bitspersample // 8 or offset + count*images > self.filehandle.size): raise ValueError() # check that next page is stored after data if len(pages) > 1 and offset + count*images > pages[1].offset: return False return True try: hyperstack = is_hyperstack() except ValueError: log.warning('ImageJ series: invalid metadata or corrupted file') return None if hyperstack: # no need to read other pages pages = [page] else: pages = pages[:] shape = [] axes = [] if 'frames' in ij: shape.append(ij['frames']) axes.append('T') if 'slices' in ij: shape.append(ij['slices']) axes.append('Z') if 'channels' in ij and not (page.photometric == 2 and not ij.get('hyperstack', False)): shape.append(ij['channels']) axes.append('C') remain = ij.get('images', len(pages))//(product(shape) if shape else 1) if remain > 1: shape.append(remain) axes.append('I') if page.axes[0] == 'I': # contiguous multiple images shape.extend(page.shape[1:]) axes.extend(page.axes[1:]) elif page.axes[:2] == 'SI': # color-mapped contiguous multiple images shape = page.shape[0:1] + tuple(shape) + page.shape[2:] axes = list(page.axes[0]) + axes + list(page.axes[2:]) else: shape.extend(page.shape) axes.extend(page.axes) truncated = ( hyperstack and len(self.pages) == 1 and page.is_contiguous[1] != product(shape) * page.bitspersample // 8) self.is_uniform = True return [TiffPageSeries(pages, shape, page.dtype, axes, kind='ImageJ', 
truncated=truncated)]
[ "def", "_series_imagej", "(", "self", ")", ":", "# ImageJ's dimension order is always TZCYXS", "# TODO: fix loading of color, composite, or palette images", "pages", "=", "self", ".", "pages", "pages", ".", "useframes", "=", "True", "pages", ".", "keyframe", "=", "0", "page", "=", "pages", "[", "0", "]", "ij", "=", "self", ".", "imagej_metadata", "def", "is_hyperstack", "(", ")", ":", "# ImageJ hyperstack store all image metadata in the first page and", "# image data are stored contiguously before the second page, if any", "if", "not", "page", ".", "is_final", ":", "return", "False", "images", "=", "ij", ".", "get", "(", "'images'", ",", "0", ")", "if", "images", "<=", "1", ":", "return", "False", "offset", ",", "count", "=", "page", ".", "is_contiguous", "if", "(", "count", "!=", "product", "(", "page", ".", "shape", ")", "*", "page", ".", "bitspersample", "//", "8", "or", "offset", "+", "count", "*", "images", ">", "self", ".", "filehandle", ".", "size", ")", ":", "raise", "ValueError", "(", ")", "# check that next page is stored after data", "if", "len", "(", "pages", ")", ">", "1", "and", "offset", "+", "count", "*", "images", ">", "pages", "[", "1", "]", ".", "offset", ":", "return", "False", "return", "True", "try", ":", "hyperstack", "=", "is_hyperstack", "(", ")", "except", "ValueError", ":", "log", ".", "warning", "(", "'ImageJ series: invalid metadata or corrupted file'", ")", "return", "None", "if", "hyperstack", ":", "# no need to read other pages", "pages", "=", "[", "page", "]", "else", ":", "pages", "=", "pages", "[", ":", "]", "shape", "=", "[", "]", "axes", "=", "[", "]", "if", "'frames'", "in", "ij", ":", "shape", ".", "append", "(", "ij", "[", "'frames'", "]", ")", "axes", ".", "append", "(", "'T'", ")", "if", "'slices'", "in", "ij", ":", "shape", ".", "append", "(", "ij", "[", "'slices'", "]", ")", "axes", ".", "append", "(", "'Z'", ")", "if", "'channels'", "in", "ij", "and", "not", "(", "page", ".", "photometric", "==", "2", "and", "not", 
"ij", ".", "get", "(", "'hyperstack'", ",", "False", ")", ")", ":", "shape", ".", "append", "(", "ij", "[", "'channels'", "]", ")", "axes", ".", "append", "(", "'C'", ")", "remain", "=", "ij", ".", "get", "(", "'images'", ",", "len", "(", "pages", ")", ")", "//", "(", "product", "(", "shape", ")", "if", "shape", "else", "1", ")", "if", "remain", ">", "1", ":", "shape", ".", "append", "(", "remain", ")", "axes", ".", "append", "(", "'I'", ")", "if", "page", ".", "axes", "[", "0", "]", "==", "'I'", ":", "# contiguous multiple images", "shape", ".", "extend", "(", "page", ".", "shape", "[", "1", ":", "]", ")", "axes", ".", "extend", "(", "page", ".", "axes", "[", "1", ":", "]", ")", "elif", "page", ".", "axes", "[", ":", "2", "]", "==", "'SI'", ":", "# color-mapped contiguous multiple images", "shape", "=", "page", ".", "shape", "[", "0", ":", "1", "]", "+", "tuple", "(", "shape", ")", "+", "page", ".", "shape", "[", "2", ":", "]", "axes", "=", "list", "(", "page", ".", "axes", "[", "0", "]", ")", "+", "axes", "+", "list", "(", "page", ".", "axes", "[", "2", ":", "]", ")", "else", ":", "shape", ".", "extend", "(", "page", ".", "shape", ")", "axes", ".", "extend", "(", "page", ".", "axes", ")", "truncated", "=", "(", "hyperstack", "and", "len", "(", "self", ".", "pages", ")", "==", "1", "and", "page", ".", "is_contiguous", "[", "1", "]", "!=", "product", "(", "shape", ")", "*", "page", ".", "bitspersample", "//", "8", ")", "self", ".", "is_uniform", "=", "True", "return", "[", "TiffPageSeries", "(", "pages", ",", "shape", ",", "page", ".", "dtype", ",", "axes", ",", "kind", "=", "'ImageJ'", ",", "truncated", "=", "truncated", ")", "]" ]
Return image series in ImageJ file.
[ "Return", "image", "series", "in", "ImageJ", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2360-L2433
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_fluoview
def _series_fluoview(self): """Return image series in FluoView file.""" pages = self.pages._getlist(validate=False) mm = self.fluoview_metadata mmhd = list(reversed(mm['Dimensions'])) axes = ''.join(TIFF.MM_DIMENSIONS.get(i[0].upper(), 'Q') for i in mmhd if i[1] > 1) shape = tuple(int(i[1]) for i in mmhd if i[1] > 1) self.is_uniform = True return [TiffPageSeries(pages, shape, pages[0].dtype, axes, name=mm['ImageName'], kind='FluoView')]
python
def _series_fluoview(self): """Return image series in FluoView file.""" pages = self.pages._getlist(validate=False) mm = self.fluoview_metadata mmhd = list(reversed(mm['Dimensions'])) axes = ''.join(TIFF.MM_DIMENSIONS.get(i[0].upper(), 'Q') for i in mmhd if i[1] > 1) shape = tuple(int(i[1]) for i in mmhd if i[1] > 1) self.is_uniform = True return [TiffPageSeries(pages, shape, pages[0].dtype, axes, name=mm['ImageName'], kind='FluoView')]
[ "def", "_series_fluoview", "(", "self", ")", ":", "pages", "=", "self", ".", "pages", ".", "_getlist", "(", "validate", "=", "False", ")", "mm", "=", "self", ".", "fluoview_metadata", "mmhd", "=", "list", "(", "reversed", "(", "mm", "[", "'Dimensions'", "]", ")", ")", "axes", "=", "''", ".", "join", "(", "TIFF", ".", "MM_DIMENSIONS", ".", "get", "(", "i", "[", "0", "]", ".", "upper", "(", ")", ",", "'Q'", ")", "for", "i", "in", "mmhd", "if", "i", "[", "1", "]", ">", "1", ")", "shape", "=", "tuple", "(", "int", "(", "i", "[", "1", "]", ")", "for", "i", "in", "mmhd", "if", "i", "[", "1", "]", ">", "1", ")", "self", ".", "is_uniform", "=", "True", "return", "[", "TiffPageSeries", "(", "pages", ",", "shape", ",", "pages", "[", "0", "]", ".", "dtype", ",", "axes", ",", "name", "=", "mm", "[", "'ImageName'", "]", ",", "kind", "=", "'FluoView'", ")", "]" ]
Return image series in FluoView file.
[ "Return", "image", "series", "in", "FluoView", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2435-L2446
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_mdgel
def _series_mdgel(self): """Return image series in MD Gel file.""" # only a single page, scaled according to metadata in second page self.pages.useframes = False self.pages.keyframe = 0 md = self.mdgel_metadata if md['FileTag'] in (2, 128): dtype = numpy.dtype('float32') scale = md['ScalePixel'] scale = scale[0] / scale[1] # rational if md['FileTag'] == 2: # squary root data format def transform(a): return a.astype('float32')**2 * scale else: def transform(a): return a.astype('float32') * scale else: transform = None page = self.pages[0] self.is_uniform = False return [TiffPageSeries([page], page.shape, dtype, page.axes, transform=transform, kind='MDGel')]
python
def _series_mdgel(self): """Return image series in MD Gel file.""" # only a single page, scaled according to metadata in second page self.pages.useframes = False self.pages.keyframe = 0 md = self.mdgel_metadata if md['FileTag'] in (2, 128): dtype = numpy.dtype('float32') scale = md['ScalePixel'] scale = scale[0] / scale[1] # rational if md['FileTag'] == 2: # squary root data format def transform(a): return a.astype('float32')**2 * scale else: def transform(a): return a.astype('float32') * scale else: transform = None page = self.pages[0] self.is_uniform = False return [TiffPageSeries([page], page.shape, dtype, page.axes, transform=transform, kind='MDGel')]
[ "def", "_series_mdgel", "(", "self", ")", ":", "# only a single page, scaled according to metadata in second page", "self", ".", "pages", ".", "useframes", "=", "False", "self", ".", "pages", ".", "keyframe", "=", "0", "md", "=", "self", ".", "mdgel_metadata", "if", "md", "[", "'FileTag'", "]", "in", "(", "2", ",", "128", ")", ":", "dtype", "=", "numpy", ".", "dtype", "(", "'float32'", ")", "scale", "=", "md", "[", "'ScalePixel'", "]", "scale", "=", "scale", "[", "0", "]", "/", "scale", "[", "1", "]", "# rational", "if", "md", "[", "'FileTag'", "]", "==", "2", ":", "# squary root data format", "def", "transform", "(", "a", ")", ":", "return", "a", ".", "astype", "(", "'float32'", ")", "**", "2", "*", "scale", "else", ":", "def", "transform", "(", "a", ")", ":", "return", "a", ".", "astype", "(", "'float32'", ")", "*", "scale", "else", ":", "transform", "=", "None", "page", "=", "self", ".", "pages", "[", "0", "]", "self", ".", "is_uniform", "=", "False", "return", "[", "TiffPageSeries", "(", "[", "page", "]", ",", "page", ".", "shape", ",", "dtype", ",", "page", ".", "axes", ",", "transform", "=", "transform", ",", "kind", "=", "'MDGel'", ")", "]" ]
Return image series in MD Gel file.
[ "Return", "image", "series", "in", "MD", "Gel", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2448-L2470
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_sis
def _series_sis(self): """Return image series in Olympus SIS file.""" pages = self.pages._getlist(validate=False) page = pages[0] lenpages = len(pages) md = self.sis_metadata if 'shape' in md and 'axes' in md: shape = md['shape'] + page.shape axes = md['axes'] + page.axes elif lenpages == 1: shape = page.shape axes = page.axes else: shape = (lenpages,) + page.shape axes = 'I' + page.axes self.is_uniform = True return [TiffPageSeries(pages, shape, page.dtype, axes, kind='SIS')]
python
def _series_sis(self): """Return image series in Olympus SIS file.""" pages = self.pages._getlist(validate=False) page = pages[0] lenpages = len(pages) md = self.sis_metadata if 'shape' in md and 'axes' in md: shape = md['shape'] + page.shape axes = md['axes'] + page.axes elif lenpages == 1: shape = page.shape axes = page.axes else: shape = (lenpages,) + page.shape axes = 'I' + page.axes self.is_uniform = True return [TiffPageSeries(pages, shape, page.dtype, axes, kind='SIS')]
[ "def", "_series_sis", "(", "self", ")", ":", "pages", "=", "self", ".", "pages", ".", "_getlist", "(", "validate", "=", "False", ")", "page", "=", "pages", "[", "0", "]", "lenpages", "=", "len", "(", "pages", ")", "md", "=", "self", ".", "sis_metadata", "if", "'shape'", "in", "md", "and", "'axes'", "in", "md", ":", "shape", "=", "md", "[", "'shape'", "]", "+", "page", ".", "shape", "axes", "=", "md", "[", "'axes'", "]", "+", "page", ".", "axes", "elif", "lenpages", "==", "1", ":", "shape", "=", "page", ".", "shape", "axes", "=", "page", ".", "axes", "else", ":", "shape", "=", "(", "lenpages", ",", ")", "+", "page", ".", "shape", "axes", "=", "'I'", "+", "page", ".", "axes", "self", ".", "is_uniform", "=", "True", "return", "[", "TiffPageSeries", "(", "pages", ",", "shape", ",", "page", ".", "dtype", ",", "axes", ",", "kind", "=", "'SIS'", ")", "]" ]
Return image series in Olympus SIS file.
[ "Return", "image", "series", "in", "Olympus", "SIS", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2472-L2488
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_ome
def _series_ome(self): """Return image series in OME-TIFF file(s).""" from xml.etree import cElementTree as etree # delayed import omexml = self.pages[0].description try: root = etree.fromstring(omexml) except etree.ParseError as exc: # TODO: test badly encoded OME-XML log.warning('OME series: %s: %s', exc.__class__.__name__, exc) try: # might work on Python 2 omexml = omexml.decode('utf-8', 'ignore').encode('utf-8') root = etree.fromstring(omexml) except Exception: return None self.pages.cache = True self.pages.useframes = True self.pages.keyframe = 0 self.pages._load(keyframe=None) uuid = root.attrib.get('UUID', None) self._files = {uuid: self} dirname = self._fh.dirname modulo = {} series = [] for element in root: if element.tag.endswith('BinaryOnly'): # TODO: load OME-XML from master or companion file log.warning('OME series: not an ome-tiff master file') break if element.tag.endswith('StructuredAnnotations'): for annot in element: if not annot.attrib.get('Namespace', '').endswith('modulo'): continue for value in annot: for modul in value: for along in modul: if not along.tag[:-1].endswith('Along'): continue axis = along.tag[-1] newaxis = along.attrib.get('Type', 'other') newaxis = TIFF.AXES_LABELS[newaxis] if 'Start' in along.attrib: step = float(along.attrib.get('Step', 1)) start = float(along.attrib['Start']) stop = float(along.attrib['End']) + step labels = numpy.arange(start, stop, step) else: labels = [label.text for label in along if label.tag.endswith('Label')] modulo[axis] = (newaxis, labels) if not element.tag.endswith('Image'): continue attr = element.attrib name = attr.get('Name', None) for pixels in element: if not pixels.tag.endswith('Pixels'): continue attr = pixels.attrib # dtype = attr.get('PixelType', None) axes = ''.join(reversed(attr['DimensionOrder'])) shape = idxshape = list(int(attr['Size'+ax]) for ax in axes) size = product(shape[:-2]) ifds = None spp = 1 # samples per pixel for data in pixels: if data.tag.endswith('Channel'): attr = 
data.attrib if ifds is None: spp = int(attr.get('SamplesPerPixel', spp)) ifds = [None] * (size // spp) if spp > 1: # correct channel dimension for spp idxshape = list((shape[i] // spp if ax == 'C' else shape[i]) for i, ax in enumerate(axes)) elif int(attr.get('SamplesPerPixel', 1)) != spp: raise ValueError( 'cannot handle differing SamplesPerPixel') continue if ifds is None: ifds = [None] * (size // spp) if not data.tag.endswith('TiffData'): continue attr = data.attrib ifd = int(attr.get('IFD', 0)) num = int(attr.get('NumPlanes', 1 if 'IFD' in attr else 0)) num = int(attr.get('PlaneCount', num)) idx = [int(attr.get('First'+ax, 0)) for ax in axes[:-2]] try: idx = numpy.ravel_multi_index(idx, idxshape[:-2]) except ValueError: # ImageJ produces invalid ome-xml when cropping log.warning('OME series: invalid TiffData index') continue for uuid in data: if not uuid.tag.endswith('UUID'): continue if uuid.text not in self._files: if not self._multifile: # abort reading multifile OME series # and fall back to generic series return [] fname = uuid.attrib['FileName'] try: tif = TiffFile(os.path.join(dirname, fname)) tif.pages.cache = True tif.pages.useframes = True tif.pages.keyframe = 0 tif.pages._load(keyframe=None) except (IOError, FileNotFoundError, ValueError): log.warning("OME series: failed to read '%s'", fname) break self._files[uuid.text] = tif tif.close() pages = self._files[uuid.text].pages try: for i in range(num if num else len(pages)): ifds[idx + i] = pages[ifd + i] except IndexError: log.warning('OME series: index out of range') # only process first UUID break else: pages = self.pages try: for i in range(num if num else min(len(pages), len(ifds))): ifds[idx + i] = pages[ifd + i] except IndexError: log.warning('OME series: index out of range') if all(i is None for i in ifds): # skip images without data continue # find a keyframe keyframe = None for i in ifds: # try find a TiffPage if i and i == i.keyframe: keyframe = i break if keyframe is None: # reload a 
TiffPage from file for i, keyframe in enumerate(ifds): if keyframe: keyframe.parent.pages.keyframe = keyframe.index keyframe = keyframe.parent.pages[keyframe.index] ifds[i] = keyframe break # move channel axis to match PlanarConfiguration storage # TODO: is this a bug or a inconsistency in the OME spec? if spp > 1: if keyframe.planarconfig == 1 and axes[-1] != 'C': i = axes.index('C') axes = axes[:i] + axes[i+1:] + axes[i:i+1] shape = shape[:i] + shape[i+1:] + shape[i:i+1] # FIXME: this implementation assumes the last dimensions are # stored in TIFF pages. Apparently that is not always the case. # For now, verify that shapes of keyframe and series match # If not, skip series. if keyframe.shape != tuple(shape[-len(keyframe.shape):]): log.warning('OME series: incompatible page shape %s; ' 'expected %s', keyframe.shape, tuple(shape[-len(keyframe.shape):])) del ifds continue # set a keyframe on all IFDs for i in ifds: if i is not None: i.keyframe = keyframe series.append( TiffPageSeries(ifds, shape, keyframe.dtype, axes, parent=self, name=name, kind='OME')) del ifds for serie in series: shape = list(serie.shape) for axis, (newaxis, labels) in modulo.items(): i = serie.axes.index(axis) size = len(labels) if shape[i] == size: serie.axes = serie.axes.replace(axis, newaxis, 1) else: shape[i] //= size shape.insert(i+1, size) serie.axes = serie.axes.replace(axis, axis+newaxis, 1) serie.shape = tuple(shape) # squeeze dimensions for serie in series: serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes) self.is_uniform = len(series) == 1 return series
python
def _series_ome(self): """Return image series in OME-TIFF file(s).""" from xml.etree import cElementTree as etree # delayed import omexml = self.pages[0].description try: root = etree.fromstring(omexml) except etree.ParseError as exc: # TODO: test badly encoded OME-XML log.warning('OME series: %s: %s', exc.__class__.__name__, exc) try: # might work on Python 2 omexml = omexml.decode('utf-8', 'ignore').encode('utf-8') root = etree.fromstring(omexml) except Exception: return None self.pages.cache = True self.pages.useframes = True self.pages.keyframe = 0 self.pages._load(keyframe=None) uuid = root.attrib.get('UUID', None) self._files = {uuid: self} dirname = self._fh.dirname modulo = {} series = [] for element in root: if element.tag.endswith('BinaryOnly'): # TODO: load OME-XML from master or companion file log.warning('OME series: not an ome-tiff master file') break if element.tag.endswith('StructuredAnnotations'): for annot in element: if not annot.attrib.get('Namespace', '').endswith('modulo'): continue for value in annot: for modul in value: for along in modul: if not along.tag[:-1].endswith('Along'): continue axis = along.tag[-1] newaxis = along.attrib.get('Type', 'other') newaxis = TIFF.AXES_LABELS[newaxis] if 'Start' in along.attrib: step = float(along.attrib.get('Step', 1)) start = float(along.attrib['Start']) stop = float(along.attrib['End']) + step labels = numpy.arange(start, stop, step) else: labels = [label.text for label in along if label.tag.endswith('Label')] modulo[axis] = (newaxis, labels) if not element.tag.endswith('Image'): continue attr = element.attrib name = attr.get('Name', None) for pixels in element: if not pixels.tag.endswith('Pixels'): continue attr = pixels.attrib # dtype = attr.get('PixelType', None) axes = ''.join(reversed(attr['DimensionOrder'])) shape = idxshape = list(int(attr['Size'+ax]) for ax in axes) size = product(shape[:-2]) ifds = None spp = 1 # samples per pixel for data in pixels: if data.tag.endswith('Channel'): attr = 
data.attrib if ifds is None: spp = int(attr.get('SamplesPerPixel', spp)) ifds = [None] * (size // spp) if spp > 1: # correct channel dimension for spp idxshape = list((shape[i] // spp if ax == 'C' else shape[i]) for i, ax in enumerate(axes)) elif int(attr.get('SamplesPerPixel', 1)) != spp: raise ValueError( 'cannot handle differing SamplesPerPixel') continue if ifds is None: ifds = [None] * (size // spp) if not data.tag.endswith('TiffData'): continue attr = data.attrib ifd = int(attr.get('IFD', 0)) num = int(attr.get('NumPlanes', 1 if 'IFD' in attr else 0)) num = int(attr.get('PlaneCount', num)) idx = [int(attr.get('First'+ax, 0)) for ax in axes[:-2]] try: idx = numpy.ravel_multi_index(idx, idxshape[:-2]) except ValueError: # ImageJ produces invalid ome-xml when cropping log.warning('OME series: invalid TiffData index') continue for uuid in data: if not uuid.tag.endswith('UUID'): continue if uuid.text not in self._files: if not self._multifile: # abort reading multifile OME series # and fall back to generic series return [] fname = uuid.attrib['FileName'] try: tif = TiffFile(os.path.join(dirname, fname)) tif.pages.cache = True tif.pages.useframes = True tif.pages.keyframe = 0 tif.pages._load(keyframe=None) except (IOError, FileNotFoundError, ValueError): log.warning("OME series: failed to read '%s'", fname) break self._files[uuid.text] = tif tif.close() pages = self._files[uuid.text].pages try: for i in range(num if num else len(pages)): ifds[idx + i] = pages[ifd + i] except IndexError: log.warning('OME series: index out of range') # only process first UUID break else: pages = self.pages try: for i in range(num if num else min(len(pages), len(ifds))): ifds[idx + i] = pages[ifd + i] except IndexError: log.warning('OME series: index out of range') if all(i is None for i in ifds): # skip images without data continue # find a keyframe keyframe = None for i in ifds: # try find a TiffPage if i and i == i.keyframe: keyframe = i break if keyframe is None: # reload a 
TiffPage from file for i, keyframe in enumerate(ifds): if keyframe: keyframe.parent.pages.keyframe = keyframe.index keyframe = keyframe.parent.pages[keyframe.index] ifds[i] = keyframe break # move channel axis to match PlanarConfiguration storage # TODO: is this a bug or a inconsistency in the OME spec? if spp > 1: if keyframe.planarconfig == 1 and axes[-1] != 'C': i = axes.index('C') axes = axes[:i] + axes[i+1:] + axes[i:i+1] shape = shape[:i] + shape[i+1:] + shape[i:i+1] # FIXME: this implementation assumes the last dimensions are # stored in TIFF pages. Apparently that is not always the case. # For now, verify that shapes of keyframe and series match # If not, skip series. if keyframe.shape != tuple(shape[-len(keyframe.shape):]): log.warning('OME series: incompatible page shape %s; ' 'expected %s', keyframe.shape, tuple(shape[-len(keyframe.shape):])) del ifds continue # set a keyframe on all IFDs for i in ifds: if i is not None: i.keyframe = keyframe series.append( TiffPageSeries(ifds, shape, keyframe.dtype, axes, parent=self, name=name, kind='OME')) del ifds for serie in series: shape = list(serie.shape) for axis, (newaxis, labels) in modulo.items(): i = serie.axes.index(axis) size = len(labels) if shape[i] == size: serie.axes = serie.axes.replace(axis, newaxis, 1) else: shape[i] //= size shape.insert(i+1, size) serie.axes = serie.axes.replace(axis, axis+newaxis, 1) serie.shape = tuple(shape) # squeeze dimensions for serie in series: serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes) self.is_uniform = len(series) == 1 return series
[ "def", "_series_ome", "(", "self", ")", ":", "from", "xml", ".", "etree", "import", "cElementTree", "as", "etree", "# delayed import", "omexml", "=", "self", ".", "pages", "[", "0", "]", ".", "description", "try", ":", "root", "=", "etree", ".", "fromstring", "(", "omexml", ")", "except", "etree", ".", "ParseError", "as", "exc", ":", "# TODO: test badly encoded OME-XML", "log", ".", "warning", "(", "'OME series: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "try", ":", "# might work on Python 2", "omexml", "=", "omexml", ".", "decode", "(", "'utf-8'", ",", "'ignore'", ")", ".", "encode", "(", "'utf-8'", ")", "root", "=", "etree", ".", "fromstring", "(", "omexml", ")", "except", "Exception", ":", "return", "None", "self", ".", "pages", ".", "cache", "=", "True", "self", ".", "pages", ".", "useframes", "=", "True", "self", ".", "pages", ".", "keyframe", "=", "0", "self", ".", "pages", ".", "_load", "(", "keyframe", "=", "None", ")", "uuid", "=", "root", ".", "attrib", ".", "get", "(", "'UUID'", ",", "None", ")", "self", ".", "_files", "=", "{", "uuid", ":", "self", "}", "dirname", "=", "self", ".", "_fh", ".", "dirname", "modulo", "=", "{", "}", "series", "=", "[", "]", "for", "element", "in", "root", ":", "if", "element", ".", "tag", ".", "endswith", "(", "'BinaryOnly'", ")", ":", "# TODO: load OME-XML from master or companion file", "log", ".", "warning", "(", "'OME series: not an ome-tiff master file'", ")", "break", "if", "element", ".", "tag", ".", "endswith", "(", "'StructuredAnnotations'", ")", ":", "for", "annot", "in", "element", ":", "if", "not", "annot", ".", "attrib", ".", "get", "(", "'Namespace'", ",", "''", ")", ".", "endswith", "(", "'modulo'", ")", ":", "continue", "for", "value", "in", "annot", ":", "for", "modul", "in", "value", ":", "for", "along", "in", "modul", ":", "if", "not", "along", ".", "tag", "[", ":", "-", "1", "]", ".", "endswith", "(", "'Along'", ")", ":", "continue", "axis", "=", "along", ".", "tag", "[", 
"-", "1", "]", "newaxis", "=", "along", ".", "attrib", ".", "get", "(", "'Type'", ",", "'other'", ")", "newaxis", "=", "TIFF", ".", "AXES_LABELS", "[", "newaxis", "]", "if", "'Start'", "in", "along", ".", "attrib", ":", "step", "=", "float", "(", "along", ".", "attrib", ".", "get", "(", "'Step'", ",", "1", ")", ")", "start", "=", "float", "(", "along", ".", "attrib", "[", "'Start'", "]", ")", "stop", "=", "float", "(", "along", ".", "attrib", "[", "'End'", "]", ")", "+", "step", "labels", "=", "numpy", ".", "arange", "(", "start", ",", "stop", ",", "step", ")", "else", ":", "labels", "=", "[", "label", ".", "text", "for", "label", "in", "along", "if", "label", ".", "tag", ".", "endswith", "(", "'Label'", ")", "]", "modulo", "[", "axis", "]", "=", "(", "newaxis", ",", "labels", ")", "if", "not", "element", ".", "tag", ".", "endswith", "(", "'Image'", ")", ":", "continue", "attr", "=", "element", ".", "attrib", "name", "=", "attr", ".", "get", "(", "'Name'", ",", "None", ")", "for", "pixels", "in", "element", ":", "if", "not", "pixels", ".", "tag", ".", "endswith", "(", "'Pixels'", ")", ":", "continue", "attr", "=", "pixels", ".", "attrib", "# dtype = attr.get('PixelType', None)", "axes", "=", "''", ".", "join", "(", "reversed", "(", "attr", "[", "'DimensionOrder'", "]", ")", ")", "shape", "=", "idxshape", "=", "list", "(", "int", "(", "attr", "[", "'Size'", "+", "ax", "]", ")", "for", "ax", "in", "axes", ")", "size", "=", "product", "(", "shape", "[", ":", "-", "2", "]", ")", "ifds", "=", "None", "spp", "=", "1", "# samples per pixel", "for", "data", "in", "pixels", ":", "if", "data", ".", "tag", ".", "endswith", "(", "'Channel'", ")", ":", "attr", "=", "data", ".", "attrib", "if", "ifds", "is", "None", ":", "spp", "=", "int", "(", "attr", ".", "get", "(", "'SamplesPerPixel'", ",", "spp", ")", ")", "ifds", "=", "[", "None", "]", "*", "(", "size", "//", "spp", ")", "if", "spp", ">", "1", ":", "# correct channel dimension for spp", "idxshape", "=", "list", "(", "(", 
"shape", "[", "i", "]", "//", "spp", "if", "ax", "==", "'C'", "else", "shape", "[", "i", "]", ")", "for", "i", ",", "ax", "in", "enumerate", "(", "axes", ")", ")", "elif", "int", "(", "attr", ".", "get", "(", "'SamplesPerPixel'", ",", "1", ")", ")", "!=", "spp", ":", "raise", "ValueError", "(", "'cannot handle differing SamplesPerPixel'", ")", "continue", "if", "ifds", "is", "None", ":", "ifds", "=", "[", "None", "]", "*", "(", "size", "//", "spp", ")", "if", "not", "data", ".", "tag", ".", "endswith", "(", "'TiffData'", ")", ":", "continue", "attr", "=", "data", ".", "attrib", "ifd", "=", "int", "(", "attr", ".", "get", "(", "'IFD'", ",", "0", ")", ")", "num", "=", "int", "(", "attr", ".", "get", "(", "'NumPlanes'", ",", "1", "if", "'IFD'", "in", "attr", "else", "0", ")", ")", "num", "=", "int", "(", "attr", ".", "get", "(", "'PlaneCount'", ",", "num", ")", ")", "idx", "=", "[", "int", "(", "attr", ".", "get", "(", "'First'", "+", "ax", ",", "0", ")", ")", "for", "ax", "in", "axes", "[", ":", "-", "2", "]", "]", "try", ":", "idx", "=", "numpy", ".", "ravel_multi_index", "(", "idx", ",", "idxshape", "[", ":", "-", "2", "]", ")", "except", "ValueError", ":", "# ImageJ produces invalid ome-xml when cropping", "log", ".", "warning", "(", "'OME series: invalid TiffData index'", ")", "continue", "for", "uuid", "in", "data", ":", "if", "not", "uuid", ".", "tag", ".", "endswith", "(", "'UUID'", ")", ":", "continue", "if", "uuid", ".", "text", "not", "in", "self", ".", "_files", ":", "if", "not", "self", ".", "_multifile", ":", "# abort reading multifile OME series", "# and fall back to generic series", "return", "[", "]", "fname", "=", "uuid", ".", "attrib", "[", "'FileName'", "]", "try", ":", "tif", "=", "TiffFile", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "fname", ")", ")", "tif", ".", "pages", ".", "cache", "=", "True", "tif", ".", "pages", ".", "useframes", "=", "True", "tif", ".", "pages", ".", "keyframe", "=", "0", "tif", ".", "pages", ".", "_load", 
"(", "keyframe", "=", "None", ")", "except", "(", "IOError", ",", "FileNotFoundError", ",", "ValueError", ")", ":", "log", ".", "warning", "(", "\"OME series: failed to read '%s'\"", ",", "fname", ")", "break", "self", ".", "_files", "[", "uuid", ".", "text", "]", "=", "tif", "tif", ".", "close", "(", ")", "pages", "=", "self", ".", "_files", "[", "uuid", ".", "text", "]", ".", "pages", "try", ":", "for", "i", "in", "range", "(", "num", "if", "num", "else", "len", "(", "pages", ")", ")", ":", "ifds", "[", "idx", "+", "i", "]", "=", "pages", "[", "ifd", "+", "i", "]", "except", "IndexError", ":", "log", ".", "warning", "(", "'OME series: index out of range'", ")", "# only process first UUID", "break", "else", ":", "pages", "=", "self", ".", "pages", "try", ":", "for", "i", "in", "range", "(", "num", "if", "num", "else", "min", "(", "len", "(", "pages", ")", ",", "len", "(", "ifds", ")", ")", ")", ":", "ifds", "[", "idx", "+", "i", "]", "=", "pages", "[", "ifd", "+", "i", "]", "except", "IndexError", ":", "log", ".", "warning", "(", "'OME series: index out of range'", ")", "if", "all", "(", "i", "is", "None", "for", "i", "in", "ifds", ")", ":", "# skip images without data", "continue", "# find a keyframe", "keyframe", "=", "None", "for", "i", "in", "ifds", ":", "# try find a TiffPage", "if", "i", "and", "i", "==", "i", ".", "keyframe", ":", "keyframe", "=", "i", "break", "if", "keyframe", "is", "None", ":", "# reload a TiffPage from file", "for", "i", ",", "keyframe", "in", "enumerate", "(", "ifds", ")", ":", "if", "keyframe", ":", "keyframe", ".", "parent", ".", "pages", ".", "keyframe", "=", "keyframe", ".", "index", "keyframe", "=", "keyframe", ".", "parent", ".", "pages", "[", "keyframe", ".", "index", "]", "ifds", "[", "i", "]", "=", "keyframe", "break", "# move channel axis to match PlanarConfiguration storage", "# TODO: is this a bug or a inconsistency in the OME spec?", "if", "spp", ">", "1", ":", "if", "keyframe", ".", "planarconfig", "==", "1", "and", 
"axes", "[", "-", "1", "]", "!=", "'C'", ":", "i", "=", "axes", ".", "index", "(", "'C'", ")", "axes", "=", "axes", "[", ":", "i", "]", "+", "axes", "[", "i", "+", "1", ":", "]", "+", "axes", "[", "i", ":", "i", "+", "1", "]", "shape", "=", "shape", "[", ":", "i", "]", "+", "shape", "[", "i", "+", "1", ":", "]", "+", "shape", "[", "i", ":", "i", "+", "1", "]", "# FIXME: this implementation assumes the last dimensions are", "# stored in TIFF pages. Apparently that is not always the case.", "# For now, verify that shapes of keyframe and series match", "# If not, skip series.", "if", "keyframe", ".", "shape", "!=", "tuple", "(", "shape", "[", "-", "len", "(", "keyframe", ".", "shape", ")", ":", "]", ")", ":", "log", ".", "warning", "(", "'OME series: incompatible page shape %s; '", "'expected %s'", ",", "keyframe", ".", "shape", ",", "tuple", "(", "shape", "[", "-", "len", "(", "keyframe", ".", "shape", ")", ":", "]", ")", ")", "del", "ifds", "continue", "# set a keyframe on all IFDs", "for", "i", "in", "ifds", ":", "if", "i", "is", "not", "None", ":", "i", ".", "keyframe", "=", "keyframe", "series", ".", "append", "(", "TiffPageSeries", "(", "ifds", ",", "shape", ",", "keyframe", ".", "dtype", ",", "axes", ",", "parent", "=", "self", ",", "name", "=", "name", ",", "kind", "=", "'OME'", ")", ")", "del", "ifds", "for", "serie", "in", "series", ":", "shape", "=", "list", "(", "serie", ".", "shape", ")", "for", "axis", ",", "(", "newaxis", ",", "labels", ")", "in", "modulo", ".", "items", "(", ")", ":", "i", "=", "serie", ".", "axes", ".", "index", "(", "axis", ")", "size", "=", "len", "(", "labels", ")", "if", "shape", "[", "i", "]", "==", "size", ":", "serie", ".", "axes", "=", "serie", ".", "axes", ".", "replace", "(", "axis", ",", "newaxis", ",", "1", ")", "else", ":", "shape", "[", "i", "]", "//=", "size", "shape", ".", "insert", "(", "i", "+", "1", ",", "size", ")", "serie", ".", "axes", "=", "serie", ".", "axes", ".", "replace", "(", "axis", ",", "axis", "+", 
"newaxis", ",", "1", ")", "serie", ".", "shape", "=", "tuple", "(", "shape", ")", "# squeeze dimensions", "for", "serie", "in", "series", ":", "serie", ".", "shape", ",", "serie", ".", "axes", "=", "squeeze_axes", "(", "serie", ".", "shape", ",", "serie", ".", "axes", ")", "self", ".", "is_uniform", "=", "len", "(", "series", ")", "==", "1", "return", "series" ]
Return image series in OME-TIFF file(s).
[ "Return", "image", "series", "in", "OME", "-", "TIFF", "file", "(", "s", ")", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2490-L2694
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._series_lsm
def _series_lsm(self): """Return main and thumbnail series in LSM file.""" lsmi = self.lsm_metadata axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi['ScanType']] if self.pages[0].photometric == 2: # RGB; more than one channel axes = axes.replace('C', '').replace('XY', 'XYC') if lsmi.get('DimensionP', 0) > 1: axes += 'P' if lsmi.get('DimensionM', 0) > 1: axes += 'M' axes = axes[::-1] shape = tuple(int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes) name = lsmi.get('Name', '') pages = self.pages._getlist(slice(0, None, 2), validate=False) dtype = pages[0].dtype series = [TiffPageSeries(pages, shape, dtype, axes, name=name, kind='LSM')] if self.pages[1].is_reduced: pages = self.pages._getlist(slice(1, None, 2), validate=False) dtype = pages[0].dtype cp = 1 i = 0 while cp < len(pages) and i < len(shape)-2: cp *= shape[i] i += 1 shape = shape[:i] + pages[0].shape axes = axes[:i] + 'CYX' series.append(TiffPageSeries(pages, shape, dtype, axes, name=name, kind='LSMreduced')) self.is_uniform = False return series
python
def _series_lsm(self): """Return main and thumbnail series in LSM file.""" lsmi = self.lsm_metadata axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi['ScanType']] if self.pages[0].photometric == 2: # RGB; more than one channel axes = axes.replace('C', '').replace('XY', 'XYC') if lsmi.get('DimensionP', 0) > 1: axes += 'P' if lsmi.get('DimensionM', 0) > 1: axes += 'M' axes = axes[::-1] shape = tuple(int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes) name = lsmi.get('Name', '') pages = self.pages._getlist(slice(0, None, 2), validate=False) dtype = pages[0].dtype series = [TiffPageSeries(pages, shape, dtype, axes, name=name, kind='LSM')] if self.pages[1].is_reduced: pages = self.pages._getlist(slice(1, None, 2), validate=False) dtype = pages[0].dtype cp = 1 i = 0 while cp < len(pages) and i < len(shape)-2: cp *= shape[i] i += 1 shape = shape[:i] + pages[0].shape axes = axes[:i] + 'CYX' series.append(TiffPageSeries(pages, shape, dtype, axes, name=name, kind='LSMreduced')) self.is_uniform = False return series
[ "def", "_series_lsm", "(", "self", ")", ":", "lsmi", "=", "self", ".", "lsm_metadata", "axes", "=", "TIFF", ".", "CZ_LSMINFO_SCANTYPE", "[", "lsmi", "[", "'ScanType'", "]", "]", "if", "self", ".", "pages", "[", "0", "]", ".", "photometric", "==", "2", ":", "# RGB; more than one channel", "axes", "=", "axes", ".", "replace", "(", "'C'", ",", "''", ")", ".", "replace", "(", "'XY'", ",", "'XYC'", ")", "if", "lsmi", ".", "get", "(", "'DimensionP'", ",", "0", ")", ">", "1", ":", "axes", "+=", "'P'", "if", "lsmi", ".", "get", "(", "'DimensionM'", ",", "0", ")", ">", "1", ":", "axes", "+=", "'M'", "axes", "=", "axes", "[", ":", ":", "-", "1", "]", "shape", "=", "tuple", "(", "int", "(", "lsmi", "[", "TIFF", ".", "CZ_LSMINFO_DIMENSIONS", "[", "i", "]", "]", ")", "for", "i", "in", "axes", ")", "name", "=", "lsmi", ".", "get", "(", "'Name'", ",", "''", ")", "pages", "=", "self", ".", "pages", ".", "_getlist", "(", "slice", "(", "0", ",", "None", ",", "2", ")", ",", "validate", "=", "False", ")", "dtype", "=", "pages", "[", "0", "]", ".", "dtype", "series", "=", "[", "TiffPageSeries", "(", "pages", ",", "shape", ",", "dtype", ",", "axes", ",", "name", "=", "name", ",", "kind", "=", "'LSM'", ")", "]", "if", "self", ".", "pages", "[", "1", "]", ".", "is_reduced", ":", "pages", "=", "self", ".", "pages", ".", "_getlist", "(", "slice", "(", "1", ",", "None", ",", "2", ")", ",", "validate", "=", "False", ")", "dtype", "=", "pages", "[", "0", "]", ".", "dtype", "cp", "=", "1", "i", "=", "0", "while", "cp", "<", "len", "(", "pages", ")", "and", "i", "<", "len", "(", "shape", ")", "-", "2", ":", "cp", "*=", "shape", "[", "i", "]", "i", "+=", "1", "shape", "=", "shape", "[", ":", "i", "]", "+", "pages", "[", "0", "]", ".", "shape", "axes", "=", "axes", "[", ":", "i", "]", "+", "'CYX'", "series", ".", "append", "(", "TiffPageSeries", "(", "pages", ",", "shape", ",", "dtype", ",", "axes", ",", "name", "=", "name", ",", "kind", "=", "'LSMreduced'", ")", ")", "self", ".", "is_uniform", 
"=", "False", "return", "series" ]
Return main and thumbnail series in LSM file.
[ "Return", "main", "and", "thumbnail", "series", "in", "LSM", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2696-L2728
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._lsm_load_pages
def _lsm_load_pages(self): """Load and fix all pages from LSM file.""" # cache all pages to preserve corrected values pages = self.pages pages.cache = True pages.useframes = True # use first and second page as keyframes pages.keyframe = 1 pages.keyframe = 0 # load remaining pages as frames pages._load(keyframe=None) # fix offsets and bytecounts first self._lsm_fix_strip_offsets() self._lsm_fix_strip_bytecounts() # assign keyframes for data and thumbnail series keyframe = pages[0] for page in pages[::2]: page.keyframe = keyframe keyframe = pages[1] for page in pages[1::2]: page.keyframe = keyframe
python
def _lsm_load_pages(self): """Load and fix all pages from LSM file.""" # cache all pages to preserve corrected values pages = self.pages pages.cache = True pages.useframes = True # use first and second page as keyframes pages.keyframe = 1 pages.keyframe = 0 # load remaining pages as frames pages._load(keyframe=None) # fix offsets and bytecounts first self._lsm_fix_strip_offsets() self._lsm_fix_strip_bytecounts() # assign keyframes for data and thumbnail series keyframe = pages[0] for page in pages[::2]: page.keyframe = keyframe keyframe = pages[1] for page in pages[1::2]: page.keyframe = keyframe
[ "def", "_lsm_load_pages", "(", "self", ")", ":", "# cache all pages to preserve corrected values", "pages", "=", "self", ".", "pages", "pages", ".", "cache", "=", "True", "pages", ".", "useframes", "=", "True", "# use first and second page as keyframes", "pages", ".", "keyframe", "=", "1", "pages", ".", "keyframe", "=", "0", "# load remaining pages as frames", "pages", ".", "_load", "(", "keyframe", "=", "None", ")", "# fix offsets and bytecounts first", "self", ".", "_lsm_fix_strip_offsets", "(", ")", "self", ".", "_lsm_fix_strip_bytecounts", "(", ")", "# assign keyframes for data and thumbnail series", "keyframe", "=", "pages", "[", "0", "]", "for", "page", "in", "pages", "[", ":", ":", "2", "]", ":", "page", ".", "keyframe", "=", "keyframe", "keyframe", "=", "pages", "[", "1", "]", "for", "page", "in", "pages", "[", "1", ":", ":", "2", "]", ":", "page", ".", "keyframe", "=", "keyframe" ]
Load and fix all pages from LSM file.
[ "Load", "and", "fix", "all", "pages", "from", "LSM", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2730-L2750
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._lsm_fix_strip_offsets
def _lsm_fix_strip_offsets(self): """Unwrap strip offsets for LSM files greater than 4 GB. Each series and position require separate unwrapping (undocumented). """ if self.filehandle.size < 2**32: return pages = self.pages npages = len(pages) series = self.series[0] axes = series.axes # find positions positions = 1 for i in 0, 1: if series.axes[i] in 'PM': positions *= series.shape[i] # make time axis first if positions > 1: ntimes = 0 for i in 1, 2: if axes[i] == 'T': ntimes = series.shape[i] break if ntimes: div, mod = divmod(npages, 2*positions*ntimes) assert mod == 0 shape = (positions, ntimes, div, 2) indices = numpy.arange(product(shape)).reshape(shape) indices = numpy.moveaxis(indices, 1, 0) else: indices = numpy.arange(npages).reshape(-1, 2) # images of reduced page might be stored first if pages[0]._offsetscounts[0][0] > pages[1]._offsetscounts[0][0]: indices = indices[..., ::-1] # unwrap offsets wrap = 0 previousoffset = 0 for i in indices.flat: page = pages[int(i)] dataoffsets = [] for currentoffset in page._offsetscounts[0]: if currentoffset < previousoffset: wrap += 2**32 dataoffsets.append(currentoffset + wrap) previousoffset = currentoffset page._offsetscounts = dataoffsets, page._offsetscounts[1]
python
def _lsm_fix_strip_offsets(self): """Unwrap strip offsets for LSM files greater than 4 GB. Each series and position require separate unwrapping (undocumented). """ if self.filehandle.size < 2**32: return pages = self.pages npages = len(pages) series = self.series[0] axes = series.axes # find positions positions = 1 for i in 0, 1: if series.axes[i] in 'PM': positions *= series.shape[i] # make time axis first if positions > 1: ntimes = 0 for i in 1, 2: if axes[i] == 'T': ntimes = series.shape[i] break if ntimes: div, mod = divmod(npages, 2*positions*ntimes) assert mod == 0 shape = (positions, ntimes, div, 2) indices = numpy.arange(product(shape)).reshape(shape) indices = numpy.moveaxis(indices, 1, 0) else: indices = numpy.arange(npages).reshape(-1, 2) # images of reduced page might be stored first if pages[0]._offsetscounts[0][0] > pages[1]._offsetscounts[0][0]: indices = indices[..., ::-1] # unwrap offsets wrap = 0 previousoffset = 0 for i in indices.flat: page = pages[int(i)] dataoffsets = [] for currentoffset in page._offsetscounts[0]: if currentoffset < previousoffset: wrap += 2**32 dataoffsets.append(currentoffset + wrap) previousoffset = currentoffset page._offsetscounts = dataoffsets, page._offsetscounts[1]
[ "def", "_lsm_fix_strip_offsets", "(", "self", ")", ":", "if", "self", ".", "filehandle", ".", "size", "<", "2", "**", "32", ":", "return", "pages", "=", "self", ".", "pages", "npages", "=", "len", "(", "pages", ")", "series", "=", "self", ".", "series", "[", "0", "]", "axes", "=", "series", ".", "axes", "# find positions", "positions", "=", "1", "for", "i", "in", "0", ",", "1", ":", "if", "series", ".", "axes", "[", "i", "]", "in", "'PM'", ":", "positions", "*=", "series", ".", "shape", "[", "i", "]", "# make time axis first", "if", "positions", ">", "1", ":", "ntimes", "=", "0", "for", "i", "in", "1", ",", "2", ":", "if", "axes", "[", "i", "]", "==", "'T'", ":", "ntimes", "=", "series", ".", "shape", "[", "i", "]", "break", "if", "ntimes", ":", "div", ",", "mod", "=", "divmod", "(", "npages", ",", "2", "*", "positions", "*", "ntimes", ")", "assert", "mod", "==", "0", "shape", "=", "(", "positions", ",", "ntimes", ",", "div", ",", "2", ")", "indices", "=", "numpy", ".", "arange", "(", "product", "(", "shape", ")", ")", ".", "reshape", "(", "shape", ")", "indices", "=", "numpy", ".", "moveaxis", "(", "indices", ",", "1", ",", "0", ")", "else", ":", "indices", "=", "numpy", ".", "arange", "(", "npages", ")", ".", "reshape", "(", "-", "1", ",", "2", ")", "# images of reduced page might be stored first", "if", "pages", "[", "0", "]", ".", "_offsetscounts", "[", "0", "]", "[", "0", "]", ">", "pages", "[", "1", "]", ".", "_offsetscounts", "[", "0", "]", "[", "0", "]", ":", "indices", "=", "indices", "[", "...", ",", ":", ":", "-", "1", "]", "# unwrap offsets", "wrap", "=", "0", "previousoffset", "=", "0", "for", "i", "in", "indices", ".", "flat", ":", "page", "=", "pages", "[", "int", "(", "i", ")", "]", "dataoffsets", "=", "[", "]", "for", "currentoffset", "in", "page", ".", "_offsetscounts", "[", "0", "]", ":", "if", "currentoffset", "<", "previousoffset", ":", "wrap", "+=", "2", "**", "32", "dataoffsets", ".", "append", "(", "currentoffset", "+", "wrap", ")", 
"previousoffset", "=", "currentoffset", "page", ".", "_offsetscounts", "=", "dataoffsets", ",", "page", ".", "_offsetscounts", "[", "1", "]" ]
Unwrap strip offsets for LSM files greater than 4 GB. Each series and position require separate unwrapping (undocumented).
[ "Unwrap", "strip", "offsets", "for", "LSM", "files", "greater", "than", "4", "GB", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2752-L2803
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile._lsm_fix_strip_bytecounts
def _lsm_fix_strip_bytecounts(self): """Set databytecounts to size of compressed data. The StripByteCounts tag in LSM files contains the number of bytes for the uncompressed data. """ pages = self.pages if pages[0].compression == 1: return # sort pages by first strip offset pages = sorted(pages, key=lambda p: p._offsetscounts[0][0]) npages = len(pages) - 1 for i, page in enumerate(pages): if page.index % 2: continue offsets, bytecounts = page._offsetscounts if i < npages: lastoffset = pages[i+1]._offsetscounts[0][0] else: # LZW compressed strips might be longer than uncompressed lastoffset = min(offsets[-1] + 2*bytecounts[-1], self._fh.size) for j in range(len(bytecounts) - 1): bytecounts[j] = offsets[j+1] - offsets[j] bytecounts[-1] = lastoffset - offsets[-1]
python
def _lsm_fix_strip_bytecounts(self): """Set databytecounts to size of compressed data. The StripByteCounts tag in LSM files contains the number of bytes for the uncompressed data. """ pages = self.pages if pages[0].compression == 1: return # sort pages by first strip offset pages = sorted(pages, key=lambda p: p._offsetscounts[0][0]) npages = len(pages) - 1 for i, page in enumerate(pages): if page.index % 2: continue offsets, bytecounts = page._offsetscounts if i < npages: lastoffset = pages[i+1]._offsetscounts[0][0] else: # LZW compressed strips might be longer than uncompressed lastoffset = min(offsets[-1] + 2*bytecounts[-1], self._fh.size) for j in range(len(bytecounts) - 1): bytecounts[j] = offsets[j+1] - offsets[j] bytecounts[-1] = lastoffset - offsets[-1]
[ "def", "_lsm_fix_strip_bytecounts", "(", "self", ")", ":", "pages", "=", "self", ".", "pages", "if", "pages", "[", "0", "]", ".", "compression", "==", "1", ":", "return", "# sort pages by first strip offset", "pages", "=", "sorted", "(", "pages", ",", "key", "=", "lambda", "p", ":", "p", ".", "_offsetscounts", "[", "0", "]", "[", "0", "]", ")", "npages", "=", "len", "(", "pages", ")", "-", "1", "for", "i", ",", "page", "in", "enumerate", "(", "pages", ")", ":", "if", "page", ".", "index", "%", "2", ":", "continue", "offsets", ",", "bytecounts", "=", "page", ".", "_offsetscounts", "if", "i", "<", "npages", ":", "lastoffset", "=", "pages", "[", "i", "+", "1", "]", ".", "_offsetscounts", "[", "0", "]", "[", "0", "]", "else", ":", "# LZW compressed strips might be longer than uncompressed", "lastoffset", "=", "min", "(", "offsets", "[", "-", "1", "]", "+", "2", "*", "bytecounts", "[", "-", "1", "]", ",", "self", ".", "_fh", ".", "size", ")", "for", "j", "in", "range", "(", "len", "(", "bytecounts", ")", "-", "1", ")", ":", "bytecounts", "[", "j", "]", "=", "offsets", "[", "j", "+", "1", "]", "-", "offsets", "[", "j", "]", "bytecounts", "[", "-", "1", "]", "=", "lastoffset", "-", "offsets", "[", "-", "1", "]" ]
Set databytecounts to size of compressed data. The StripByteCounts tag in LSM files contains the number of bytes for the uncompressed data.
[ "Set", "databytecounts", "to", "size", "of", "compressed", "data", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2805-L2829
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.is_mdgel
def is_mdgel(self): """File has MD Gel format.""" # TODO: this likely reads the second page from file try: ismdgel = self.pages[0].is_mdgel or self.pages[1].is_mdgel if ismdgel: self.is_uniform = False return ismdgel except IndexError: return False
python
def is_mdgel(self): """File has MD Gel format.""" # TODO: this likely reads the second page from file try: ismdgel = self.pages[0].is_mdgel or self.pages[1].is_mdgel if ismdgel: self.is_uniform = False return ismdgel except IndexError: return False
[ "def", "is_mdgel", "(", "self", ")", ":", "# TODO: this likely reads the second page from file", "try", ":", "ismdgel", "=", "self", ".", "pages", "[", "0", "]", ".", "is_mdgel", "or", "self", ".", "pages", "[", "1", "]", ".", "is_mdgel", "if", "ismdgel", ":", "self", ".", "is_uniform", "=", "False", "return", "ismdgel", "except", "IndexError", ":", "return", "False" ]
File has MD Gel format.
[ "File", "has", "MD", "Gel", "format", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2911-L2920
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.is_uniform
def is_uniform(self): """Return if file contains a uniform series of pages.""" # the hashes of IFDs 0, 7, and -1 are the same pages = self.pages page = pages[0] if page.is_scanimage or page.is_nih: return True try: useframes = pages.useframes pages.useframes = False h = page.hash for i in (1, 7, -1): if pages[i].aspage().hash != h: return False except IndexError: return False finally: pages.useframes = useframes return True
python
def is_uniform(self): """Return if file contains a uniform series of pages.""" # the hashes of IFDs 0, 7, and -1 are the same pages = self.pages page = pages[0] if page.is_scanimage or page.is_nih: return True try: useframes = pages.useframes pages.useframes = False h = page.hash for i in (1, 7, -1): if pages[i].aspage().hash != h: return False except IndexError: return False finally: pages.useframes = useframes return True
[ "def", "is_uniform", "(", "self", ")", ":", "# the hashes of IFDs 0, 7, and -1 are the same", "pages", "=", "self", ".", "pages", "page", "=", "pages", "[", "0", "]", "if", "page", ".", "is_scanimage", "or", "page", ".", "is_nih", ":", "return", "True", "try", ":", "useframes", "=", "pages", ".", "useframes", "pages", ".", "useframes", "=", "False", "h", "=", "page", ".", "hash", "for", "i", "in", "(", "1", ",", "7", ",", "-", "1", ")", ":", "if", "pages", "[", "i", "]", ".", "aspage", "(", ")", ".", "hash", "!=", "h", ":", "return", "False", "except", "IndexError", ":", "return", "False", "finally", ":", "pages", ".", "useframes", "=", "useframes", "return", "True" ]
Return if file contains a uniform series of pages.
[ "Return", "if", "file", "contains", "a", "uniform", "series", "of", "pages", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2923-L2941
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.is_appendable
def is_appendable(self): """Return if pages can be appended to file without corrupting.""" # TODO: check other formats return not (self.is_lsm or self.is_stk or self.is_imagej or self.is_fluoview or self.is_micromanager)
python
def is_appendable(self): """Return if pages can be appended to file without corrupting.""" # TODO: check other formats return not (self.is_lsm or self.is_stk or self.is_imagej or self.is_fluoview or self.is_micromanager)
[ "def", "is_appendable", "(", "self", ")", ":", "# TODO: check other formats", "return", "not", "(", "self", ".", "is_lsm", "or", "self", ".", "is_stk", "or", "self", ".", "is_imagej", "or", "self", ".", "is_fluoview", "or", "self", ".", "is_micromanager", ")" ]
Return if pages can be appended to file without corrupting.
[ "Return", "if", "pages", "can", "be", "appended", "to", "file", "without", "corrupting", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2944-L2948
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.shaped_metadata
def shaped_metadata(self): """Return tifffile metadata from JSON descriptions as dicts.""" if not self.is_shaped: return None return tuple(json_description_metadata(s.pages[0].is_shaped) for s in self.series if s.kind.lower() == 'shaped')
python
def shaped_metadata(self): """Return tifffile metadata from JSON descriptions as dicts.""" if not self.is_shaped: return None return tuple(json_description_metadata(s.pages[0].is_shaped) for s in self.series if s.kind.lower() == 'shaped')
[ "def", "shaped_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_shaped", ":", "return", "None", "return", "tuple", "(", "json_description_metadata", "(", "s", ".", "pages", "[", "0", "]", ".", "is_shaped", ")", "for", "s", "in", "self", ".", "series", "if", "s", ".", "kind", ".", "lower", "(", ")", "==", "'shaped'", ")" ]
Return tifffile metadata from JSON descriptions as dicts.
[ "Return", "tifffile", "metadata", "from", "JSON", "descriptions", "as", "dicts", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2951-L2956
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.stk_metadata
def stk_metadata(self): """Return STK metadata from UIC tags as dict.""" if not self.is_stk: return None page = self.pages[0] tags = page.tags result = {} result['NumberPlanes'] = tags['UIC2tag'].count if page.description: result['PlaneDescriptions'] = page.description.split('\0') # result['plane_descriptions'] = stk_description_metadata( # page.image_description) if 'UIC1tag' in tags: result.update(tags['UIC1tag'].value) if 'UIC3tag' in tags: result.update(tags['UIC3tag'].value) # wavelengths if 'UIC4tag' in tags: result.update(tags['UIC4tag'].value) # override uic1 tags uic2tag = tags['UIC2tag'].value result['ZDistance'] = uic2tag['ZDistance'] result['TimeCreated'] = uic2tag['TimeCreated'] result['TimeModified'] = uic2tag['TimeModified'] try: result['DatetimeCreated'] = numpy.array( [julian_datetime(*dt) for dt in zip(uic2tag['DateCreated'], uic2tag['TimeCreated'])], dtype='datetime64[ns]') result['DatetimeModified'] = numpy.array( [julian_datetime(*dt) for dt in zip(uic2tag['DateModified'], uic2tag['TimeModified'])], dtype='datetime64[ns]') except ValueError as exc: log.warning('STK metadata: %s: %s', exc.__class__.__name__, exc) return result
python
def stk_metadata(self): """Return STK metadata from UIC tags as dict.""" if not self.is_stk: return None page = self.pages[0] tags = page.tags result = {} result['NumberPlanes'] = tags['UIC2tag'].count if page.description: result['PlaneDescriptions'] = page.description.split('\0') # result['plane_descriptions'] = stk_description_metadata( # page.image_description) if 'UIC1tag' in tags: result.update(tags['UIC1tag'].value) if 'UIC3tag' in tags: result.update(tags['UIC3tag'].value) # wavelengths if 'UIC4tag' in tags: result.update(tags['UIC4tag'].value) # override uic1 tags uic2tag = tags['UIC2tag'].value result['ZDistance'] = uic2tag['ZDistance'] result['TimeCreated'] = uic2tag['TimeCreated'] result['TimeModified'] = uic2tag['TimeModified'] try: result['DatetimeCreated'] = numpy.array( [julian_datetime(*dt) for dt in zip(uic2tag['DateCreated'], uic2tag['TimeCreated'])], dtype='datetime64[ns]') result['DatetimeModified'] = numpy.array( [julian_datetime(*dt) for dt in zip(uic2tag['DateModified'], uic2tag['TimeModified'])], dtype='datetime64[ns]') except ValueError as exc: log.warning('STK metadata: %s: %s', exc.__class__.__name__, exc) return result
[ "def", "stk_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_stk", ":", "return", "None", "page", "=", "self", ".", "pages", "[", "0", "]", "tags", "=", "page", ".", "tags", "result", "=", "{", "}", "result", "[", "'NumberPlanes'", "]", "=", "tags", "[", "'UIC2tag'", "]", ".", "count", "if", "page", ".", "description", ":", "result", "[", "'PlaneDescriptions'", "]", "=", "page", ".", "description", ".", "split", "(", "'\\0'", ")", "# result['plane_descriptions'] = stk_description_metadata(", "# page.image_description)", "if", "'UIC1tag'", "in", "tags", ":", "result", ".", "update", "(", "tags", "[", "'UIC1tag'", "]", ".", "value", ")", "if", "'UIC3tag'", "in", "tags", ":", "result", ".", "update", "(", "tags", "[", "'UIC3tag'", "]", ".", "value", ")", "# wavelengths", "if", "'UIC4tag'", "in", "tags", ":", "result", ".", "update", "(", "tags", "[", "'UIC4tag'", "]", ".", "value", ")", "# override uic1 tags", "uic2tag", "=", "tags", "[", "'UIC2tag'", "]", ".", "value", "result", "[", "'ZDistance'", "]", "=", "uic2tag", "[", "'ZDistance'", "]", "result", "[", "'TimeCreated'", "]", "=", "uic2tag", "[", "'TimeCreated'", "]", "result", "[", "'TimeModified'", "]", "=", "uic2tag", "[", "'TimeModified'", "]", "try", ":", "result", "[", "'DatetimeCreated'", "]", "=", "numpy", ".", "array", "(", "[", "julian_datetime", "(", "*", "dt", ")", "for", "dt", "in", "zip", "(", "uic2tag", "[", "'DateCreated'", "]", ",", "uic2tag", "[", "'TimeCreated'", "]", ")", "]", ",", "dtype", "=", "'datetime64[ns]'", ")", "result", "[", "'DatetimeModified'", "]", "=", "numpy", ".", "array", "(", "[", "julian_datetime", "(", "*", "dt", ")", "for", "dt", "in", "zip", "(", "uic2tag", "[", "'DateModified'", "]", ",", "uic2tag", "[", "'TimeModified'", "]", ")", "]", ",", "dtype", "=", "'datetime64[ns]'", ")", "except", "ValueError", "as", "exc", ":", "log", ".", "warning", "(", "'STK metadata: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "return", 
"result" ]
Return STK metadata from UIC tags as dict.
[ "Return", "STK", "metadata", "from", "UIC", "tags", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L2974-L3007
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.imagej_metadata
def imagej_metadata(self): """Return consolidated ImageJ metadata as dict.""" if not self.is_imagej: return None page = self.pages[0] result = imagej_description_metadata(page.is_imagej) if 'IJMetadata' in page.tags: try: result.update(page.tags['IJMetadata'].value) except Exception: pass return result
python
def imagej_metadata(self): """Return consolidated ImageJ metadata as dict.""" if not self.is_imagej: return None page = self.pages[0] result = imagej_description_metadata(page.is_imagej) if 'IJMetadata' in page.tags: try: result.update(page.tags['IJMetadata'].value) except Exception: pass return result
[ "def", "imagej_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_imagej", ":", "return", "None", "page", "=", "self", ".", "pages", "[", "0", "]", "result", "=", "imagej_description_metadata", "(", "page", ".", "is_imagej", ")", "if", "'IJMetadata'", "in", "page", ".", "tags", ":", "try", ":", "result", ".", "update", "(", "page", ".", "tags", "[", "'IJMetadata'", "]", ".", "value", ")", "except", "Exception", ":", "pass", "return", "result" ]
Return consolidated ImageJ metadata as dict.
[ "Return", "consolidated", "ImageJ", "metadata", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3010-L3021
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.fluoview_metadata
def fluoview_metadata(self): """Return consolidated FluoView metadata as dict.""" if not self.is_fluoview: return None result = {} page = self.pages[0] result.update(page.tags['MM_Header'].value) # TODO: read stamps from all pages result['Stamp'] = page.tags['MM_Stamp'].value # skip parsing image description; not reliable # try: # t = fluoview_description_metadata(page.image_description) # if t is not None: # result['ImageDescription'] = t # except Exception as exc: # log.warning('FluoView metadata: ' # 'failed to parse image description (%s)', str(exc)) return result
python
def fluoview_metadata(self): """Return consolidated FluoView metadata as dict.""" if not self.is_fluoview: return None result = {} page = self.pages[0] result.update(page.tags['MM_Header'].value) # TODO: read stamps from all pages result['Stamp'] = page.tags['MM_Stamp'].value # skip parsing image description; not reliable # try: # t = fluoview_description_metadata(page.image_description) # if t is not None: # result['ImageDescription'] = t # except Exception as exc: # log.warning('FluoView metadata: ' # 'failed to parse image description (%s)', str(exc)) return result
[ "def", "fluoview_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_fluoview", ":", "return", "None", "result", "=", "{", "}", "page", "=", "self", ".", "pages", "[", "0", "]", "result", ".", "update", "(", "page", ".", "tags", "[", "'MM_Header'", "]", ".", "value", ")", "# TODO: read stamps from all pages", "result", "[", "'Stamp'", "]", "=", "page", ".", "tags", "[", "'MM_Stamp'", "]", ".", "value", "# skip parsing image description; not reliable", "# try:", "# t = fluoview_description_metadata(page.image_description)", "# if t is not None:", "# result['ImageDescription'] = t", "# except Exception as exc:", "# log.warning('FluoView metadata: '", "# 'failed to parse image description (%s)', str(exc))", "return", "result" ]
Return consolidated FluoView metadata as dict.
[ "Return", "consolidated", "FluoView", "metadata", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3024-L3041
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.fei_metadata
def fei_metadata(self): """Return FEI metadata from SFEG or HELIOS tags as dict.""" if not self.is_fei: return None tags = self.pages[0].tags if 'FEI_SFEG' in tags: return tags['FEI_SFEG'].value if 'FEI_HELIOS' in tags: return tags['FEI_HELIOS'].value return None
python
def fei_metadata(self): """Return FEI metadata from SFEG or HELIOS tags as dict.""" if not self.is_fei: return None tags = self.pages[0].tags if 'FEI_SFEG' in tags: return tags['FEI_SFEG'].value if 'FEI_HELIOS' in tags: return tags['FEI_HELIOS'].value return None
[ "def", "fei_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_fei", ":", "return", "None", "tags", "=", "self", ".", "pages", "[", "0", "]", ".", "tags", "if", "'FEI_SFEG'", "in", "tags", ":", "return", "tags", "[", "'FEI_SFEG'", "]", ".", "value", "if", "'FEI_HELIOS'", "in", "tags", ":", "return", "tags", "[", "'FEI_HELIOS'", "]", ".", "value", "return", "None" ]
Return FEI metadata from SFEG or HELIOS tags as dict.
[ "Return", "FEI", "metadata", "from", "SFEG", "or", "HELIOS", "tags", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3051-L3060
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.sis_metadata
def sis_metadata(self): """Return Olympus SIS metadata from SIS and INI tags as dict.""" if not self.is_sis: return None tags = self.pages[0].tags result = {} try: result.update(tags['OlympusINI'].value) except Exception: pass try: result.update(tags['OlympusSIS'].value) except Exception: pass return result
python
def sis_metadata(self): """Return Olympus SIS metadata from SIS and INI tags as dict.""" if not self.is_sis: return None tags = self.pages[0].tags result = {} try: result.update(tags['OlympusINI'].value) except Exception: pass try: result.update(tags['OlympusSIS'].value) except Exception: pass return result
[ "def", "sis_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_sis", ":", "return", "None", "tags", "=", "self", ".", "pages", "[", "0", "]", ".", "tags", "result", "=", "{", "}", "try", ":", "result", ".", "update", "(", "tags", "[", "'OlympusINI'", "]", ".", "value", ")", "except", "Exception", ":", "pass", "try", ":", "result", ".", "update", "(", "tags", "[", "'OlympusSIS'", "]", ".", "value", ")", "except", "Exception", ":", "pass", "return", "result" ]
Return Olympus SIS metadata from SIS and INI tags as dict.
[ "Return", "Olympus", "SIS", "metadata", "from", "SIS", "and", "INI", "tags", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3070-L3084
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.mdgel_metadata
def mdgel_metadata(self): """Return consolidated metadata from MD GEL tags as dict.""" for page in self.pages[:2]: if 'MDFileTag' in page.tags: tags = page.tags break else: return None result = {} for code in range(33445, 33453): name = TIFF.TAGS[code] if name not in tags: continue result[name[2:]] = tags[name].value return result
python
def mdgel_metadata(self): """Return consolidated metadata from MD GEL tags as dict.""" for page in self.pages[:2]: if 'MDFileTag' in page.tags: tags = page.tags break else: return None result = {} for code in range(33445, 33453): name = TIFF.TAGS[code] if name not in tags: continue result[name[2:]] = tags[name].value return result
[ "def", "mdgel_metadata", "(", "self", ")", ":", "for", "page", "in", "self", ".", "pages", "[", ":", "2", "]", ":", "if", "'MDFileTag'", "in", "page", ".", "tags", ":", "tags", "=", "page", ".", "tags", "break", "else", ":", "return", "None", "result", "=", "{", "}", "for", "code", "in", "range", "(", "33445", ",", "33453", ")", ":", "name", "=", "TIFF", ".", "TAGS", "[", "code", "]", "if", "name", "not", "in", "tags", ":", "continue", "result", "[", "name", "[", "2", ":", "]", "]", "=", "tags", "[", "name", "]", ".", "value", "return", "result" ]
Return consolidated metadata from MD GEL tags as dict.
[ "Return", "consolidated", "metadata", "from", "MD", "GEL", "tags", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3087-L3101
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.micromanager_metadata
def micromanager_metadata(self): """Return consolidated MicroManager metadata as dict.""" if not self.is_micromanager: return None # from file header result = read_micromanager_metadata(self._fh) # from tag result.update(self.pages[0].tags['MicroManagerMetadata'].value) return result
python
def micromanager_metadata(self): """Return consolidated MicroManager metadata as dict.""" if not self.is_micromanager: return None # from file header result = read_micromanager_metadata(self._fh) # from tag result.update(self.pages[0].tags['MicroManagerMetadata'].value) return result
[ "def", "micromanager_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_micromanager", ":", "return", "None", "# from file header", "result", "=", "read_micromanager_metadata", "(", "self", ".", "_fh", ")", "# from tag", "result", ".", "update", "(", "self", ".", "pages", "[", "0", "]", ".", "tags", "[", "'MicroManagerMetadata'", "]", ".", "value", ")", "return", "result" ]
Return consolidated MicroManager metadata as dict.
[ "Return", "consolidated", "MicroManager", "metadata", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3135-L3143
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFile.scanimage_metadata
def scanimage_metadata(self): """Return ScanImage non-varying frame and ROI metadata as dict.""" if not self.is_scanimage: return None result = {} try: framedata, roidata = read_scanimage_metadata(self._fh) result['FrameData'] = framedata result.update(roidata) except ValueError: pass # TODO: scanimage_artist_metadata try: result['Description'] = scanimage_description_metadata( self.pages[0].description) except Exception as exc: log.warning('ScanImage metadata: %s: %s', exc.__class__.__name__, exc) return result
python
def scanimage_metadata(self): """Return ScanImage non-varying frame and ROI metadata as dict.""" if not self.is_scanimage: return None result = {} try: framedata, roidata = read_scanimage_metadata(self._fh) result['FrameData'] = framedata result.update(roidata) except ValueError: pass # TODO: scanimage_artist_metadata try: result['Description'] = scanimage_description_metadata( self.pages[0].description) except Exception as exc: log.warning('ScanImage metadata: %s: %s', exc.__class__.__name__, exc) return result
[ "def", "scanimage_metadata", "(", "self", ")", ":", "if", "not", "self", ".", "is_scanimage", ":", "return", "None", "result", "=", "{", "}", "try", ":", "framedata", ",", "roidata", "=", "read_scanimage_metadata", "(", "self", ".", "_fh", ")", "result", "[", "'FrameData'", "]", "=", "framedata", "result", ".", "update", "(", "roidata", ")", "except", "ValueError", ":", "pass", "# TODO: scanimage_artist_metadata", "try", ":", "result", "[", "'Description'", "]", "=", "scanimage_description_metadata", "(", "self", ".", "pages", "[", "0", "]", ".", "description", ")", "except", "Exception", "as", "exc", ":", "log", ".", "warning", "(", "'ScanImage metadata: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "return", "result" ]
Return ScanImage non-varying frame and ROI metadata as dict.
[ "Return", "ScanImage", "non", "-", "varying", "frame", "and", "ROI", "metadata", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3146-L3164
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPages.cache
def cache(self, value): """Enable or disable caching of pages/frames. Clear cache if False.""" value = bool(value) if self._cache and not value: self._clear() self._cache = value
python
def cache(self, value): """Enable or disable caching of pages/frames. Clear cache if False.""" value = bool(value) if self._cache and not value: self._clear() self._cache = value
[ "def", "cache", "(", "self", ",", "value", ")", ":", "value", "=", "bool", "(", "value", ")", "if", "self", ".", "_cache", "and", "not", "value", ":", "self", ".", "_clear", "(", ")", "self", ".", "_cache", "=", "value" ]
Enable or disable caching of pages/frames. Clear cache if False.
[ "Enable", "or", "disable", "caching", "of", "pages", "/", "frames", ".", "Clear", "cache", "if", "False", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3241-L3246
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPages.keyframe
def keyframe(self, index): """Set current keyframe. Load TiffPage from file if necessary.""" index = int(index) if index < 0: index %= len(self) if self._keyframe.index == index: return if index == 0: self._keyframe = self.pages[0] return if self._indexed or index < len(self.pages): page = self.pages[index] if isinstance(page, TiffPage): self._keyframe = page return if isinstance(page, TiffFrame): # remove existing TiffFrame self.pages[index] = page.offset # load TiffPage from file tiffpage = self._tiffpage self._tiffpage = TiffPage try: self._keyframe = self._getitem(index) finally: self._tiffpage = tiffpage # always cache keyframes self.pages[index] = self._keyframe
python
def keyframe(self, index): """Set current keyframe. Load TiffPage from file if necessary.""" index = int(index) if index < 0: index %= len(self) if self._keyframe.index == index: return if index == 0: self._keyframe = self.pages[0] return if self._indexed or index < len(self.pages): page = self.pages[index] if isinstance(page, TiffPage): self._keyframe = page return if isinstance(page, TiffFrame): # remove existing TiffFrame self.pages[index] = page.offset # load TiffPage from file tiffpage = self._tiffpage self._tiffpage = TiffPage try: self._keyframe = self._getitem(index) finally: self._tiffpage = tiffpage # always cache keyframes self.pages[index] = self._keyframe
[ "def", "keyframe", "(", "self", ",", "index", ")", ":", "index", "=", "int", "(", "index", ")", "if", "index", "<", "0", ":", "index", "%=", "len", "(", "self", ")", "if", "self", ".", "_keyframe", ".", "index", "==", "index", ":", "return", "if", "index", "==", "0", ":", "self", ".", "_keyframe", "=", "self", ".", "pages", "[", "0", "]", "return", "if", "self", ".", "_indexed", "or", "index", "<", "len", "(", "self", ".", "pages", ")", ":", "page", "=", "self", ".", "pages", "[", "index", "]", "if", "isinstance", "(", "page", ",", "TiffPage", ")", ":", "self", ".", "_keyframe", "=", "page", "return", "if", "isinstance", "(", "page", ",", "TiffFrame", ")", ":", "# remove existing TiffFrame", "self", ".", "pages", "[", "index", "]", "=", "page", ".", "offset", "# load TiffPage from file", "tiffpage", "=", "self", ".", "_tiffpage", "self", ".", "_tiffpage", "=", "TiffPage", "try", ":", "self", ".", "_keyframe", "=", "self", ".", "_getitem", "(", "index", ")", "finally", ":", "self", ".", "_tiffpage", "=", "tiffpage", "# always cache keyframes", "self", ".", "pages", "[", "index", "]", "=", "self", ".", "_keyframe" ]
Set current keyframe. Load TiffPage from file if necessary.
[ "Set", "current", "keyframe", ".", "Load", "TiffPage", "from", "file", "if", "necessary", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3264-L3290
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPages._load
def _load(self, keyframe=True): """Read all remaining pages from file.""" if self._cached: return pages = self.pages if not pages: return if not self._indexed: self._seek(-1) if not self._cache: return fh = self.parent.filehandle if keyframe is not None: keyframe = self._keyframe for i, page in enumerate(pages): if isinstance(page, inttypes): fh.seek(page) page = self._tiffpage(self.parent, index=i, keyframe=keyframe) pages[i] = page self._cached = True
python
def _load(self, keyframe=True): """Read all remaining pages from file.""" if self._cached: return pages = self.pages if not pages: return if not self._indexed: self._seek(-1) if not self._cache: return fh = self.parent.filehandle if keyframe is not None: keyframe = self._keyframe for i, page in enumerate(pages): if isinstance(page, inttypes): fh.seek(page) page = self._tiffpage(self.parent, index=i, keyframe=keyframe) pages[i] = page self._cached = True
[ "def", "_load", "(", "self", ",", "keyframe", "=", "True", ")", ":", "if", "self", ".", "_cached", ":", "return", "pages", "=", "self", ".", "pages", "if", "not", "pages", ":", "return", "if", "not", "self", ".", "_indexed", ":", "self", ".", "_seek", "(", "-", "1", ")", "if", "not", "self", ".", "_cache", ":", "return", "fh", "=", "self", ".", "parent", ".", "filehandle", "if", "keyframe", "is", "not", "None", ":", "keyframe", "=", "self", ".", "_keyframe", "for", "i", ",", "page", "in", "enumerate", "(", "pages", ")", ":", "if", "isinstance", "(", "page", ",", "inttypes", ")", ":", "fh", ".", "seek", "(", "page", ")", "page", "=", "self", ".", "_tiffpage", "(", "self", ".", "parent", ",", "index", "=", "i", ",", "keyframe", "=", "keyframe", ")", "pages", "[", "i", "]", "=", "page", "self", ".", "_cached", "=", "True" ]
Read all remaining pages from file.
[ "Read", "all", "remaining", "pages", "from", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3299-L3318
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPages._load_virtual_frames
def _load_virtual_frames(self): """Calculate virtual TiffFrames.""" pages = self.pages try: if sys.version_info[0] == 2: raise ValueError('not supported on Python 2') if len(pages) > 1: raise ValueError('pages already loaded') page = pages[0] bytecounts = page._offsetscounts[1] if len(bytecounts) != 1: raise ValueError('data not contiguous') self._seek(4) delta = pages[2] - pages[1] if pages[3] - pages[2] != delta or pages[4] - pages[3] != delta: raise ValueError('page offsets not equidistant') page1 = self._getitem(1, validate=page.hash) offsetoffset = page1._offsetscounts[0][0] - page1.offset if offsetoffset < 0 or offsetoffset > delta: raise ValueError('page offsets not equidistant') pages = [page, page1] filesize = self.parent.filehandle.size - delta for index, offset in enumerate(range(page1.offset+delta, filesize, delta)): offsets = [offset + offsetoffset] offset = offset if offset < 2**31 else None pages.append( TiffFrame(parent=page.parent, index=index+2, offset=None, offsets=offsets, bytecounts=bytecounts, keyframe=page)) except Exception as exc: log.warning( 'TiffPages: failed to load virtual frames: %s', str(exc)) assert pages[1] self.pages = pages self._cache = True self._cached = True self._indexed = True
python
def _load_virtual_frames(self): """Calculate virtual TiffFrames.""" pages = self.pages try: if sys.version_info[0] == 2: raise ValueError('not supported on Python 2') if len(pages) > 1: raise ValueError('pages already loaded') page = pages[0] bytecounts = page._offsetscounts[1] if len(bytecounts) != 1: raise ValueError('data not contiguous') self._seek(4) delta = pages[2] - pages[1] if pages[3] - pages[2] != delta or pages[4] - pages[3] != delta: raise ValueError('page offsets not equidistant') page1 = self._getitem(1, validate=page.hash) offsetoffset = page1._offsetscounts[0][0] - page1.offset if offsetoffset < 0 or offsetoffset > delta: raise ValueError('page offsets not equidistant') pages = [page, page1] filesize = self.parent.filehandle.size - delta for index, offset in enumerate(range(page1.offset+delta, filesize, delta)): offsets = [offset + offsetoffset] offset = offset if offset < 2**31 else None pages.append( TiffFrame(parent=page.parent, index=index+2, offset=None, offsets=offsets, bytecounts=bytecounts, keyframe=page)) except Exception as exc: log.warning( 'TiffPages: failed to load virtual frames: %s', str(exc)) assert pages[1] self.pages = pages self._cache = True self._cached = True self._indexed = True
[ "def", "_load_virtual_frames", "(", "self", ")", ":", "pages", "=", "self", ".", "pages", "try", ":", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "raise", "ValueError", "(", "'not supported on Python 2'", ")", "if", "len", "(", "pages", ")", ">", "1", ":", "raise", "ValueError", "(", "'pages already loaded'", ")", "page", "=", "pages", "[", "0", "]", "bytecounts", "=", "page", ".", "_offsetscounts", "[", "1", "]", "if", "len", "(", "bytecounts", ")", "!=", "1", ":", "raise", "ValueError", "(", "'data not contiguous'", ")", "self", ".", "_seek", "(", "4", ")", "delta", "=", "pages", "[", "2", "]", "-", "pages", "[", "1", "]", "if", "pages", "[", "3", "]", "-", "pages", "[", "2", "]", "!=", "delta", "or", "pages", "[", "4", "]", "-", "pages", "[", "3", "]", "!=", "delta", ":", "raise", "ValueError", "(", "'page offsets not equidistant'", ")", "page1", "=", "self", ".", "_getitem", "(", "1", ",", "validate", "=", "page", ".", "hash", ")", "offsetoffset", "=", "page1", ".", "_offsetscounts", "[", "0", "]", "[", "0", "]", "-", "page1", ".", "offset", "if", "offsetoffset", "<", "0", "or", "offsetoffset", ">", "delta", ":", "raise", "ValueError", "(", "'page offsets not equidistant'", ")", "pages", "=", "[", "page", ",", "page1", "]", "filesize", "=", "self", ".", "parent", ".", "filehandle", ".", "size", "-", "delta", "for", "index", ",", "offset", "in", "enumerate", "(", "range", "(", "page1", ".", "offset", "+", "delta", ",", "filesize", ",", "delta", ")", ")", ":", "offsets", "=", "[", "offset", "+", "offsetoffset", "]", "offset", "=", "offset", "if", "offset", "<", "2", "**", "31", "else", "None", "pages", ".", "append", "(", "TiffFrame", "(", "parent", "=", "page", ".", "parent", ",", "index", "=", "index", "+", "2", ",", "offset", "=", "None", ",", "offsets", "=", "offsets", ",", "bytecounts", "=", "bytecounts", ",", "keyframe", "=", "page", ")", ")", "except", "Exception", "as", "exc", ":", "log", ".", "warning", "(", "'TiffPages: failed to 
load virtual frames: %s'", ",", "str", "(", "exc", ")", ")", "assert", "pages", "[", "1", "]", "self", ".", "pages", "=", "pages", "self", ".", "_cache", "=", "True", "self", ".", "_cached", "=", "True", "self", ".", "_indexed", "=", "True" ]
Calculate virtual TiffFrames.
[ "Calculate", "virtual", "TiffFrames", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3320-L3357
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPages._clear
def _clear(self, fully=True): """Delete all but first page from cache. Set keyframe to first page.""" pages = self.pages if not pages: return self._keyframe = pages[0] if fully: # delete all but first TiffPage/TiffFrame for i, page in enumerate(pages[1:]): if not isinstance(page, inttypes) and page.offset is not None: pages[i+1] = page.offset elif TiffFrame is not TiffPage: # delete only TiffFrames for i, page in enumerate(pages): if isinstance(page, TiffFrame) and page.offset is not None: pages[i] = page.offset self._cached = False
python
def _clear(self, fully=True): """Delete all but first page from cache. Set keyframe to first page.""" pages = self.pages if not pages: return self._keyframe = pages[0] if fully: # delete all but first TiffPage/TiffFrame for i, page in enumerate(pages[1:]): if not isinstance(page, inttypes) and page.offset is not None: pages[i+1] = page.offset elif TiffFrame is not TiffPage: # delete only TiffFrames for i, page in enumerate(pages): if isinstance(page, TiffFrame) and page.offset is not None: pages[i] = page.offset self._cached = False
[ "def", "_clear", "(", "self", ",", "fully", "=", "True", ")", ":", "pages", "=", "self", ".", "pages", "if", "not", "pages", ":", "return", "self", ".", "_keyframe", "=", "pages", "[", "0", "]", "if", "fully", ":", "# delete all but first TiffPage/TiffFrame", "for", "i", ",", "page", "in", "enumerate", "(", "pages", "[", "1", ":", "]", ")", ":", "if", "not", "isinstance", "(", "page", ",", "inttypes", ")", "and", "page", ".", "offset", "is", "not", "None", ":", "pages", "[", "i", "+", "1", "]", "=", "page", ".", "offset", "elif", "TiffFrame", "is", "not", "TiffPage", ":", "# delete only TiffFrames", "for", "i", ",", "page", "in", "enumerate", "(", "pages", ")", ":", "if", "isinstance", "(", "page", ",", "TiffFrame", ")", "and", "page", ".", "offset", "is", "not", "None", ":", "pages", "[", "i", "]", "=", "page", ".", "offset", "self", ".", "_cached", "=", "False" ]
Delete all but first page from cache. Set keyframe to first page.
[ "Delete", "all", "but", "first", "page", "from", "cache", ".", "Set", "keyframe", "to", "first", "page", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3359-L3375
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPages._seek
def _seek(self, index, maxpages=None): """Seek file to offset of page specified by index.""" pages = self.pages lenpages = len(pages) if lenpages == 0: raise IndexError('index out of range') fh = self.parent.filehandle if fh.closed: raise ValueError('seek of closed file') if self._indexed or 0 <= index < lenpages: page = pages[index] offset = page if isinstance(page, inttypes) else page.offset fh.seek(offset) return tiff = self.parent.tiff offsetformat = tiff.ifdoffsetformat offsetsize = tiff.ifdoffsetsize tagnoformat = tiff.tagnoformat tagnosize = tiff.tagnosize tagsize = tiff.tagsize unpack = struct.unpack page = pages[-1] offset = page if isinstance(page, inttypes) else page.offset if maxpages is None: maxpages = 2**22 while lenpages < maxpages: # read offsets to pages from file until index is reached fh.seek(offset) # skip tags try: tagno = unpack(tagnoformat, fh.read(tagnosize))[0] if tagno > 4096: raise TiffFileError( 'suspicious number of tags: %i' % tagno) except Exception: log.warning('TiffPages: corrupted tag list of page %i @ %i', lenpages, offset) del pages[-1] lenpages -= 1 self._indexed = True break self._nextpageoffset = offset + tagnosize + tagno * tagsize fh.seek(self._nextpageoffset) # read offset to next page offset = unpack(offsetformat, fh.read(offsetsize))[0] if offset == 0: self._indexed = True break if offset >= fh.size: log.warning('TiffPages: invalid page offset (%i)', offset) self._indexed = True break pages.append(offset) lenpages += 1 if 0 <= index < lenpages: break # detect some circular references if lenpages == 100: for p in pages[:-1]: if offset == (p if isinstance(p, inttypes) else p.offset): raise TiffFileError('invalid circular IFD reference') if index >= lenpages: raise IndexError('index out of range') page = pages[index] fh.seek(page if isinstance(page, inttypes) else page.offset)
python
def _seek(self, index, maxpages=None): """Seek file to offset of page specified by index.""" pages = self.pages lenpages = len(pages) if lenpages == 0: raise IndexError('index out of range') fh = self.parent.filehandle if fh.closed: raise ValueError('seek of closed file') if self._indexed or 0 <= index < lenpages: page = pages[index] offset = page if isinstance(page, inttypes) else page.offset fh.seek(offset) return tiff = self.parent.tiff offsetformat = tiff.ifdoffsetformat offsetsize = tiff.ifdoffsetsize tagnoformat = tiff.tagnoformat tagnosize = tiff.tagnosize tagsize = tiff.tagsize unpack = struct.unpack page = pages[-1] offset = page if isinstance(page, inttypes) else page.offset if maxpages is None: maxpages = 2**22 while lenpages < maxpages: # read offsets to pages from file until index is reached fh.seek(offset) # skip tags try: tagno = unpack(tagnoformat, fh.read(tagnosize))[0] if tagno > 4096: raise TiffFileError( 'suspicious number of tags: %i' % tagno) except Exception: log.warning('TiffPages: corrupted tag list of page %i @ %i', lenpages, offset) del pages[-1] lenpages -= 1 self._indexed = True break self._nextpageoffset = offset + tagnosize + tagno * tagsize fh.seek(self._nextpageoffset) # read offset to next page offset = unpack(offsetformat, fh.read(offsetsize))[0] if offset == 0: self._indexed = True break if offset >= fh.size: log.warning('TiffPages: invalid page offset (%i)', offset) self._indexed = True break pages.append(offset) lenpages += 1 if 0 <= index < lenpages: break # detect some circular references if lenpages == 100: for p in pages[:-1]: if offset == (p if isinstance(p, inttypes) else p.offset): raise TiffFileError('invalid circular IFD reference') if index >= lenpages: raise IndexError('index out of range') page = pages[index] fh.seek(page if isinstance(page, inttypes) else page.offset)
[ "def", "_seek", "(", "self", ",", "index", ",", "maxpages", "=", "None", ")", ":", "pages", "=", "self", ".", "pages", "lenpages", "=", "len", "(", "pages", ")", "if", "lenpages", "==", "0", ":", "raise", "IndexError", "(", "'index out of range'", ")", "fh", "=", "self", ".", "parent", ".", "filehandle", "if", "fh", ".", "closed", ":", "raise", "ValueError", "(", "'seek of closed file'", ")", "if", "self", ".", "_indexed", "or", "0", "<=", "index", "<", "lenpages", ":", "page", "=", "pages", "[", "index", "]", "offset", "=", "page", "if", "isinstance", "(", "page", ",", "inttypes", ")", "else", "page", ".", "offset", "fh", ".", "seek", "(", "offset", ")", "return", "tiff", "=", "self", ".", "parent", ".", "tiff", "offsetformat", "=", "tiff", ".", "ifdoffsetformat", "offsetsize", "=", "tiff", ".", "ifdoffsetsize", "tagnoformat", "=", "tiff", ".", "tagnoformat", "tagnosize", "=", "tiff", ".", "tagnosize", "tagsize", "=", "tiff", ".", "tagsize", "unpack", "=", "struct", ".", "unpack", "page", "=", "pages", "[", "-", "1", "]", "offset", "=", "page", "if", "isinstance", "(", "page", ",", "inttypes", ")", "else", "page", ".", "offset", "if", "maxpages", "is", "None", ":", "maxpages", "=", "2", "**", "22", "while", "lenpages", "<", "maxpages", ":", "# read offsets to pages from file until index is reached", "fh", ".", "seek", "(", "offset", ")", "# skip tags", "try", ":", "tagno", "=", "unpack", "(", "tagnoformat", ",", "fh", ".", "read", "(", "tagnosize", ")", ")", "[", "0", "]", "if", "tagno", ">", "4096", ":", "raise", "TiffFileError", "(", "'suspicious number of tags: %i'", "%", "tagno", ")", "except", "Exception", ":", "log", ".", "warning", "(", "'TiffPages: corrupted tag list of page %i @ %i'", ",", "lenpages", ",", "offset", ")", "del", "pages", "[", "-", "1", "]", "lenpages", "-=", "1", "self", ".", "_indexed", "=", "True", "break", "self", ".", "_nextpageoffset", "=", "offset", "+", "tagnosize", "+", "tagno", "*", "tagsize", "fh", ".", "seek", "(", "self", 
".", "_nextpageoffset", ")", "# read offset to next page", "offset", "=", "unpack", "(", "offsetformat", ",", "fh", ".", "read", "(", "offsetsize", ")", ")", "[", "0", "]", "if", "offset", "==", "0", ":", "self", ".", "_indexed", "=", "True", "break", "if", "offset", ">=", "fh", ".", "size", ":", "log", ".", "warning", "(", "'TiffPages: invalid page offset (%i)'", ",", "offset", ")", "self", ".", "_indexed", "=", "True", "break", "pages", ".", "append", "(", "offset", ")", "lenpages", "+=", "1", "if", "0", "<=", "index", "<", "lenpages", ":", "break", "# detect some circular references", "if", "lenpages", "==", "100", ":", "for", "p", "in", "pages", "[", ":", "-", "1", "]", ":", "if", "offset", "==", "(", "p", "if", "isinstance", "(", "p", ",", "inttypes", ")", "else", "p", ".", "offset", ")", ":", "raise", "TiffFileError", "(", "'invalid circular IFD reference'", ")", "if", "index", ">=", "lenpages", ":", "raise", "IndexError", "(", "'index out of range'", ")", "page", "=", "pages", "[", "index", "]", "fh", ".", "seek", "(", "page", "if", "isinstance", "(", "page", ",", "inttypes", ")", "else", "page", ".", "offset", ")" ]
Seek file to offset of page specified by index.
[ "Seek", "file", "to", "offset", "of", "page", "specified", "by", "index", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3377-L3451
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPages._getlist
def _getlist(self, key=None, useframes=True, validate=True): """Return specified pages as list of TiffPages or TiffFrames. The first item is a TiffPage, and is used as a keyframe for following TiffFrames. """ getitem = self._getitem _useframes = self.useframes if key is None: key = iter(range(len(self))) elif isinstance(key, Iterable): key = iter(key) elif isinstance(key, slice): start, stop, _ = key.indices(2**31-1) if not self._indexed and max(stop, start) > len(self.pages): self._seek(-1) key = iter(range(*key.indices(len(self.pages)))) elif isinstance(key, inttypes): # return single TiffPage self.useframes = False if key == 0: return [self.pages[key]] try: return [getitem(key)] finally: self.useframes = _useframes else: raise TypeError('key must be an integer, slice, or iterable') # use first page as keyframe keyframe = self._keyframe self.keyframe = next(key) if validate: validate = self._keyframe.hash if useframes: self.useframes = True try: pages = [getitem(i, validate) for i in key] pages.insert(0, self._keyframe) finally: # restore state self._keyframe = keyframe if useframes: self.useframes = _useframes return pages
python
def _getlist(self, key=None, useframes=True, validate=True): """Return specified pages as list of TiffPages or TiffFrames. The first item is a TiffPage, and is used as a keyframe for following TiffFrames. """ getitem = self._getitem _useframes = self.useframes if key is None: key = iter(range(len(self))) elif isinstance(key, Iterable): key = iter(key) elif isinstance(key, slice): start, stop, _ = key.indices(2**31-1) if not self._indexed and max(stop, start) > len(self.pages): self._seek(-1) key = iter(range(*key.indices(len(self.pages)))) elif isinstance(key, inttypes): # return single TiffPage self.useframes = False if key == 0: return [self.pages[key]] try: return [getitem(key)] finally: self.useframes = _useframes else: raise TypeError('key must be an integer, slice, or iterable') # use first page as keyframe keyframe = self._keyframe self.keyframe = next(key) if validate: validate = self._keyframe.hash if useframes: self.useframes = True try: pages = [getitem(i, validate) for i in key] pages.insert(0, self._keyframe) finally: # restore state self._keyframe = keyframe if useframes: self.useframes = _useframes return pages
[ "def", "_getlist", "(", "self", ",", "key", "=", "None", ",", "useframes", "=", "True", ",", "validate", "=", "True", ")", ":", "getitem", "=", "self", ".", "_getitem", "_useframes", "=", "self", ".", "useframes", "if", "key", "is", "None", ":", "key", "=", "iter", "(", "range", "(", "len", "(", "self", ")", ")", ")", "elif", "isinstance", "(", "key", ",", "Iterable", ")", ":", "key", "=", "iter", "(", "key", ")", "elif", "isinstance", "(", "key", ",", "slice", ")", ":", "start", ",", "stop", ",", "_", "=", "key", ".", "indices", "(", "2", "**", "31", "-", "1", ")", "if", "not", "self", ".", "_indexed", "and", "max", "(", "stop", ",", "start", ")", ">", "len", "(", "self", ".", "pages", ")", ":", "self", ".", "_seek", "(", "-", "1", ")", "key", "=", "iter", "(", "range", "(", "*", "key", ".", "indices", "(", "len", "(", "self", ".", "pages", ")", ")", ")", ")", "elif", "isinstance", "(", "key", ",", "inttypes", ")", ":", "# return single TiffPage", "self", ".", "useframes", "=", "False", "if", "key", "==", "0", ":", "return", "[", "self", ".", "pages", "[", "key", "]", "]", "try", ":", "return", "[", "getitem", "(", "key", ")", "]", "finally", ":", "self", ".", "useframes", "=", "_useframes", "else", ":", "raise", "TypeError", "(", "'key must be an integer, slice, or iterable'", ")", "# use first page as keyframe", "keyframe", "=", "self", ".", "_keyframe", "self", ".", "keyframe", "=", "next", "(", "key", ")", "if", "validate", ":", "validate", "=", "self", ".", "_keyframe", ".", "hash", "if", "useframes", ":", "self", ".", "useframes", "=", "True", "try", ":", "pages", "=", "[", "getitem", "(", "i", ",", "validate", ")", "for", "i", "in", "key", "]", "pages", ".", "insert", "(", "0", ",", "self", ".", "_keyframe", ")", "finally", ":", "# restore state", "self", ".", "_keyframe", "=", "keyframe", "if", "useframes", ":", "self", ".", "useframes", "=", "_useframes", "return", "pages" ]
Return specified pages as list of TiffPages or TiffFrames. The first item is a TiffPage, and is used as a keyframe for following TiffFrames.
[ "Return", "specified", "pages", "as", "list", "of", "TiffPages", "or", "TiffFrames", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3453-L3500
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPages._getitem
def _getitem(self, key, validate=False): """Return specified page from cache or file.""" key = int(key) pages = self.pages if key < 0: key %= len(self) elif self._indexed and key >= len(pages): raise IndexError('index out of range') if key < len(pages): page = pages[key] if self._cache: if not isinstance(page, inttypes): if validate and validate != page.hash: raise RuntimeError('page hash mismatch') return page elif isinstance(page, (TiffPage, self._tiffpage)): if validate and validate != page.hash: raise RuntimeError('page hash mismatch') return page self._seek(key) page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe) if validate and validate != page.hash: raise RuntimeError('page hash mismatch') if self._cache: pages[key] = page return page
python
def _getitem(self, key, validate=False): """Return specified page from cache or file.""" key = int(key) pages = self.pages if key < 0: key %= len(self) elif self._indexed and key >= len(pages): raise IndexError('index out of range') if key < len(pages): page = pages[key] if self._cache: if not isinstance(page, inttypes): if validate and validate != page.hash: raise RuntimeError('page hash mismatch') return page elif isinstance(page, (TiffPage, self._tiffpage)): if validate and validate != page.hash: raise RuntimeError('page hash mismatch') return page self._seek(key) page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe) if validate and validate != page.hash: raise RuntimeError('page hash mismatch') if self._cache: pages[key] = page return page
[ "def", "_getitem", "(", "self", ",", "key", ",", "validate", "=", "False", ")", ":", "key", "=", "int", "(", "key", ")", "pages", "=", "self", ".", "pages", "if", "key", "<", "0", ":", "key", "%=", "len", "(", "self", ")", "elif", "self", ".", "_indexed", "and", "key", ">=", "len", "(", "pages", ")", ":", "raise", "IndexError", "(", "'index out of range'", ")", "if", "key", "<", "len", "(", "pages", ")", ":", "page", "=", "pages", "[", "key", "]", "if", "self", ".", "_cache", ":", "if", "not", "isinstance", "(", "page", ",", "inttypes", ")", ":", "if", "validate", "and", "validate", "!=", "page", ".", "hash", ":", "raise", "RuntimeError", "(", "'page hash mismatch'", ")", "return", "page", "elif", "isinstance", "(", "page", ",", "(", "TiffPage", ",", "self", ".", "_tiffpage", ")", ")", ":", "if", "validate", "and", "validate", "!=", "page", ".", "hash", ":", "raise", "RuntimeError", "(", "'page hash mismatch'", ")", "return", "page", "self", ".", "_seek", "(", "key", ")", "page", "=", "self", ".", "_tiffpage", "(", "self", ".", "parent", ",", "index", "=", "key", ",", "keyframe", "=", "self", ".", "_keyframe", ")", "if", "validate", "and", "validate", "!=", "page", ".", "hash", ":", "raise", "RuntimeError", "(", "'page hash mismatch'", ")", "if", "self", ".", "_cache", ":", "pages", "[", "key", "]", "=", "page", "return", "page" ]
Return specified page from cache or file.
[ "Return", "specified", "page", "from", "cache", "or", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3502-L3530
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.asarray
def asarray(self, out=None, squeeze=True, lock=None, reopen=True, maxsize=None, maxworkers=None, validate=True): """Read image data from file and return as numpy array. Raise ValueError if format is unsupported. Parameters ---------- out : numpy.ndarray, str, or file-like object Buffer where image data will be saved. If None (default), a new array will be created. If numpy.ndarray, a writable array of compatible dtype and shape. If 'memmap', directly memory-map the image data in the TIFF file if possible; else create a memory-mapped array in a temporary file. If str or open file, the file name or file object used to create a memory-map to an array stored in a binary file on disk. squeeze : bool If True (default), all length-1 dimensions (except X and Y) are squeezed out from the array. If False, the shape of the returned array might be different from the page.shape. lock : {RLock, NullContext} A reentrant lock used to syncronize reads from file. If None (default), the lock of the parent's filehandle is used. reopen : bool If True (default) and the parent file handle is closed, the file is temporarily re-opened and closed if no exception occurs. maxsize: int Maximum size of data before a ValueError is raised. Can be used to catch DOS. Default: 16 TB. maxworkers : int or None Maximum number of threads to concurrently decode tile data. If None (default), up to half the CPU cores are used for compressed tiles. See remarks in TiffFile.asarray. validate : bool If True (default), validate various parameters. If None, only validate parameters and return None. Returns ------- numpy.ndarray Numpy array of decompressed, depredicted, and unpacked image data read from Strip/Tile Offsets/ByteCounts, formatted according to shape and dtype metadata found in tags and parameters. Photometric conversion, pre-multiplied alpha, orientation, and colorimetry corrections are not applied. 
Specifically, CMYK images are not converted to RGB, MinIsWhite images are not inverted, and color palettes are not applied. """ # properties from TiffPage or TiffFrame fh = self.parent.filehandle byteorder = self.parent.tiff.byteorder offsets, bytecounts = self._offsetscounts self_ = self self = self.keyframe # self or keyframe if not self._shape or product(self._shape) == 0: return None tags = self.tags if validate or validate is None: if maxsize is None: maxsize = 2**44 if maxsize and product(self._shape) > maxsize: raise ValueError('data are too large %s' % str(self._shape)) if self.dtype is None: raise ValueError('data type not supported: %s%i' % ( self.sampleformat, self.bitspersample)) if self.compression not in TIFF.DECOMPESSORS: raise ValueError( 'cannot decompress %s' % self.compression.name) if 'SampleFormat' in tags: tag = tags['SampleFormat'] if tag.count != 1 and any((i-tag.value[0] for i in tag.value)): raise ValueError( 'sample formats do not match %s' % tag.value) if self.is_subsampled and (self.compression not in (6, 7) or self.planarconfig == 2): raise NotImplementedError('chroma subsampling not supported') if validate is None: return None lock = fh.lock if lock is None else lock with lock: closed = fh.closed if closed: if reopen: fh.open() else: raise IOError('file handle is closed') dtype = self._dtype shape = self._shape imagewidth = self.imagewidth imagelength = self.imagelength imagedepth = self.imagedepth bitspersample = self.bitspersample typecode = byteorder + dtype.char lsb2msb = self.fillorder == 2 istiled = self.is_tiled if istiled: tilewidth = self.tilewidth tilelength = self.tilelength tiledepth = self.tiledepth tw = (imagewidth + tilewidth - 1) // tilewidth tl = (imagelength + tilelength - 1) // tilelength td = (imagedepth + tiledepth - 1) // tiledepth tiledshape = (td, tl, tw) tileshape = (tiledepth, tilelength, tilewidth, shape[-1]) runlen = tilewidth else: runlen = imagewidth if self.planarconfig == 1: runlen *= 
self.samplesperpixel if isinstance(out, str) and out == 'memmap' and self.is_memmappable: # direct memory map array in file with lock: result = fh.memmap_array(typecode, shape, offset=offsets[0]) elif self.is_contiguous: # read contiguous bytes to array if out is not None: out = create_output(out, shape, dtype) with lock: fh.seek(offsets[0]) result = fh.read_array(typecode, product(shape), out=out) if lsb2msb: bitorder_decode(result, out=result) else: # decompress, unpack,... individual strips or tiles result = create_output(out, shape, dtype) decompress = TIFF.DECOMPESSORS[self.compression] if self.compression in (6, 7): # COMPRESSION.JPEG colorspace = None outcolorspace = None jpegtables = None if lsb2msb: log.warning('TiffPage.asarray: disabling LSB2MSB for JPEG') lsb2msb = False if 'JPEGTables' in tags: # load JPEGTables from TiffFrame jpegtables = self_._gettags({347}, lock=lock)[0][1].value # TODO: obtain table from OJPEG tags # elif ('JPEGInterchangeFormat' in tags and # 'JPEGInterchangeFormatLength' in tags and # tags['JPEGInterchangeFormat'].value != offsets[0]): # fh.seek(tags['JPEGInterchangeFormat'].value) # fh.read(tags['JPEGInterchangeFormatLength'].value) if 'ExtraSamples' in tags: pass elif self.photometric == 6: # YCBCR -> RGB outcolorspace = 'RGB' elif self.photometric == 2: if self.planarconfig == 2: # TODO: decode JPEG to planar RGB raise NotImplementedError( 'cannot decode JPEG to planar RGB') colorspace = outcolorspace = 'RGB' else: outcolorspace = TIFF.PHOTOMETRIC(self.photometric).name if istiled: heightwidth = tilelength, tilewidth else: heightwidth = imagelength, imagewidth def decompress(data, bitspersample=bitspersample, jpegtables=jpegtables, colorspace=colorspace, outcolorspace=outcolorspace, shape=heightwidth, out=None, _decompress=decompress): return _decompress(data, bitspersample, jpegtables, colorspace, outcolorspace, shape, out) def unpack(data): return data.reshape(-1) elif bitspersample in (8, 16, 32, 64, 128): if 
(bitspersample * runlen) % 8: raise ValueError('data and sample size mismatch') if self.predictor == 3: # PREDICTOR.FLOATINGPOINT # the floating-point horizontal differencing decoder # needs the raw byte order typecode = dtype.char def unpack(data, typecode=typecode, out=None): try: # read only numpy array return numpy.frombuffer(data, typecode) except ValueError: # strips may be missing EOI # log.warning('TiffPage.asarray: ...') bps = bitspersample // 8 xlen = (len(data) // bps) * bps return numpy.frombuffer(data[:xlen], typecode) elif isinstance(bitspersample, tuple): def unpack(data, out=None): return unpack_rgb(data, typecode, bitspersample) else: def unpack(data, out=None): return packints_decode(data, typecode, bitspersample, runlen) # TODO: store decode function for future use # TODO: unify tile and strip decoding if istiled: unpredict = TIFF.UNPREDICTORS[self.predictor] def decode(tile, tileindex): return tile_decode(tile, tileindex, tileshape, tiledshape, lsb2msb, decompress, unpack, unpredict, result[0]) tileiter = buffered_read(fh, lock, offsets, bytecounts) if maxworkers is None: maxworkers = 0 if self.compression > 1 else 1 if maxworkers == 0: import multiprocessing # noqa: delay import maxworkers = multiprocessing.cpu_count() // 2 if maxworkers < 2: for i, tile in enumerate(tileiter): decode(tile, i) else: # decode first tile un-threaded to catch exceptions decode(next(tileiter), 0) with ThreadPoolExecutor(maxworkers) as executor: executor.map(decode, tileiter, range(1, len(offsets))) else: stripsize = self.rowsperstrip * self.imagewidth if self.planarconfig == 1: stripsize *= self.samplesperpixel outsize = stripsize * self.dtype.itemsize result = result.reshape(-1) index = 0 for strip in buffered_read(fh, lock, offsets, bytecounts): if lsb2msb: strip = bitorder_decode(strip, out=strip) strip = decompress(strip, out=outsize) strip = unpack(strip) size = min(result.size, strip.size, stripsize, result.size - index) result[index:index+size] = 
strip[:size] del strip index += size result.shape = self._shape if self.predictor != 1 and not (istiled and not self.is_contiguous): unpredict = TIFF.UNPREDICTORS[self.predictor] result = unpredict(result, axis=-2, out=result) if squeeze: try: result.shape = self.shape except ValueError: log.warning('TiffPage.asarray: failed to reshape %s to %s', result.shape, self.shape) if closed: # TODO: file should remain open if an exception occurred above fh.close() return result
python
def asarray(self, out=None, squeeze=True, lock=None, reopen=True, maxsize=None, maxworkers=None, validate=True): """Read image data from file and return as numpy array. Raise ValueError if format is unsupported. Parameters ---------- out : numpy.ndarray, str, or file-like object Buffer where image data will be saved. If None (default), a new array will be created. If numpy.ndarray, a writable array of compatible dtype and shape. If 'memmap', directly memory-map the image data in the TIFF file if possible; else create a memory-mapped array in a temporary file. If str or open file, the file name or file object used to create a memory-map to an array stored in a binary file on disk. squeeze : bool If True (default), all length-1 dimensions (except X and Y) are squeezed out from the array. If False, the shape of the returned array might be different from the page.shape. lock : {RLock, NullContext} A reentrant lock used to syncronize reads from file. If None (default), the lock of the parent's filehandle is used. reopen : bool If True (default) and the parent file handle is closed, the file is temporarily re-opened and closed if no exception occurs. maxsize: int Maximum size of data before a ValueError is raised. Can be used to catch DOS. Default: 16 TB. maxworkers : int or None Maximum number of threads to concurrently decode tile data. If None (default), up to half the CPU cores are used for compressed tiles. See remarks in TiffFile.asarray. validate : bool If True (default), validate various parameters. If None, only validate parameters and return None. Returns ------- numpy.ndarray Numpy array of decompressed, depredicted, and unpacked image data read from Strip/Tile Offsets/ByteCounts, formatted according to shape and dtype metadata found in tags and parameters. Photometric conversion, pre-multiplied alpha, orientation, and colorimetry corrections are not applied. 
Specifically, CMYK images are not converted to RGB, MinIsWhite images are not inverted, and color palettes are not applied. """ # properties from TiffPage or TiffFrame fh = self.parent.filehandle byteorder = self.parent.tiff.byteorder offsets, bytecounts = self._offsetscounts self_ = self self = self.keyframe # self or keyframe if not self._shape or product(self._shape) == 0: return None tags = self.tags if validate or validate is None: if maxsize is None: maxsize = 2**44 if maxsize and product(self._shape) > maxsize: raise ValueError('data are too large %s' % str(self._shape)) if self.dtype is None: raise ValueError('data type not supported: %s%i' % ( self.sampleformat, self.bitspersample)) if self.compression not in TIFF.DECOMPESSORS: raise ValueError( 'cannot decompress %s' % self.compression.name) if 'SampleFormat' in tags: tag = tags['SampleFormat'] if tag.count != 1 and any((i-tag.value[0] for i in tag.value)): raise ValueError( 'sample formats do not match %s' % tag.value) if self.is_subsampled and (self.compression not in (6, 7) or self.planarconfig == 2): raise NotImplementedError('chroma subsampling not supported') if validate is None: return None lock = fh.lock if lock is None else lock with lock: closed = fh.closed if closed: if reopen: fh.open() else: raise IOError('file handle is closed') dtype = self._dtype shape = self._shape imagewidth = self.imagewidth imagelength = self.imagelength imagedepth = self.imagedepth bitspersample = self.bitspersample typecode = byteorder + dtype.char lsb2msb = self.fillorder == 2 istiled = self.is_tiled if istiled: tilewidth = self.tilewidth tilelength = self.tilelength tiledepth = self.tiledepth tw = (imagewidth + tilewidth - 1) // tilewidth tl = (imagelength + tilelength - 1) // tilelength td = (imagedepth + tiledepth - 1) // tiledepth tiledshape = (td, tl, tw) tileshape = (tiledepth, tilelength, tilewidth, shape[-1]) runlen = tilewidth else: runlen = imagewidth if self.planarconfig == 1: runlen *= 
self.samplesperpixel if isinstance(out, str) and out == 'memmap' and self.is_memmappable: # direct memory map array in file with lock: result = fh.memmap_array(typecode, shape, offset=offsets[0]) elif self.is_contiguous: # read contiguous bytes to array if out is not None: out = create_output(out, shape, dtype) with lock: fh.seek(offsets[0]) result = fh.read_array(typecode, product(shape), out=out) if lsb2msb: bitorder_decode(result, out=result) else: # decompress, unpack,... individual strips or tiles result = create_output(out, shape, dtype) decompress = TIFF.DECOMPESSORS[self.compression] if self.compression in (6, 7): # COMPRESSION.JPEG colorspace = None outcolorspace = None jpegtables = None if lsb2msb: log.warning('TiffPage.asarray: disabling LSB2MSB for JPEG') lsb2msb = False if 'JPEGTables' in tags: # load JPEGTables from TiffFrame jpegtables = self_._gettags({347}, lock=lock)[0][1].value # TODO: obtain table from OJPEG tags # elif ('JPEGInterchangeFormat' in tags and # 'JPEGInterchangeFormatLength' in tags and # tags['JPEGInterchangeFormat'].value != offsets[0]): # fh.seek(tags['JPEGInterchangeFormat'].value) # fh.read(tags['JPEGInterchangeFormatLength'].value) if 'ExtraSamples' in tags: pass elif self.photometric == 6: # YCBCR -> RGB outcolorspace = 'RGB' elif self.photometric == 2: if self.planarconfig == 2: # TODO: decode JPEG to planar RGB raise NotImplementedError( 'cannot decode JPEG to planar RGB') colorspace = outcolorspace = 'RGB' else: outcolorspace = TIFF.PHOTOMETRIC(self.photometric).name if istiled: heightwidth = tilelength, tilewidth else: heightwidth = imagelength, imagewidth def decompress(data, bitspersample=bitspersample, jpegtables=jpegtables, colorspace=colorspace, outcolorspace=outcolorspace, shape=heightwidth, out=None, _decompress=decompress): return _decompress(data, bitspersample, jpegtables, colorspace, outcolorspace, shape, out) def unpack(data): return data.reshape(-1) elif bitspersample in (8, 16, 32, 64, 128): if 
(bitspersample * runlen) % 8: raise ValueError('data and sample size mismatch') if self.predictor == 3: # PREDICTOR.FLOATINGPOINT # the floating-point horizontal differencing decoder # needs the raw byte order typecode = dtype.char def unpack(data, typecode=typecode, out=None): try: # read only numpy array return numpy.frombuffer(data, typecode) except ValueError: # strips may be missing EOI # log.warning('TiffPage.asarray: ...') bps = bitspersample // 8 xlen = (len(data) // bps) * bps return numpy.frombuffer(data[:xlen], typecode) elif isinstance(bitspersample, tuple): def unpack(data, out=None): return unpack_rgb(data, typecode, bitspersample) else: def unpack(data, out=None): return packints_decode(data, typecode, bitspersample, runlen) # TODO: store decode function for future use # TODO: unify tile and strip decoding if istiled: unpredict = TIFF.UNPREDICTORS[self.predictor] def decode(tile, tileindex): return tile_decode(tile, tileindex, tileshape, tiledshape, lsb2msb, decompress, unpack, unpredict, result[0]) tileiter = buffered_read(fh, lock, offsets, bytecounts) if maxworkers is None: maxworkers = 0 if self.compression > 1 else 1 if maxworkers == 0: import multiprocessing # noqa: delay import maxworkers = multiprocessing.cpu_count() // 2 if maxworkers < 2: for i, tile in enumerate(tileiter): decode(tile, i) else: # decode first tile un-threaded to catch exceptions decode(next(tileiter), 0) with ThreadPoolExecutor(maxworkers) as executor: executor.map(decode, tileiter, range(1, len(offsets))) else: stripsize = self.rowsperstrip * self.imagewidth if self.planarconfig == 1: stripsize *= self.samplesperpixel outsize = stripsize * self.dtype.itemsize result = result.reshape(-1) index = 0 for strip in buffered_read(fh, lock, offsets, bytecounts): if lsb2msb: strip = bitorder_decode(strip, out=strip) strip = decompress(strip, out=outsize) strip = unpack(strip) size = min(result.size, strip.size, stripsize, result.size - index) result[index:index+size] = 
strip[:size] del strip index += size result.shape = self._shape if self.predictor != 1 and not (istiled and not self.is_contiguous): unpredict = TIFF.UNPREDICTORS[self.predictor] result = unpredict(result, axis=-2, out=result) if squeeze: try: result.shape = self.shape except ValueError: log.warning('TiffPage.asarray: failed to reshape %s to %s', result.shape, self.shape) if closed: # TODO: file should remain open if an exception occurred above fh.close() return result
[ "def", "asarray", "(", "self", ",", "out", "=", "None", ",", "squeeze", "=", "True", ",", "lock", "=", "None", ",", "reopen", "=", "True", ",", "maxsize", "=", "None", ",", "maxworkers", "=", "None", ",", "validate", "=", "True", ")", ":", "# properties from TiffPage or TiffFrame", "fh", "=", "self", ".", "parent", ".", "filehandle", "byteorder", "=", "self", ".", "parent", ".", "tiff", ".", "byteorder", "offsets", ",", "bytecounts", "=", "self", ".", "_offsetscounts", "self_", "=", "self", "self", "=", "self", ".", "keyframe", "# self or keyframe", "if", "not", "self", ".", "_shape", "or", "product", "(", "self", ".", "_shape", ")", "==", "0", ":", "return", "None", "tags", "=", "self", ".", "tags", "if", "validate", "or", "validate", "is", "None", ":", "if", "maxsize", "is", "None", ":", "maxsize", "=", "2", "**", "44", "if", "maxsize", "and", "product", "(", "self", ".", "_shape", ")", ">", "maxsize", ":", "raise", "ValueError", "(", "'data are too large %s'", "%", "str", "(", "self", ".", "_shape", ")", ")", "if", "self", ".", "dtype", "is", "None", ":", "raise", "ValueError", "(", "'data type not supported: %s%i'", "%", "(", "self", ".", "sampleformat", ",", "self", ".", "bitspersample", ")", ")", "if", "self", ".", "compression", "not", "in", "TIFF", ".", "DECOMPESSORS", ":", "raise", "ValueError", "(", "'cannot decompress %s'", "%", "self", ".", "compression", ".", "name", ")", "if", "'SampleFormat'", "in", "tags", ":", "tag", "=", "tags", "[", "'SampleFormat'", "]", "if", "tag", ".", "count", "!=", "1", "and", "any", "(", "(", "i", "-", "tag", ".", "value", "[", "0", "]", "for", "i", "in", "tag", ".", "value", ")", ")", ":", "raise", "ValueError", "(", "'sample formats do not match %s'", "%", "tag", ".", "value", ")", "if", "self", ".", "is_subsampled", "and", "(", "self", ".", "compression", "not", "in", "(", "6", ",", "7", ")", "or", "self", ".", "planarconfig", "==", "2", ")", ":", "raise", "NotImplementedError", "(", "'chroma subsampling not 
supported'", ")", "if", "validate", "is", "None", ":", "return", "None", "lock", "=", "fh", ".", "lock", "if", "lock", "is", "None", "else", "lock", "with", "lock", ":", "closed", "=", "fh", ".", "closed", "if", "closed", ":", "if", "reopen", ":", "fh", ".", "open", "(", ")", "else", ":", "raise", "IOError", "(", "'file handle is closed'", ")", "dtype", "=", "self", ".", "_dtype", "shape", "=", "self", ".", "_shape", "imagewidth", "=", "self", ".", "imagewidth", "imagelength", "=", "self", ".", "imagelength", "imagedepth", "=", "self", ".", "imagedepth", "bitspersample", "=", "self", ".", "bitspersample", "typecode", "=", "byteorder", "+", "dtype", ".", "char", "lsb2msb", "=", "self", ".", "fillorder", "==", "2", "istiled", "=", "self", ".", "is_tiled", "if", "istiled", ":", "tilewidth", "=", "self", ".", "tilewidth", "tilelength", "=", "self", ".", "tilelength", "tiledepth", "=", "self", ".", "tiledepth", "tw", "=", "(", "imagewidth", "+", "tilewidth", "-", "1", ")", "//", "tilewidth", "tl", "=", "(", "imagelength", "+", "tilelength", "-", "1", ")", "//", "tilelength", "td", "=", "(", "imagedepth", "+", "tiledepth", "-", "1", ")", "//", "tiledepth", "tiledshape", "=", "(", "td", ",", "tl", ",", "tw", ")", "tileshape", "=", "(", "tiledepth", ",", "tilelength", ",", "tilewidth", ",", "shape", "[", "-", "1", "]", ")", "runlen", "=", "tilewidth", "else", ":", "runlen", "=", "imagewidth", "if", "self", ".", "planarconfig", "==", "1", ":", "runlen", "*=", "self", ".", "samplesperpixel", "if", "isinstance", "(", "out", ",", "str", ")", "and", "out", "==", "'memmap'", "and", "self", ".", "is_memmappable", ":", "# direct memory map array in file", "with", "lock", ":", "result", "=", "fh", ".", "memmap_array", "(", "typecode", ",", "shape", ",", "offset", "=", "offsets", "[", "0", "]", ")", "elif", "self", ".", "is_contiguous", ":", "# read contiguous bytes to array", "if", "out", "is", "not", "None", ":", "out", "=", "create_output", "(", "out", ",", "shape", ",", "dtype", 
")", "with", "lock", ":", "fh", ".", "seek", "(", "offsets", "[", "0", "]", ")", "result", "=", "fh", ".", "read_array", "(", "typecode", ",", "product", "(", "shape", ")", ",", "out", "=", "out", ")", "if", "lsb2msb", ":", "bitorder_decode", "(", "result", ",", "out", "=", "result", ")", "else", ":", "# decompress, unpack,... individual strips or tiles", "result", "=", "create_output", "(", "out", ",", "shape", ",", "dtype", ")", "decompress", "=", "TIFF", ".", "DECOMPESSORS", "[", "self", ".", "compression", "]", "if", "self", ".", "compression", "in", "(", "6", ",", "7", ")", ":", "# COMPRESSION.JPEG", "colorspace", "=", "None", "outcolorspace", "=", "None", "jpegtables", "=", "None", "if", "lsb2msb", ":", "log", ".", "warning", "(", "'TiffPage.asarray: disabling LSB2MSB for JPEG'", ")", "lsb2msb", "=", "False", "if", "'JPEGTables'", "in", "tags", ":", "# load JPEGTables from TiffFrame", "jpegtables", "=", "self_", ".", "_gettags", "(", "{", "347", "}", ",", "lock", "=", "lock", ")", "[", "0", "]", "[", "1", "]", ".", "value", "# TODO: obtain table from OJPEG tags", "# elif ('JPEGInterchangeFormat' in tags and", "# 'JPEGInterchangeFormatLength' in tags and", "# tags['JPEGInterchangeFormat'].value != offsets[0]):", "# fh.seek(tags['JPEGInterchangeFormat'].value)", "# fh.read(tags['JPEGInterchangeFormatLength'].value)", "if", "'ExtraSamples'", "in", "tags", ":", "pass", "elif", "self", ".", "photometric", "==", "6", ":", "# YCBCR -> RGB", "outcolorspace", "=", "'RGB'", "elif", "self", ".", "photometric", "==", "2", ":", "if", "self", ".", "planarconfig", "==", "2", ":", "# TODO: decode JPEG to planar RGB", "raise", "NotImplementedError", "(", "'cannot decode JPEG to planar RGB'", ")", "colorspace", "=", "outcolorspace", "=", "'RGB'", "else", ":", "outcolorspace", "=", "TIFF", ".", "PHOTOMETRIC", "(", "self", ".", "photometric", ")", ".", "name", "if", "istiled", ":", "heightwidth", "=", "tilelength", ",", "tilewidth", "else", ":", "heightwidth", "=", 
"imagelength", ",", "imagewidth", "def", "decompress", "(", "data", ",", "bitspersample", "=", "bitspersample", ",", "jpegtables", "=", "jpegtables", ",", "colorspace", "=", "colorspace", ",", "outcolorspace", "=", "outcolorspace", ",", "shape", "=", "heightwidth", ",", "out", "=", "None", ",", "_decompress", "=", "decompress", ")", ":", "return", "_decompress", "(", "data", ",", "bitspersample", ",", "jpegtables", ",", "colorspace", ",", "outcolorspace", ",", "shape", ",", "out", ")", "def", "unpack", "(", "data", ")", ":", "return", "data", ".", "reshape", "(", "-", "1", ")", "elif", "bitspersample", "in", "(", "8", ",", "16", ",", "32", ",", "64", ",", "128", ")", ":", "if", "(", "bitspersample", "*", "runlen", ")", "%", "8", ":", "raise", "ValueError", "(", "'data and sample size mismatch'", ")", "if", "self", ".", "predictor", "==", "3", ":", "# PREDICTOR.FLOATINGPOINT", "# the floating-point horizontal differencing decoder", "# needs the raw byte order", "typecode", "=", "dtype", ".", "char", "def", "unpack", "(", "data", ",", "typecode", "=", "typecode", ",", "out", "=", "None", ")", ":", "try", ":", "# read only numpy array", "return", "numpy", ".", "frombuffer", "(", "data", ",", "typecode", ")", "except", "ValueError", ":", "# strips may be missing EOI", "# log.warning('TiffPage.asarray: ...')", "bps", "=", "bitspersample", "//", "8", "xlen", "=", "(", "len", "(", "data", ")", "//", "bps", ")", "*", "bps", "return", "numpy", ".", "frombuffer", "(", "data", "[", ":", "xlen", "]", ",", "typecode", ")", "elif", "isinstance", "(", "bitspersample", ",", "tuple", ")", ":", "def", "unpack", "(", "data", ",", "out", "=", "None", ")", ":", "return", "unpack_rgb", "(", "data", ",", "typecode", ",", "bitspersample", ")", "else", ":", "def", "unpack", "(", "data", ",", "out", "=", "None", ")", ":", "return", "packints_decode", "(", "data", ",", "typecode", ",", "bitspersample", ",", "runlen", ")", "# TODO: store decode function for future use", "# TODO: unify tile 
and strip decoding", "if", "istiled", ":", "unpredict", "=", "TIFF", ".", "UNPREDICTORS", "[", "self", ".", "predictor", "]", "def", "decode", "(", "tile", ",", "tileindex", ")", ":", "return", "tile_decode", "(", "tile", ",", "tileindex", ",", "tileshape", ",", "tiledshape", ",", "lsb2msb", ",", "decompress", ",", "unpack", ",", "unpredict", ",", "result", "[", "0", "]", ")", "tileiter", "=", "buffered_read", "(", "fh", ",", "lock", ",", "offsets", ",", "bytecounts", ")", "if", "maxworkers", "is", "None", ":", "maxworkers", "=", "0", "if", "self", ".", "compression", ">", "1", "else", "1", "if", "maxworkers", "==", "0", ":", "import", "multiprocessing", "# noqa: delay import", "maxworkers", "=", "multiprocessing", ".", "cpu_count", "(", ")", "//", "2", "if", "maxworkers", "<", "2", ":", "for", "i", ",", "tile", "in", "enumerate", "(", "tileiter", ")", ":", "decode", "(", "tile", ",", "i", ")", "else", ":", "# decode first tile un-threaded to catch exceptions", "decode", "(", "next", "(", "tileiter", ")", ",", "0", ")", "with", "ThreadPoolExecutor", "(", "maxworkers", ")", "as", "executor", ":", "executor", ".", "map", "(", "decode", ",", "tileiter", ",", "range", "(", "1", ",", "len", "(", "offsets", ")", ")", ")", "else", ":", "stripsize", "=", "self", ".", "rowsperstrip", "*", "self", ".", "imagewidth", "if", "self", ".", "planarconfig", "==", "1", ":", "stripsize", "*=", "self", ".", "samplesperpixel", "outsize", "=", "stripsize", "*", "self", ".", "dtype", ".", "itemsize", "result", "=", "result", ".", "reshape", "(", "-", "1", ")", "index", "=", "0", "for", "strip", "in", "buffered_read", "(", "fh", ",", "lock", ",", "offsets", ",", "bytecounts", ")", ":", "if", "lsb2msb", ":", "strip", "=", "bitorder_decode", "(", "strip", ",", "out", "=", "strip", ")", "strip", "=", "decompress", "(", "strip", ",", "out", "=", "outsize", ")", "strip", "=", "unpack", "(", "strip", ")", "size", "=", "min", "(", "result", ".", "size", ",", "strip", ".", "size", ",", 
"stripsize", ",", "result", ".", "size", "-", "index", ")", "result", "[", "index", ":", "index", "+", "size", "]", "=", "strip", "[", ":", "size", "]", "del", "strip", "index", "+=", "size", "result", ".", "shape", "=", "self", ".", "_shape", "if", "self", ".", "predictor", "!=", "1", "and", "not", "(", "istiled", "and", "not", "self", ".", "is_contiguous", ")", ":", "unpredict", "=", "TIFF", ".", "UNPREDICTORS", "[", "self", ".", "predictor", "]", "result", "=", "unpredict", "(", "result", ",", "axis", "=", "-", "2", ",", "out", "=", "result", ")", "if", "squeeze", ":", "try", ":", "result", ".", "shape", "=", "self", ".", "shape", "except", "ValueError", ":", "log", ".", "warning", "(", "'TiffPage.asarray: failed to reshape %s to %s'", ",", "result", ".", "shape", ",", "self", ".", "shape", ")", "if", "closed", ":", "# TODO: file should remain open if an exception occurred above", "fh", ".", "close", "(", ")", "return", "result" ]
Read image data from file and return as numpy array. Raise ValueError if format is unsupported. Parameters ---------- out : numpy.ndarray, str, or file-like object Buffer where image data will be saved. If None (default), a new array will be created. If numpy.ndarray, a writable array of compatible dtype and shape. If 'memmap', directly memory-map the image data in the TIFF file if possible; else create a memory-mapped array in a temporary file. If str or open file, the file name or file object used to create a memory-map to an array stored in a binary file on disk. squeeze : bool If True (default), all length-1 dimensions (except X and Y) are squeezed out from the array. If False, the shape of the returned array might be different from the page.shape. lock : {RLock, NullContext} A reentrant lock used to syncronize reads from file. If None (default), the lock of the parent's filehandle is used. reopen : bool If True (default) and the parent file handle is closed, the file is temporarily re-opened and closed if no exception occurs. maxsize: int Maximum size of data before a ValueError is raised. Can be used to catch DOS. Default: 16 TB. maxworkers : int or None Maximum number of threads to concurrently decode tile data. If None (default), up to half the CPU cores are used for compressed tiles. See remarks in TiffFile.asarray. validate : bool If True (default), validate various parameters. If None, only validate parameters and return None. Returns ------- numpy.ndarray Numpy array of decompressed, depredicted, and unpacked image data read from Strip/Tile Offsets/ByteCounts, formatted according to shape and dtype metadata found in tags and parameters. Photometric conversion, pre-multiplied alpha, orientation, and colorimetry corrections are not applied. Specifically, CMYK images are not converted to RGB, MinIsWhite images are not inverted, and color palettes are not applied.
[ "Read", "image", "data", "from", "file", "and", "return", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3879-L4151
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.asrgb
def asrgb(self, uint8=False, alpha=None, colormap=None, dmin=None, dmax=None, **kwargs): """Return image data as RGB(A). Work in progress. """ data = self.asarray(**kwargs) self = self.keyframe # self or keyframe photometric = self.photometric PHOTOMETRIC = TIFF.PHOTOMETRIC if photometric == PHOTOMETRIC.PALETTE: colormap = self.colormap if (colormap.shape[1] < 2**self.bitspersample or self.dtype.char not in 'BH'): raise ValueError('cannot apply colormap') if uint8: if colormap.max() > 255: colormap >>= 8 colormap = colormap.astype('uint8') if 'S' in self.axes: data = data[..., 0] if self.planarconfig == 1 else data[0] data = apply_colormap(data, colormap) elif photometric == PHOTOMETRIC.RGB: if 'ExtraSamples' in self.tags: if alpha is None: alpha = TIFF.EXTRASAMPLE extrasamples = self.extrasamples if self.tags['ExtraSamples'].count == 1: extrasamples = (extrasamples,) for i, exs in enumerate(extrasamples): if exs in alpha: if self.planarconfig == 1: data = data[..., [0, 1, 2, 3+i]] else: data = data[:, [0, 1, 2, 3+i]] break else: if self.planarconfig == 1: data = data[..., :3] else: data = data[:, :3] # TODO: convert to uint8? elif photometric == PHOTOMETRIC.MINISBLACK: raise NotImplementedError() elif photometric == PHOTOMETRIC.MINISWHITE: raise NotImplementedError() elif photometric == PHOTOMETRIC.SEPARATED: raise NotImplementedError() else: raise NotImplementedError() return data
python
def asrgb(self, uint8=False, alpha=None, colormap=None, dmin=None, dmax=None, **kwargs): """Return image data as RGB(A). Work in progress. """ data = self.asarray(**kwargs) self = self.keyframe # self or keyframe photometric = self.photometric PHOTOMETRIC = TIFF.PHOTOMETRIC if photometric == PHOTOMETRIC.PALETTE: colormap = self.colormap if (colormap.shape[1] < 2**self.bitspersample or self.dtype.char not in 'BH'): raise ValueError('cannot apply colormap') if uint8: if colormap.max() > 255: colormap >>= 8 colormap = colormap.astype('uint8') if 'S' in self.axes: data = data[..., 0] if self.planarconfig == 1 else data[0] data = apply_colormap(data, colormap) elif photometric == PHOTOMETRIC.RGB: if 'ExtraSamples' in self.tags: if alpha is None: alpha = TIFF.EXTRASAMPLE extrasamples = self.extrasamples if self.tags['ExtraSamples'].count == 1: extrasamples = (extrasamples,) for i, exs in enumerate(extrasamples): if exs in alpha: if self.planarconfig == 1: data = data[..., [0, 1, 2, 3+i]] else: data = data[:, [0, 1, 2, 3+i]] break else: if self.planarconfig == 1: data = data[..., :3] else: data = data[:, :3] # TODO: convert to uint8? elif photometric == PHOTOMETRIC.MINISBLACK: raise NotImplementedError() elif photometric == PHOTOMETRIC.MINISWHITE: raise NotImplementedError() elif photometric == PHOTOMETRIC.SEPARATED: raise NotImplementedError() else: raise NotImplementedError() return data
[ "def", "asrgb", "(", "self", ",", "uint8", "=", "False", ",", "alpha", "=", "None", ",", "colormap", "=", "None", ",", "dmin", "=", "None", ",", "dmax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "asarray", "(", "*", "*", "kwargs", ")", "self", "=", "self", ".", "keyframe", "# self or keyframe", "photometric", "=", "self", ".", "photometric", "PHOTOMETRIC", "=", "TIFF", ".", "PHOTOMETRIC", "if", "photometric", "==", "PHOTOMETRIC", ".", "PALETTE", ":", "colormap", "=", "self", ".", "colormap", "if", "(", "colormap", ".", "shape", "[", "1", "]", "<", "2", "**", "self", ".", "bitspersample", "or", "self", ".", "dtype", ".", "char", "not", "in", "'BH'", ")", ":", "raise", "ValueError", "(", "'cannot apply colormap'", ")", "if", "uint8", ":", "if", "colormap", ".", "max", "(", ")", ">", "255", ":", "colormap", ">>=", "8", "colormap", "=", "colormap", ".", "astype", "(", "'uint8'", ")", "if", "'S'", "in", "self", ".", "axes", ":", "data", "=", "data", "[", "...", ",", "0", "]", "if", "self", ".", "planarconfig", "==", "1", "else", "data", "[", "0", "]", "data", "=", "apply_colormap", "(", "data", ",", "colormap", ")", "elif", "photometric", "==", "PHOTOMETRIC", ".", "RGB", ":", "if", "'ExtraSamples'", "in", "self", ".", "tags", ":", "if", "alpha", "is", "None", ":", "alpha", "=", "TIFF", ".", "EXTRASAMPLE", "extrasamples", "=", "self", ".", "extrasamples", "if", "self", ".", "tags", "[", "'ExtraSamples'", "]", ".", "count", "==", "1", ":", "extrasamples", "=", "(", "extrasamples", ",", ")", "for", "i", ",", "exs", "in", "enumerate", "(", "extrasamples", ")", ":", "if", "exs", "in", "alpha", ":", "if", "self", ".", "planarconfig", "==", "1", ":", "data", "=", "data", "[", "...", ",", "[", "0", ",", "1", ",", "2", ",", "3", "+", "i", "]", "]", "else", ":", "data", "=", "data", "[", ":", ",", "[", "0", ",", "1", ",", "2", ",", "3", "+", "i", "]", "]", "break", "else", ":", "if", "self", ".", "planarconfig", "==", "1", ":", "data", "=", 
"data", "[", "...", ",", ":", "3", "]", "else", ":", "data", "=", "data", "[", ":", ",", ":", "3", "]", "# TODO: convert to uint8?", "elif", "photometric", "==", "PHOTOMETRIC", ".", "MINISBLACK", ":", "raise", "NotImplementedError", "(", ")", "elif", "photometric", "==", "PHOTOMETRIC", ".", "MINISWHITE", ":", "raise", "NotImplementedError", "(", ")", "elif", "photometric", "==", "PHOTOMETRIC", ".", "SEPARATED", ":", "raise", "NotImplementedError", "(", ")", "else", ":", "raise", "NotImplementedError", "(", ")", "return", "data" ]
Return image data as RGB(A). Work in progress.
[ "Return", "image", "data", "as", "RGB", "(", "A", ")", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4153-L4207
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage._gettags
def _gettags(self, codes=None, lock=None): """Return list of (code, TiffTag).""" tags = [] for tag in self.tags.values(): code = tag.code if not codes or code in codes: tags.append((code, tag)) return tags
python
def _gettags(self, codes=None, lock=None): """Return list of (code, TiffTag).""" tags = [] for tag in self.tags.values(): code = tag.code if not codes or code in codes: tags.append((code, tag)) return tags
[ "def", "_gettags", "(", "self", ",", "codes", "=", "None", ",", "lock", "=", "None", ")", ":", "tags", "=", "[", "]", "for", "tag", "in", "self", ".", "tags", ".", "values", "(", ")", ":", "code", "=", "tag", ".", "code", "if", "not", "codes", "or", "code", "in", "codes", ":", "tags", ".", "append", "(", "(", "code", ",", "tag", ")", ")", "return", "tags" ]
Return list of (code, TiffTag).
[ "Return", "list", "of", "(", "code", "TiffTag", ")", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4209-L4216
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.hash
def hash(self): """Return checksum to identify pages in same series.""" return hash( self._shape + ( self.tilewidth, self.tilelength, self.tiledepth, self.bitspersample, self.fillorder, self.predictor, self.extrasamples, self.photometric, self.compression, self.planarconfig))
python
def hash(self): """Return checksum to identify pages in same series.""" return hash( self._shape + ( self.tilewidth, self.tilelength, self.tiledepth, self.bitspersample, self.fillorder, self.predictor, self.extrasamples, self.photometric, self.compression, self.planarconfig))
[ "def", "hash", "(", "self", ")", ":", "return", "hash", "(", "self", ".", "_shape", "+", "(", "self", ".", "tilewidth", ",", "self", ".", "tilelength", ",", "self", ".", "tiledepth", ",", "self", ".", "bitspersample", ",", "self", ".", "fillorder", ",", "self", ".", "predictor", ",", "self", ".", "extrasamples", ",", "self", ".", "photometric", ",", "self", ".", "compression", ",", "self", ".", "planarconfig", ")", ")" ]
Return checksum to identify pages in same series.
[ "Return", "checksum", "to", "identify", "pages", "in", "same", "series", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4240-L4247
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage._offsetscounts
def _offsetscounts(self): """Return simplified offsets and bytecounts.""" if self.is_contiguous: offset, bytecount = self.is_contiguous return [offset], [bytecount] if self.is_tiled: return self.dataoffsets, self.databytecounts return clean_offsetscounts(self.dataoffsets, self.databytecounts)
python
def _offsetscounts(self): """Return simplified offsets and bytecounts.""" if self.is_contiguous: offset, bytecount = self.is_contiguous return [offset], [bytecount] if self.is_tiled: return self.dataoffsets, self.databytecounts return clean_offsetscounts(self.dataoffsets, self.databytecounts)
[ "def", "_offsetscounts", "(", "self", ")", ":", "if", "self", ".", "is_contiguous", ":", "offset", ",", "bytecount", "=", "self", ".", "is_contiguous", "return", "[", "offset", "]", ",", "[", "bytecount", "]", "if", "self", ".", "is_tiled", ":", "return", "self", ".", "dataoffsets", ",", "self", ".", "databytecounts", "return", "clean_offsetscounts", "(", "self", ".", "dataoffsets", ",", "self", ".", "databytecounts", ")" ]
Return simplified offsets and bytecounts.
[ "Return", "simplified", "offsets", "and", "bytecounts", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4250-L4257
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.is_contiguous
def is_contiguous(self): """Return offset and size of contiguous data, else None. Excludes prediction and fill_order. """ if (self.compression != 1 or self.bitspersample not in (8, 16, 32, 64)): return None if 'TileWidth' in self.tags: if (self.imagewidth != self.tilewidth or self.imagelength % self.tilelength or self.tilewidth % 16 or self.tilelength % 16): return None if ('ImageDepth' in self.tags and 'TileDepth' in self.tags and (self.imagelength != self.tilelength or self.imagedepth % self.tiledepth)): return None offsets = self.dataoffsets bytecounts = self.databytecounts if len(offsets) == 1: return offsets[0], bytecounts[0] if self.is_stk or all((offsets[i] + bytecounts[i] == offsets[i+1] or bytecounts[i+1] == 0) # no data/ignore offset for i in range(len(offsets)-1)): return offsets[0], sum(bytecounts) return None
python
def is_contiguous(self): """Return offset and size of contiguous data, else None. Excludes prediction and fill_order. """ if (self.compression != 1 or self.bitspersample not in (8, 16, 32, 64)): return None if 'TileWidth' in self.tags: if (self.imagewidth != self.tilewidth or self.imagelength % self.tilelength or self.tilewidth % 16 or self.tilelength % 16): return None if ('ImageDepth' in self.tags and 'TileDepth' in self.tags and (self.imagelength != self.tilelength or self.imagedepth % self.tiledepth)): return None offsets = self.dataoffsets bytecounts = self.databytecounts if len(offsets) == 1: return offsets[0], bytecounts[0] if self.is_stk or all((offsets[i] + bytecounts[i] == offsets[i+1] or bytecounts[i+1] == 0) # no data/ignore offset for i in range(len(offsets)-1)): return offsets[0], sum(bytecounts) return None
[ "def", "is_contiguous", "(", "self", ")", ":", "if", "(", "self", ".", "compression", "!=", "1", "or", "self", ".", "bitspersample", "not", "in", "(", "8", ",", "16", ",", "32", ",", "64", ")", ")", ":", "return", "None", "if", "'TileWidth'", "in", "self", ".", "tags", ":", "if", "(", "self", ".", "imagewidth", "!=", "self", ".", "tilewidth", "or", "self", ".", "imagelength", "%", "self", ".", "tilelength", "or", "self", ".", "tilewidth", "%", "16", "or", "self", ".", "tilelength", "%", "16", ")", ":", "return", "None", "if", "(", "'ImageDepth'", "in", "self", ".", "tags", "and", "'TileDepth'", "in", "self", ".", "tags", "and", "(", "self", ".", "imagelength", "!=", "self", ".", "tilelength", "or", "self", ".", "imagedepth", "%", "self", ".", "tiledepth", ")", ")", ":", "return", "None", "offsets", "=", "self", ".", "dataoffsets", "bytecounts", "=", "self", ".", "databytecounts", "if", "len", "(", "offsets", ")", "==", "1", ":", "return", "offsets", "[", "0", "]", ",", "bytecounts", "[", "0", "]", "if", "self", ".", "is_stk", "or", "all", "(", "(", "offsets", "[", "i", "]", "+", "bytecounts", "[", "i", "]", "==", "offsets", "[", "i", "+", "1", "]", "or", "bytecounts", "[", "i", "+", "1", "]", "==", "0", ")", "# no data/ignore offset", "for", "i", "in", "range", "(", "len", "(", "offsets", ")", "-", "1", ")", ")", ":", "return", "offsets", "[", "0", "]", ",", "sum", "(", "bytecounts", ")", "return", "None" ]
Return offset and size of contiguous data, else None. Excludes prediction and fill_order.
[ "Return", "offset", "and", "size", "of", "contiguous", "data", "else", "None", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4260-L4287
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.is_final
def is_final(self): """Return if page's image data are stored in final form. Excludes byte-swapping. """ return (self.is_contiguous and self.fillorder == 1 and self.predictor == 1 and not self.is_subsampled)
python
def is_final(self): """Return if page's image data are stored in final form. Excludes byte-swapping. """ return (self.is_contiguous and self.fillorder == 1 and self.predictor == 1 and not self.is_subsampled)
[ "def", "is_final", "(", "self", ")", ":", "return", "(", "self", ".", "is_contiguous", "and", "self", ".", "fillorder", "==", "1", "and", "self", ".", "predictor", "==", "1", "and", "not", "self", ".", "is_subsampled", ")" ]
Return if page's image data are stored in final form. Excludes byte-swapping.
[ "Return", "if", "page", "s", "image", "data", "are", "stored", "in", "final", "form", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4290-L4297
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.is_memmappable
def is_memmappable(self): """Return if page's image data in file can be memory-mapped.""" return (self.parent.filehandle.is_file and self.is_final and # (self.bitspersample == 8 or self.parent.isnative) and self.is_contiguous[0] % self.dtype.itemsize == 0)
python
def is_memmappable(self): """Return if page's image data in file can be memory-mapped.""" return (self.parent.filehandle.is_file and self.is_final and # (self.bitspersample == 8 or self.parent.isnative) and self.is_contiguous[0] % self.dtype.itemsize == 0)
[ "def", "is_memmappable", "(", "self", ")", ":", "return", "(", "self", ".", "parent", ".", "filehandle", ".", "is_file", "and", "self", ".", "is_final", "and", "# (self.bitspersample == 8 or self.parent.isnative) and", "self", ".", "is_contiguous", "[", "0", "]", "%", "self", ".", "dtype", ".", "itemsize", "==", "0", ")" ]
Return if page's image data in file can be memory-mapped.
[ "Return", "if", "page", "s", "image", "data", "in", "file", "can", "be", "memory", "-", "mapped", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4300-L4304
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.flags
def flags(self): """Return set of flags.""" return set((name.lower() for name in sorted(TIFF.FILE_FLAGS) if getattr(self, 'is_' + name)))
python
def flags(self): """Return set of flags.""" return set((name.lower() for name in sorted(TIFF.FILE_FLAGS) if getattr(self, 'is_' + name)))
[ "def", "flags", "(", "self", ")", ":", "return", "set", "(", "(", "name", ".", "lower", "(", ")", "for", "name", "in", "sorted", "(", "TIFF", ".", "FILE_FLAGS", ")", "if", "getattr", "(", "self", ",", "'is_'", "+", "name", ")", ")", ")" ]
Return set of flags.
[ "Return", "set", "of", "flags", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4366-L4369
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.andor_tags
def andor_tags(self): """Return consolidated metadata from Andor tags as dict. Remove Andor tags from self.tags. """ if not self.is_andor: return None tags = self.tags result = {'Id': tags['AndorId'].value} for tag in list(self.tags.values()): code = tag.code if not 4864 < code < 5031: continue value = tag.value name = tag.name[5:] if len(tag.name) > 5 else tag.name result[name] = value del tags[tag.name] return result
python
def andor_tags(self): """Return consolidated metadata from Andor tags as dict. Remove Andor tags from self.tags. """ if not self.is_andor: return None tags = self.tags result = {'Id': tags['AndorId'].value} for tag in list(self.tags.values()): code = tag.code if not 4864 < code < 5031: continue value = tag.value name = tag.name[5:] if len(tag.name) > 5 else tag.name result[name] = value del tags[tag.name] return result
[ "def", "andor_tags", "(", "self", ")", ":", "if", "not", "self", ".", "is_andor", ":", "return", "None", "tags", "=", "self", ".", "tags", "result", "=", "{", "'Id'", ":", "tags", "[", "'AndorId'", "]", ".", "value", "}", "for", "tag", "in", "list", "(", "self", ".", "tags", ".", "values", "(", ")", ")", ":", "code", "=", "tag", ".", "code", "if", "not", "4864", "<", "code", "<", "5031", ":", "continue", "value", "=", "tag", ".", "value", "name", "=", "tag", ".", "name", "[", "5", ":", "]", "if", "len", "(", "tag", ".", "name", ")", ">", "5", "else", "tag", ".", "name", "result", "[", "name", "]", "=", "value", "del", "tags", "[", "tag", ".", "name", "]", "return", "result" ]
Return consolidated metadata from Andor tags as dict. Remove Andor tags from self.tags.
[ "Return", "consolidated", "metadata", "from", "Andor", "tags", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4382-L4400
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.epics_tags
def epics_tags(self): """Return consolidated metadata from EPICS areaDetector tags as dict. Remove areaDetector tags from self.tags. """ if not self.is_epics: return None result = {} tags = self.tags for tag in list(self.tags.values()): code = tag.code if not 65000 <= code < 65500: continue value = tag.value if code == 65000: result['timeStamp'] = datetime.datetime.fromtimestamp( float(value)) elif code == 65001: result['uniqueID'] = int(value) elif code == 65002: result['epicsTSSec'] = int(value) elif code == 65003: result['epicsTSNsec'] = int(value) else: key, value = value.split(':', 1) result[key] = astype(value) del tags[tag.name] return result
python
def epics_tags(self): """Return consolidated metadata from EPICS areaDetector tags as dict. Remove areaDetector tags from self.tags. """ if not self.is_epics: return None result = {} tags = self.tags for tag in list(self.tags.values()): code = tag.code if not 65000 <= code < 65500: continue value = tag.value if code == 65000: result['timeStamp'] = datetime.datetime.fromtimestamp( float(value)) elif code == 65001: result['uniqueID'] = int(value) elif code == 65002: result['epicsTSSec'] = int(value) elif code == 65003: result['epicsTSNsec'] = int(value) else: key, value = value.split(':', 1) result[key] = astype(value) del tags[tag.name] return result
[ "def", "epics_tags", "(", "self", ")", ":", "if", "not", "self", ".", "is_epics", ":", "return", "None", "result", "=", "{", "}", "tags", "=", "self", ".", "tags", "for", "tag", "in", "list", "(", "self", ".", "tags", ".", "values", "(", ")", ")", ":", "code", "=", "tag", ".", "code", "if", "not", "65000", "<=", "code", "<", "65500", ":", "continue", "value", "=", "tag", ".", "value", "if", "code", "==", "65000", ":", "result", "[", "'timeStamp'", "]", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "float", "(", "value", ")", ")", "elif", "code", "==", "65001", ":", "result", "[", "'uniqueID'", "]", "=", "int", "(", "value", ")", "elif", "code", "==", "65002", ":", "result", "[", "'epicsTSSec'", "]", "=", "int", "(", "value", ")", "elif", "code", "==", "65003", ":", "result", "[", "'epicsTSNsec'", "]", "=", "int", "(", "value", ")", "else", ":", "key", ",", "value", "=", "value", ".", "split", "(", "':'", ",", "1", ")", "result", "[", "key", "]", "=", "astype", "(", "value", ")", "del", "tags", "[", "tag", ".", "name", "]", "return", "result" ]
Return consolidated metadata from EPICS areaDetector tags as dict. Remove areaDetector tags from self.tags.
[ "Return", "consolidated", "metadata", "from", "EPICS", "areaDetector", "tags", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4403-L4431
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.ndpi_tags
def ndpi_tags(self): """Return consolidated metadata from Hamamatsu NDPI as dict.""" if not self.is_ndpi: return None tags = self.tags result = {} for name in ('Make', 'Model', 'Software'): result[name] = tags[name].value for code, name in TIFF.NDPI_TAGS.items(): code = str(code) if code in tags: result[name] = tags[code].value # del tags[code] return result
python
def ndpi_tags(self): """Return consolidated metadata from Hamamatsu NDPI as dict.""" if not self.is_ndpi: return None tags = self.tags result = {} for name in ('Make', 'Model', 'Software'): result[name] = tags[name].value for code, name in TIFF.NDPI_TAGS.items(): code = str(code) if code in tags: result[name] = tags[code].value # del tags[code] return result
[ "def", "ndpi_tags", "(", "self", ")", ":", "if", "not", "self", ".", "is_ndpi", ":", "return", "None", "tags", "=", "self", ".", "tags", "result", "=", "{", "}", "for", "name", "in", "(", "'Make'", ",", "'Model'", ",", "'Software'", ")", ":", "result", "[", "name", "]", "=", "tags", "[", "name", "]", ".", "value", "for", "code", ",", "name", "in", "TIFF", ".", "NDPI_TAGS", ".", "items", "(", ")", ":", "code", "=", "str", "(", "code", ")", "if", "code", "in", "tags", ":", "result", "[", "name", "]", "=", "tags", "[", "code", "]", ".", "value", "# del tags[code]", "return", "result" ]
Return consolidated metadata from Hamamatsu NDPI as dict.
[ "Return", "consolidated", "metadata", "from", "Hamamatsu", "NDPI", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4434-L4447
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.geotiff_tags
def geotiff_tags(self): """Return consolidated metadata from GeoTIFF tags as dict.""" if not self.is_geotiff: return None tags = self.tags gkd = tags['GeoKeyDirectoryTag'].value if gkd[0] != 1: log.warning('GeoTIFF tags: invalid GeoKeyDirectoryTag') return {} result = { 'KeyDirectoryVersion': gkd[0], 'KeyRevision': gkd[1], 'KeyRevisionMinor': gkd[2], # 'NumberOfKeys': gkd[3], } # deltags = ['GeoKeyDirectoryTag'] geokeys = TIFF.GEO_KEYS geocodes = TIFF.GEO_CODES for index in range(gkd[3]): keyid, tagid, count, offset = gkd[4 + index * 4: index * 4 + 8] keyid = geokeys.get(keyid, keyid) if tagid == 0: value = offset else: tagname = TIFF.TAGS[tagid] # deltags.append(tagname) value = tags[tagname].value[offset: offset + count] if tagid == 34737 and count > 1 and value[-1] == '|': value = value[:-1] value = value if count > 1 else value[0] if keyid in geocodes: try: value = geocodes[keyid](value) except Exception: pass result[keyid] = value if 'IntergraphMatrixTag' in tags: value = tags['IntergraphMatrixTag'].value value = numpy.array(value) if len(value) == 16: value = value.reshape((4, 4)).tolist() result['IntergraphMatrix'] = value if 'ModelPixelScaleTag' in tags: value = numpy.array(tags['ModelPixelScaleTag'].value).tolist() result['ModelPixelScale'] = value if 'ModelTiepointTag' in tags: value = tags['ModelTiepointTag'].value value = numpy.array(value).reshape((-1, 6)).squeeze().tolist() result['ModelTiepoint'] = value if 'ModelTransformationTag' in tags: value = tags['ModelTransformationTag'].value value = numpy.array(value).reshape((4, 4)).tolist() result['ModelTransformation'] = value # if 'ModelPixelScaleTag' in tags and 'ModelTiepointTag' in tags: # sx, sy, sz = tags['ModelPixelScaleTag'].value # tiepoints = tags['ModelTiepointTag'].value # transforms = [] # for tp in range(0, len(tiepoints), 6): # i, j, k, x, y, z = tiepoints[tp:tp+6] # transforms.append([ # [sx, 0.0, 0.0, x - i * sx], # [0.0, -sy, 0.0, y + j * sy], # [0.0, 0.0, sz, z - k * sz], # [0.0, 0.0, 
0.0, 1.0]]) # if len(tiepoints) == 6: # transforms = transforms[0] # result['ModelTransformation'] = transforms if 'RPCCoefficientTag' in tags: rpcc = tags['RPCCoefficientTag'].value result['RPCCoefficient'] = { 'ERR_BIAS': rpcc[0], 'ERR_RAND': rpcc[1], 'LINE_OFF': rpcc[2], 'SAMP_OFF': rpcc[3], 'LAT_OFF': rpcc[4], 'LONG_OFF': rpcc[5], 'HEIGHT_OFF': rpcc[6], 'LINE_SCALE': rpcc[7], 'SAMP_SCALE': rpcc[8], 'LAT_SCALE': rpcc[9], 'LONG_SCALE': rpcc[10], 'HEIGHT_SCALE': rpcc[11], 'LINE_NUM_COEFF': rpcc[12:33], 'LINE_DEN_COEFF ': rpcc[33:53], 'SAMP_NUM_COEFF': rpcc[53:73], 'SAMP_DEN_COEFF': rpcc[73:]} return result
python
def geotiff_tags(self): """Return consolidated metadata from GeoTIFF tags as dict.""" if not self.is_geotiff: return None tags = self.tags gkd = tags['GeoKeyDirectoryTag'].value if gkd[0] != 1: log.warning('GeoTIFF tags: invalid GeoKeyDirectoryTag') return {} result = { 'KeyDirectoryVersion': gkd[0], 'KeyRevision': gkd[1], 'KeyRevisionMinor': gkd[2], # 'NumberOfKeys': gkd[3], } # deltags = ['GeoKeyDirectoryTag'] geokeys = TIFF.GEO_KEYS geocodes = TIFF.GEO_CODES for index in range(gkd[3]): keyid, tagid, count, offset = gkd[4 + index * 4: index * 4 + 8] keyid = geokeys.get(keyid, keyid) if tagid == 0: value = offset else: tagname = TIFF.TAGS[tagid] # deltags.append(tagname) value = tags[tagname].value[offset: offset + count] if tagid == 34737 and count > 1 and value[-1] == '|': value = value[:-1] value = value if count > 1 else value[0] if keyid in geocodes: try: value = geocodes[keyid](value) except Exception: pass result[keyid] = value if 'IntergraphMatrixTag' in tags: value = tags['IntergraphMatrixTag'].value value = numpy.array(value) if len(value) == 16: value = value.reshape((4, 4)).tolist() result['IntergraphMatrix'] = value if 'ModelPixelScaleTag' in tags: value = numpy.array(tags['ModelPixelScaleTag'].value).tolist() result['ModelPixelScale'] = value if 'ModelTiepointTag' in tags: value = tags['ModelTiepointTag'].value value = numpy.array(value).reshape((-1, 6)).squeeze().tolist() result['ModelTiepoint'] = value if 'ModelTransformationTag' in tags: value = tags['ModelTransformationTag'].value value = numpy.array(value).reshape((4, 4)).tolist() result['ModelTransformation'] = value # if 'ModelPixelScaleTag' in tags and 'ModelTiepointTag' in tags: # sx, sy, sz = tags['ModelPixelScaleTag'].value # tiepoints = tags['ModelTiepointTag'].value # transforms = [] # for tp in range(0, len(tiepoints), 6): # i, j, k, x, y, z = tiepoints[tp:tp+6] # transforms.append([ # [sx, 0.0, 0.0, x - i * sx], # [0.0, -sy, 0.0, y + j * sy], # [0.0, 0.0, sz, z - k * sz], # [0.0, 0.0, 
0.0, 1.0]]) # if len(tiepoints) == 6: # transforms = transforms[0] # result['ModelTransformation'] = transforms if 'RPCCoefficientTag' in tags: rpcc = tags['RPCCoefficientTag'].value result['RPCCoefficient'] = { 'ERR_BIAS': rpcc[0], 'ERR_RAND': rpcc[1], 'LINE_OFF': rpcc[2], 'SAMP_OFF': rpcc[3], 'LAT_OFF': rpcc[4], 'LONG_OFF': rpcc[5], 'HEIGHT_OFF': rpcc[6], 'LINE_SCALE': rpcc[7], 'SAMP_SCALE': rpcc[8], 'LAT_SCALE': rpcc[9], 'LONG_SCALE': rpcc[10], 'HEIGHT_SCALE': rpcc[11], 'LINE_NUM_COEFF': rpcc[12:33], 'LINE_DEN_COEFF ': rpcc[33:53], 'SAMP_NUM_COEFF': rpcc[53:73], 'SAMP_DEN_COEFF': rpcc[73:]} return result
[ "def", "geotiff_tags", "(", "self", ")", ":", "if", "not", "self", ".", "is_geotiff", ":", "return", "None", "tags", "=", "self", ".", "tags", "gkd", "=", "tags", "[", "'GeoKeyDirectoryTag'", "]", ".", "value", "if", "gkd", "[", "0", "]", "!=", "1", ":", "log", ".", "warning", "(", "'GeoTIFF tags: invalid GeoKeyDirectoryTag'", ")", "return", "{", "}", "result", "=", "{", "'KeyDirectoryVersion'", ":", "gkd", "[", "0", "]", ",", "'KeyRevision'", ":", "gkd", "[", "1", "]", ",", "'KeyRevisionMinor'", ":", "gkd", "[", "2", "]", ",", "# 'NumberOfKeys': gkd[3],", "}", "# deltags = ['GeoKeyDirectoryTag']", "geokeys", "=", "TIFF", ".", "GEO_KEYS", "geocodes", "=", "TIFF", ".", "GEO_CODES", "for", "index", "in", "range", "(", "gkd", "[", "3", "]", ")", ":", "keyid", ",", "tagid", ",", "count", ",", "offset", "=", "gkd", "[", "4", "+", "index", "*", "4", ":", "index", "*", "4", "+", "8", "]", "keyid", "=", "geokeys", ".", "get", "(", "keyid", ",", "keyid", ")", "if", "tagid", "==", "0", ":", "value", "=", "offset", "else", ":", "tagname", "=", "TIFF", ".", "TAGS", "[", "tagid", "]", "# deltags.append(tagname)", "value", "=", "tags", "[", "tagname", "]", ".", "value", "[", "offset", ":", "offset", "+", "count", "]", "if", "tagid", "==", "34737", "and", "count", ">", "1", "and", "value", "[", "-", "1", "]", "==", "'|'", ":", "value", "=", "value", "[", ":", "-", "1", "]", "value", "=", "value", "if", "count", ">", "1", "else", "value", "[", "0", "]", "if", "keyid", "in", "geocodes", ":", "try", ":", "value", "=", "geocodes", "[", "keyid", "]", "(", "value", ")", "except", "Exception", ":", "pass", "result", "[", "keyid", "]", "=", "value", "if", "'IntergraphMatrixTag'", "in", "tags", ":", "value", "=", "tags", "[", "'IntergraphMatrixTag'", "]", ".", "value", "value", "=", "numpy", ".", "array", "(", "value", ")", "if", "len", "(", "value", ")", "==", "16", ":", "value", "=", "value", ".", "reshape", "(", "(", "4", ",", "4", ")", ")", ".", "tolist", "(", ")", "result", "[", 
"'IntergraphMatrix'", "]", "=", "value", "if", "'ModelPixelScaleTag'", "in", "tags", ":", "value", "=", "numpy", ".", "array", "(", "tags", "[", "'ModelPixelScaleTag'", "]", ".", "value", ")", ".", "tolist", "(", ")", "result", "[", "'ModelPixelScale'", "]", "=", "value", "if", "'ModelTiepointTag'", "in", "tags", ":", "value", "=", "tags", "[", "'ModelTiepointTag'", "]", ".", "value", "value", "=", "numpy", ".", "array", "(", "value", ")", ".", "reshape", "(", "(", "-", "1", ",", "6", ")", ")", ".", "squeeze", "(", ")", ".", "tolist", "(", ")", "result", "[", "'ModelTiepoint'", "]", "=", "value", "if", "'ModelTransformationTag'", "in", "tags", ":", "value", "=", "tags", "[", "'ModelTransformationTag'", "]", ".", "value", "value", "=", "numpy", ".", "array", "(", "value", ")", ".", "reshape", "(", "(", "4", ",", "4", ")", ")", ".", "tolist", "(", ")", "result", "[", "'ModelTransformation'", "]", "=", "value", "# if 'ModelPixelScaleTag' in tags and 'ModelTiepointTag' in tags:", "# sx, sy, sz = tags['ModelPixelScaleTag'].value", "# tiepoints = tags['ModelTiepointTag'].value", "# transforms = []", "# for tp in range(0, len(tiepoints), 6):", "# i, j, k, x, y, z = tiepoints[tp:tp+6]", "# transforms.append([", "# [sx, 0.0, 0.0, x - i * sx],", "# [0.0, -sy, 0.0, y + j * sy],", "# [0.0, 0.0, sz, z - k * sz],", "# [0.0, 0.0, 0.0, 1.0]])", "# if len(tiepoints) == 6:", "# transforms = transforms[0]", "# result['ModelTransformation'] = transforms", "if", "'RPCCoefficientTag'", "in", "tags", ":", "rpcc", "=", "tags", "[", "'RPCCoefficientTag'", "]", ".", "value", "result", "[", "'RPCCoefficient'", "]", "=", "{", "'ERR_BIAS'", ":", "rpcc", "[", "0", "]", ",", "'ERR_RAND'", ":", "rpcc", "[", "1", "]", ",", "'LINE_OFF'", ":", "rpcc", "[", "2", "]", ",", "'SAMP_OFF'", ":", "rpcc", "[", "3", "]", ",", "'LAT_OFF'", ":", "rpcc", "[", "4", "]", ",", "'LONG_OFF'", ":", "rpcc", "[", "5", "]", ",", "'HEIGHT_OFF'", ":", "rpcc", "[", "6", "]", ",", "'LINE_SCALE'", ":", "rpcc", "[", "7", "]", 
",", "'SAMP_SCALE'", ":", "rpcc", "[", "8", "]", ",", "'LAT_SCALE'", ":", "rpcc", "[", "9", "]", ",", "'LONG_SCALE'", ":", "rpcc", "[", "10", "]", ",", "'HEIGHT_SCALE'", ":", "rpcc", "[", "11", "]", ",", "'LINE_NUM_COEFF'", ":", "rpcc", "[", "12", ":", "33", "]", ",", "'LINE_DEN_COEFF '", ":", "rpcc", "[", "33", ":", "53", "]", ",", "'SAMP_NUM_COEFF'", ":", "rpcc", "[", "53", ":", "73", "]", ",", "'SAMP_DEN_COEFF'", ":", "rpcc", "[", "73", ":", "]", "}", "return", "result" ]
Return consolidated metadata from GeoTIFF tags as dict.
[ "Return", "consolidated", "metadata", "from", "GeoTIFF", "tags", "as", "dict", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4450-L4541
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.is_imagej
def is_imagej(self): """Return ImageJ description if exists, else None.""" for description in (self.description, self.description1): if not description: return None if description[:7] == 'ImageJ=': return description return None
python
def is_imagej(self): """Return ImageJ description if exists, else None.""" for description in (self.description, self.description1): if not description: return None if description[:7] == 'ImageJ=': return description return None
[ "def", "is_imagej", "(", "self", ")", ":", "for", "description", "in", "(", "self", ".", "description", ",", "self", ".", "description1", ")", ":", "if", "not", "description", ":", "return", "None", "if", "description", "[", ":", "7", "]", "==", "'ImageJ='", ":", "return", "description", "return", "None" ]
Return ImageJ description if exists, else None.
[ "Return", "ImageJ", "description", "if", "exists", "else", "None", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4575-L4582
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.is_shaped
def is_shaped(self): """Return description containing array shape if exists, else None.""" for description in (self.description, self.description1): if not description: return None if description[:1] == '{' and '"shape":' in description: return description if description[:6] == 'shape=': return description return None
python
def is_shaped(self): """Return description containing array shape if exists, else None.""" for description in (self.description, self.description1): if not description: return None if description[:1] == '{' and '"shape":' in description: return description if description[:6] == 'shape=': return description return None
[ "def", "is_shaped", "(", "self", ")", ":", "for", "description", "in", "(", "self", ".", "description", ",", "self", ".", "description1", ")", ":", "if", "not", "description", ":", "return", "None", "if", "description", "[", ":", "1", "]", "==", "'{'", "and", "'\"shape\":'", "in", "description", ":", "return", "description", "if", "description", "[", ":", "6", "]", "==", "'shape='", ":", "return", "description", "return", "None" ]
Return description containing array shape if exists, else None.
[ "Return", "description", "containing", "array", "shape", "if", "exists", "else", "None", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4585-L4594
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.is_metaseries
def is_metaseries(self): """Page contains MDS MetaSeries metadata in ImageDescription tag.""" if self.index > 1 or self.software != 'MetaSeries': return False d = self.description return d.startswith('<MetaData>') and d.endswith('</MetaData>')
python
def is_metaseries(self): """Page contains MDS MetaSeries metadata in ImageDescription tag.""" if self.index > 1 or self.software != 'MetaSeries': return False d = self.description return d.startswith('<MetaData>') and d.endswith('</MetaData>')
[ "def", "is_metaseries", "(", "self", ")", ":", "if", "self", ".", "index", ">", "1", "or", "self", ".", "software", "!=", "'MetaSeries'", ":", "return", "False", "d", "=", "self", ".", "description", "return", "d", ".", "startswith", "(", "'<MetaData>'", ")", "and", "d", ".", "endswith", "(", "'</MetaData>'", ")" ]
Page contains MDS MetaSeries metadata in ImageDescription tag.
[ "Page", "contains", "MDS", "MetaSeries", "metadata", "in", "ImageDescription", "tag", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4638-L4643
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.is_ome
def is_ome(self): """Page contains OME-XML in ImageDescription tag.""" if self.index > 1 or not self.description: return False d = self.description return d[:14] == '<?xml version=' and d[-6:] == '</OME>'
python
def is_ome(self): """Page contains OME-XML in ImageDescription tag.""" if self.index > 1 or not self.description: return False d = self.description return d[:14] == '<?xml version=' and d[-6:] == '</OME>'
[ "def", "is_ome", "(", "self", ")", ":", "if", "self", ".", "index", ">", "1", "or", "not", "self", ".", "description", ":", "return", "False", "d", "=", "self", ".", "description", "return", "d", "[", ":", "14", "]", "==", "'<?xml version='", "and", "d", "[", "-", "6", ":", "]", "==", "'</OME>'" ]
Page contains OME-XML in ImageDescription tag.
[ "Page", "contains", "OME", "-", "XML", "in", "ImageDescription", "tag", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4646-L4651
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPage.is_scn
def is_scn(self): """Page contains Leica SCN XML in ImageDescription tag.""" if self.index > 1 or not self.description: return False d = self.description return d[:14] == '<?xml version=' and d[-6:] == '</scn>'
python
def is_scn(self): """Page contains Leica SCN XML in ImageDescription tag.""" if self.index > 1 or not self.description: return False d = self.description return d[:14] == '<?xml version=' and d[-6:] == '</scn>'
[ "def", "is_scn", "(", "self", ")", ":", "if", "self", ".", "index", ">", "1", "or", "not", "self", ".", "description", ":", "return", "False", "d", "=", "self", ".", "description", "return", "d", "[", ":", "14", "]", "==", "'<?xml version='", "and", "d", "[", "-", "6", ":", "]", "==", "'</scn>'" ]
Page contains Leica SCN XML in ImageDescription tag.
[ "Page", "contains", "Leica", "SCN", "XML", "in", "ImageDescription", "tag", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4654-L4659
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFrame._gettags
def _gettags(self, codes=None, lock=None): """Return list of (code, TiffTag) from file.""" fh = self.parent.filehandle tiff = self.parent.tiff unpack = struct.unpack lock = NullContext() if lock is None else lock tags = [] with lock: fh.seek(self.offset) try: tagno = unpack(tiff.tagnoformat, fh.read(tiff.tagnosize))[0] if tagno > 4096: raise TiffFileError('suspicious number of tags') except Exception: raise TiffFileError( 'corrupted page list at offset %i' % self.offset) tagoffset = self.offset + tiff.tagnosize # fh.tell() tagsize = tiff.tagsize tagindex = -tagsize codeformat = tiff.tagformat1[:2] tagbytes = fh.read(tagsize * tagno) for _ in range(tagno): tagindex += tagsize code = unpack(codeformat, tagbytes[tagindex:tagindex+2])[0] if codes and code not in codes: continue try: tag = TiffTag(self.parent, tagbytes[tagindex:tagindex+tagsize], tagoffset+tagindex) except TiffFileError as exc: log.warning('%s: %s', exc.__class__.__name__, exc) continue tags.append((code, tag)) return tags
python
def _gettags(self, codes=None, lock=None): """Return list of (code, TiffTag) from file.""" fh = self.parent.filehandle tiff = self.parent.tiff unpack = struct.unpack lock = NullContext() if lock is None else lock tags = [] with lock: fh.seek(self.offset) try: tagno = unpack(tiff.tagnoformat, fh.read(tiff.tagnosize))[0] if tagno > 4096: raise TiffFileError('suspicious number of tags') except Exception: raise TiffFileError( 'corrupted page list at offset %i' % self.offset) tagoffset = self.offset + tiff.tagnosize # fh.tell() tagsize = tiff.tagsize tagindex = -tagsize codeformat = tiff.tagformat1[:2] tagbytes = fh.read(tagsize * tagno) for _ in range(tagno): tagindex += tagsize code = unpack(codeformat, tagbytes[tagindex:tagindex+2])[0] if codes and code not in codes: continue try: tag = TiffTag(self.parent, tagbytes[tagindex:tagindex+tagsize], tagoffset+tagindex) except TiffFileError as exc: log.warning('%s: %s', exc.__class__.__name__, exc) continue tags.append((code, tag)) return tags
[ "def", "_gettags", "(", "self", ",", "codes", "=", "None", ",", "lock", "=", "None", ")", ":", "fh", "=", "self", ".", "parent", ".", "filehandle", "tiff", "=", "self", ".", "parent", ".", "tiff", "unpack", "=", "struct", ".", "unpack", "lock", "=", "NullContext", "(", ")", "if", "lock", "is", "None", "else", "lock", "tags", "=", "[", "]", "with", "lock", ":", "fh", ".", "seek", "(", "self", ".", "offset", ")", "try", ":", "tagno", "=", "unpack", "(", "tiff", ".", "tagnoformat", ",", "fh", ".", "read", "(", "tiff", ".", "tagnosize", ")", ")", "[", "0", "]", "if", "tagno", ">", "4096", ":", "raise", "TiffFileError", "(", "'suspicious number of tags'", ")", "except", "Exception", ":", "raise", "TiffFileError", "(", "'corrupted page list at offset %i'", "%", "self", ".", "offset", ")", "tagoffset", "=", "self", ".", "offset", "+", "tiff", ".", "tagnosize", "# fh.tell()", "tagsize", "=", "tiff", ".", "tagsize", "tagindex", "=", "-", "tagsize", "codeformat", "=", "tiff", ".", "tagformat1", "[", ":", "2", "]", "tagbytes", "=", "fh", ".", "read", "(", "tagsize", "*", "tagno", ")", "for", "_", "in", "range", "(", "tagno", ")", ":", "tagindex", "+=", "tagsize", "code", "=", "unpack", "(", "codeformat", ",", "tagbytes", "[", "tagindex", ":", "tagindex", "+", "2", "]", ")", "[", "0", "]", "if", "codes", "and", "code", "not", "in", "codes", ":", "continue", "try", ":", "tag", "=", "TiffTag", "(", "self", ".", "parent", ",", "tagbytes", "[", "tagindex", ":", "tagindex", "+", "tagsize", "]", ",", "tagoffset", "+", "tagindex", ")", "except", "TiffFileError", "as", "exc", ":", "log", ".", "warning", "(", "'%s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")", "continue", "tags", ".", "append", "(", "(", "code", ",", "tag", ")", ")", "return", "tags" ]
Return list of (code, TiffTag) from file.
[ "Return", "list", "of", "(", "code", "TiffTag", ")", "from", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4808-L4846
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFrame.aspage
def aspage(self): """Return TiffPage from file.""" if self.offset is None: raise ValueError('cannot return virtual frame as page.') self.parent.filehandle.seek(self.offset) return TiffPage(self.parent, index=self.index)
python
def aspage(self): """Return TiffPage from file.""" if self.offset is None: raise ValueError('cannot return virtual frame as page.') self.parent.filehandle.seek(self.offset) return TiffPage(self.parent, index=self.index)
[ "def", "aspage", "(", "self", ")", ":", "if", "self", ".", "offset", "is", "None", ":", "raise", "ValueError", "(", "'cannot return virtual frame as page.'", ")", "self", ".", "parent", ".", "filehandle", ".", "seek", "(", "self", ".", "offset", ")", "return", "TiffPage", "(", "self", ".", "parent", ",", "index", "=", "self", ".", "index", ")" ]
Return TiffPage from file.
[ "Return", "TiffPage", "from", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4848-L4853
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFrame.asarray
def asarray(self, *args, **kwargs): """Read image data from file and return as numpy array.""" # TODO: fix TypeError on Python 2 # "TypeError: unbound method asarray() must be called with TiffPage # instance as first argument (got TiffFrame instance instead)" if self._keyframe is None: raise RuntimeError('keyframe not set') kwargs['validate'] = False return TiffPage.asarray(self, *args, **kwargs)
python
def asarray(self, *args, **kwargs): """Read image data from file and return as numpy array.""" # TODO: fix TypeError on Python 2 # "TypeError: unbound method asarray() must be called with TiffPage # instance as first argument (got TiffFrame instance instead)" if self._keyframe is None: raise RuntimeError('keyframe not set') kwargs['validate'] = False return TiffPage.asarray(self, *args, **kwargs)
[ "def", "asarray", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: fix TypeError on Python 2", "# \"TypeError: unbound method asarray() must be called with TiffPage", "# instance as first argument (got TiffFrame instance instead)\"", "if", "self", ".", "_keyframe", "is", "None", ":", "raise", "RuntimeError", "(", "'keyframe not set'", ")", "kwargs", "[", "'validate'", "]", "=", "False", "return", "TiffPage", ".", "asarray", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Read image data from file and return as numpy array.
[ "Read", "image", "data", "from", "file", "and", "return", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4855-L4863
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFrame.asrgb
def asrgb(self, *args, **kwargs): """Read image data from file and return RGB image as numpy array.""" if self._keyframe is None: raise RuntimeError('keyframe not set') kwargs['validate'] = False return TiffPage.asrgb(self, *args, **kwargs)
python
def asrgb(self, *args, **kwargs): """Read image data from file and return RGB image as numpy array.""" if self._keyframe is None: raise RuntimeError('keyframe not set') kwargs['validate'] = False return TiffPage.asrgb(self, *args, **kwargs)
[ "def", "asrgb", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_keyframe", "is", "None", ":", "raise", "RuntimeError", "(", "'keyframe not set'", ")", "kwargs", "[", "'validate'", "]", "=", "False", "return", "TiffPage", ".", "asrgb", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Read image data from file and return RGB image as numpy array.
[ "Read", "image", "data", "from", "file", "and", "return", "RGB", "image", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4865-L4870
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFrame.keyframe
def keyframe(self, keyframe): """Set keyframe.""" if self._keyframe == keyframe: return if self._keyframe is not None: raise RuntimeError('cannot reset keyframe') if len(self._offsetscounts[0]) != len(keyframe.dataoffsets): raise RuntimeError('incompatible keyframe') if keyframe.is_tiled: pass if keyframe.is_contiguous: self._offsetscounts = ([self._offsetscounts[0][0]], [keyframe.is_contiguous[1]]) else: self._offsetscounts = clean_offsetscounts(*self._offsetscounts) self._keyframe = keyframe
python
def keyframe(self, keyframe): """Set keyframe.""" if self._keyframe == keyframe: return if self._keyframe is not None: raise RuntimeError('cannot reset keyframe') if len(self._offsetscounts[0]) != len(keyframe.dataoffsets): raise RuntimeError('incompatible keyframe') if keyframe.is_tiled: pass if keyframe.is_contiguous: self._offsetscounts = ([self._offsetscounts[0][0]], [keyframe.is_contiguous[1]]) else: self._offsetscounts = clean_offsetscounts(*self._offsetscounts) self._keyframe = keyframe
[ "def", "keyframe", "(", "self", ",", "keyframe", ")", ":", "if", "self", ".", "_keyframe", "==", "keyframe", ":", "return", "if", "self", ".", "_keyframe", "is", "not", "None", ":", "raise", "RuntimeError", "(", "'cannot reset keyframe'", ")", "if", "len", "(", "self", ".", "_offsetscounts", "[", "0", "]", ")", "!=", "len", "(", "keyframe", ".", "dataoffsets", ")", ":", "raise", "RuntimeError", "(", "'incompatible keyframe'", ")", "if", "keyframe", ".", "is_tiled", ":", "pass", "if", "keyframe", ".", "is_contiguous", ":", "self", ".", "_offsetscounts", "=", "(", "[", "self", ".", "_offsetscounts", "[", "0", "]", "[", "0", "]", "]", ",", "[", "keyframe", ".", "is_contiguous", "[", "1", "]", "]", ")", "else", ":", "self", ".", "_offsetscounts", "=", "clean_offsetscounts", "(", "*", "self", ".", "_offsetscounts", ")", "self", ".", "_keyframe", "=", "keyframe" ]
Set keyframe.
[ "Set", "keyframe", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4878-L4893
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffFrame.is_contiguous
def is_contiguous(self): """Return offset and size of contiguous data, else None.""" if self._keyframe is None: raise RuntimeError('keyframe not set') if self._keyframe.is_contiguous: return self._offsetscounts[0][0], self._keyframe.is_contiguous[1] return None
python
def is_contiguous(self): """Return offset and size of contiguous data, else None.""" if self._keyframe is None: raise RuntimeError('keyframe not set') if self._keyframe.is_contiguous: return self._offsetscounts[0][0], self._keyframe.is_contiguous[1] return None
[ "def", "is_contiguous", "(", "self", ")", ":", "if", "self", ".", "_keyframe", "is", "None", ":", "raise", "RuntimeError", "(", "'keyframe not set'", ")", "if", "self", ".", "_keyframe", ".", "is_contiguous", ":", "return", "self", ".", "_offsetscounts", "[", "0", "]", "[", "0", "]", ",", "self", ".", "_keyframe", ".", "is_contiguous", "[", "1", "]", "return", "None" ]
Return offset and size of contiguous data, else None.
[ "Return", "offset", "and", "size", "of", "contiguous", "data", "else", "None", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L4896-L4902
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffTag.name
def name(self): """Return name of tag from TIFF.TAGS registry.""" try: return TIFF.TAGS[self.code] except KeyError: return str(self.code)
python
def name(self): """Return name of tag from TIFF.TAGS registry.""" try: return TIFF.TAGS[self.code] except KeyError: return str(self.code)
[ "def", "name", "(", "self", ")", ":", "try", ":", "return", "TIFF", ".", "TAGS", "[", "self", ".", "code", "]", "except", "KeyError", ":", "return", "str", "(", "self", ".", "code", ")" ]
Return name of tag from TIFF.TAGS registry.
[ "Return", "name", "of", "tag", "from", "TIFF", ".", "TAGS", "registry", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5038-L5043
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffTag._fix_lsm_bitspersample
def _fix_lsm_bitspersample(self, parent): """Correct LSM bitspersample tag. Old LSM writers may use a separate region for two 16-bit values, although they fit into the tag value element of the tag. """ if self.code != 258 or self.count != 2: return # TODO: test this case; need example file log.warning('TiffTag %i: correcting LSM bitspersample tag', self.code) value = struct.pack('<HH', *self.value) self.valueoffset = struct.unpack('<I', value)[0] parent.filehandle.seek(self.valueoffset) self.value = struct.unpack('<HH', parent.filehandle.read(4))
python
def _fix_lsm_bitspersample(self, parent): """Correct LSM bitspersample tag. Old LSM writers may use a separate region for two 16-bit values, although they fit into the tag value element of the tag. """ if self.code != 258 or self.count != 2: return # TODO: test this case; need example file log.warning('TiffTag %i: correcting LSM bitspersample tag', self.code) value = struct.pack('<HH', *self.value) self.valueoffset = struct.unpack('<I', value)[0] parent.filehandle.seek(self.valueoffset) self.value = struct.unpack('<HH', parent.filehandle.read(4))
[ "def", "_fix_lsm_bitspersample", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "code", "!=", "258", "or", "self", ".", "count", "!=", "2", ":", "return", "# TODO: test this case; need example file", "log", ".", "warning", "(", "'TiffTag %i: correcting LSM bitspersample tag'", ",", "self", ".", "code", ")", "value", "=", "struct", ".", "pack", "(", "'<HH'", ",", "*", "self", ".", "value", ")", "self", ".", "valueoffset", "=", "struct", ".", "unpack", "(", "'<I'", ",", "value", ")", "[", "0", "]", "parent", ".", "filehandle", ".", "seek", "(", "self", ".", "valueoffset", ")", "self", ".", "value", "=", "struct", ".", "unpack", "(", "'<HH'", ",", "parent", ".", "filehandle", ".", "read", "(", "4", ")", ")" ]
Correct LSM bitspersample tag. Old LSM writers may use a separate region for two 16-bit values, although they fit into the tag value element of the tag.
[ "Correct", "LSM", "bitspersample", "tag", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5045-L5059
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPageSeries.asarray
def asarray(self, out=None): """Return image data from series of TIFF pages as numpy array.""" if self.parent: result = self.parent.asarray(series=self, out=out) if self.transform is not None: result = self.transform(result) return result return None
python
def asarray(self, out=None): """Return image data from series of TIFF pages as numpy array.""" if self.parent: result = self.parent.asarray(series=self, out=out) if self.transform is not None: result = self.transform(result) return result return None
[ "def", "asarray", "(", "self", ",", "out", "=", "None", ")", ":", "if", "self", ".", "parent", ":", "result", "=", "self", ".", "parent", ".", "asarray", "(", "series", "=", "self", ",", "out", "=", "out", ")", "if", "self", ".", "transform", "is", "not", "None", ":", "result", "=", "self", ".", "transform", "(", "result", ")", "return", "result", "return", "None" ]
Return image data from series of TIFF pages as numpy array.
[ "Return", "image", "data", "from", "series", "of", "TIFF", "pages", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5127-L5134
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPageSeries.offset
def offset(self): """Return offset to series data in file, if any.""" if not self._pages: return None pos = 0 for page in self._pages: if page is None: return None if not page.is_final: return None if not pos: pos = page.is_contiguous[0] + page.is_contiguous[1] continue if pos != page.is_contiguous[0]: return None pos += page.is_contiguous[1] page = self._pages[0] offset = page.is_contiguous[0] if (page.is_imagej or page.is_shaped) and len(self._pages) == 1: # truncated files return offset if pos == offset + product(self.shape) * self.dtype.itemsize: return offset return None
python
def offset(self): """Return offset to series data in file, if any.""" if not self._pages: return None pos = 0 for page in self._pages: if page is None: return None if not page.is_final: return None if not pos: pos = page.is_contiguous[0] + page.is_contiguous[1] continue if pos != page.is_contiguous[0]: return None pos += page.is_contiguous[1] page = self._pages[0] offset = page.is_contiguous[0] if (page.is_imagej or page.is_shaped) and len(self._pages) == 1: # truncated files return offset if pos == offset + product(self.shape) * self.dtype.itemsize: return offset return None
[ "def", "offset", "(", "self", ")", ":", "if", "not", "self", ".", "_pages", ":", "return", "None", "pos", "=", "0", "for", "page", "in", "self", ".", "_pages", ":", "if", "page", "is", "None", ":", "return", "None", "if", "not", "page", ".", "is_final", ":", "return", "None", "if", "not", "pos", ":", "pos", "=", "page", ".", "is_contiguous", "[", "0", "]", "+", "page", ".", "is_contiguous", "[", "1", "]", "continue", "if", "pos", "!=", "page", ".", "is_contiguous", "[", "0", "]", ":", "return", "None", "pos", "+=", "page", ".", "is_contiguous", "[", "1", "]", "page", "=", "self", ".", "_pages", "[", "0", "]", "offset", "=", "page", ".", "is_contiguous", "[", "0", "]", "if", "(", "page", ".", "is_imagej", "or", "page", ".", "is_shaped", ")", "and", "len", "(", "self", ".", "_pages", ")", "==", "1", ":", "# truncated files", "return", "offset", "if", "pos", "==", "offset", "+", "product", "(", "self", ".", "shape", ")", "*", "self", ".", "dtype", ".", "itemsize", ":", "return", "offset", "return", "None" ]
Return offset to series data in file, if any.
[ "Return", "offset", "to", "series", "data", "in", "file", "if", "any", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5137-L5162
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffPageSeries._getitem
def _getitem(self, key): """Return specified page of series from cache or file.""" key = int(key) if key < 0: key %= self._len if len(self._pages) == 1 and 0 < key < self._len: index = self._pages[0].index return self.parent.pages._getitem(index + key) return self._pages[key]
python
def _getitem(self, key): """Return specified page of series from cache or file.""" key = int(key) if key < 0: key %= self._len if len(self._pages) == 1 and 0 < key < self._len: index = self._pages[0].index return self.parent.pages._getitem(index + key) return self._pages[key]
[ "def", "_getitem", "(", "self", ",", "key", ")", ":", "key", "=", "int", "(", "key", ")", "if", "key", "<", "0", ":", "key", "%=", "self", ".", "_len", "if", "len", "(", "self", ".", "_pages", ")", "==", "1", "and", "0", "<", "key", "<", "self", ".", "_len", ":", "index", "=", "self", ".", "_pages", "[", "0", "]", ".", "index", "return", "self", ".", "parent", ".", "pages", ".", "_getitem", "(", "index", "+", "key", ")", "return", "self", ".", "_pages", "[", "key", "]" ]
Return specified page of series from cache or file.
[ "Return", "specified", "page", "of", "series", "from", "cache", "or", "file", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5180-L5188
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
TiffSequence.asarray
def asarray(self, file=None, out=None, **kwargs): """Read image data from files and return as numpy array. The kwargs parameters are passed to the imread function. Raise IndexError or ValueError if image shapes do not match. """ if file is not None: if isinstance(file, int): return self.imread(self.files[file], **kwargs) return self.imread(file, **kwargs) im = self.imread(self.files[0], **kwargs) shape = self.shape + im.shape result = create_output(out, shape, dtype=im.dtype) result = result.reshape(-1, *im.shape) for index, fname in zip(self._indices, self.files): index = [i-j for i, j in zip(index, self._startindex)] index = numpy.ravel_multi_index(index, self.shape) im = self.imread(fname, **kwargs) result[index] = im result.shape = shape return result
python
def asarray(self, file=None, out=None, **kwargs): """Read image data from files and return as numpy array. The kwargs parameters are passed to the imread function. Raise IndexError or ValueError if image shapes do not match. """ if file is not None: if isinstance(file, int): return self.imread(self.files[file], **kwargs) return self.imread(file, **kwargs) im = self.imread(self.files[0], **kwargs) shape = self.shape + im.shape result = create_output(out, shape, dtype=im.dtype) result = result.reshape(-1, *im.shape) for index, fname in zip(self._indices, self.files): index = [i-j for i, j in zip(index, self._startindex)] index = numpy.ravel_multi_index(index, self.shape) im = self.imread(fname, **kwargs) result[index] = im result.shape = shape return result
[ "def", "asarray", "(", "self", ",", "file", "=", "None", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "file", "is", "not", "None", ":", "if", "isinstance", "(", "file", ",", "int", ")", ":", "return", "self", ".", "imread", "(", "self", ".", "files", "[", "file", "]", ",", "*", "*", "kwargs", ")", "return", "self", ".", "imread", "(", "file", ",", "*", "*", "kwargs", ")", "im", "=", "self", ".", "imread", "(", "self", ".", "files", "[", "0", "]", ",", "*", "*", "kwargs", ")", "shape", "=", "self", ".", "shape", "+", "im", ".", "shape", "result", "=", "create_output", "(", "out", ",", "shape", ",", "dtype", "=", "im", ".", "dtype", ")", "result", "=", "result", ".", "reshape", "(", "-", "1", ",", "*", "im", ".", "shape", ")", "for", "index", ",", "fname", "in", "zip", "(", "self", ".", "_indices", ",", "self", ".", "files", ")", ":", "index", "=", "[", "i", "-", "j", "for", "i", ",", "j", "in", "zip", "(", "index", ",", "self", ".", "_startindex", ")", "]", "index", "=", "numpy", ".", "ravel_multi_index", "(", "index", ",", "self", ".", "shape", ")", "im", "=", "self", ".", "imread", "(", "fname", ",", "*", "*", "kwargs", ")", "result", "[", "index", "]", "=", "im", "result", ".", "shape", "=", "shape", "return", "result" ]
Read image data from files and return as numpy array. The kwargs parameters are passed to the imread function. Raise IndexError or ValueError if image shapes do not match.
[ "Read", "image", "data", "from", "files", "and", "return", "as", "numpy", "array", "." ]
train
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L5377-L5400