hash (string, 64 chars) | content (string, 0–1.51M chars)
---|---
38130b17f976bb6e9362cab7c0b3b3194c6644a357928a4401fd5a46c34d13c7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file is the main file used when running tests with pytest directly,
# in particular if running e.g. ``pytest docs/``.
import os
import tempfile
import hypothesis
from astropy import __version__
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
except ImportError:
PYTEST_HEADER_MODULES = {}
TESTED_VERSIONS = {}
# This has to be in the root dir or it will not display in CI.
def pytest_configure(config):
PYTEST_HEADER_MODULES['PyERFA'] = 'erfa'
PYTEST_HEADER_MODULES['Cython'] = 'cython'
PYTEST_HEADER_MODULES['Scikit-image'] = 'skimage'
PYTEST_HEADER_MODULES['asdf'] = 'asdf'
PYTEST_HEADER_MODULES['pyarrow'] = 'pyarrow'
TESTED_VERSIONS['Astropy'] = __version__
# This has to be in the root dir or it will not display in CI.
def pytest_report_header(config):
# This gets added after the pytest-astropy-header output.
return (f'ARCH_ON_CI: {os.environ.get("ARCH_ON_CI", "undefined")}\n'
f'IS_CRON: {os.environ.get("IS_CRON", "undefined")}\n')
# Tell Hypothesis that we might be running slow tests, to print the seed blob
# so we can easily reproduce failures from CI, and derive a fuzzing profile
# to try many more inputs when we detect a scheduled build or when specifically
# requested using the HYPOTHESIS_PROFILE=fuzzing environment variable or
# `pytest --hypothesis-profile=fuzzing ...` argument.
hypothesis.settings.register_profile(
'ci', deadline=None, print_blob=True, derandomize=True
)
hypothesis.settings.register_profile(
'fuzzing', deadline=None, print_blob=True, max_examples=1000
)
default = 'fuzzing' if (os.environ.get('IS_CRON') == 'true' and os.environ.get('ARCH_ON_CI') not in ('aarch64', 'ppc64le')) else 'ci' # noqa: E501
hypothesis.settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', default))
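# For example, to reproduce a CI failure locally with many random inputs, the
# fuzzing profile can be selected explicitly (illustrative invocations):
#
#     HYPOTHESIS_PROFILE=fuzzing pytest astropy/time
#     pytest --hypothesis-profile=fuzzing astropy/time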
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
|
bded93adc301a0fcc3dd01dc4f3e259ca71231cc421fd7bee8443d206b1ce143 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# NOTE: The configuration for the package, including the name, version, and
# other information are set in the setup.cfg file.
import sys
# First provide helpful messages if contributors try to run legacy commands
# for tests or docs.
TEST_HELP = """
Note: running tests is no longer done using 'python setup.py test'. Instead
you will need to run:
tox -e test
If you don't already have tox installed, you can install it with:
pip install tox
If you only want to run part of the test suite, you can also use pytest
directly with::
pip install -e .[test]
pytest
For more information, see:
https://docs.astropy.org/en/latest/development/testguide.html#running-tests
"""
if 'test' in sys.argv:
print(TEST_HELP)
sys.exit(1)
DOCS_HELP = """
Note: building the documentation is no longer done using
'python setup.py build_docs'. Instead you will need to run:
tox -e build_docs
If you don't already have tox installed, you can install it with:
pip install tox
You can also build the documentation with Sphinx directly using::
pip install -e .[docs]
cd docs
make html
For more information, see:
https://docs.astropy.org/en/latest/install.html#builddocs
"""
if 'build_docs' in sys.argv or 'build_sphinx' in sys.argv:
print(DOCS_HELP)
sys.exit(1)
# Only import these if the above checks are okay
# to avoid masking the real problem with import error.
from setuptools import setup # noqa
from extension_helpers import get_extensions # noqa
setup(ext_modules=get_extensions())
|
73b018608b35beb850df948ebe0315cbe4f8019618c62ccea33612d1f828bdf7 | import os
import shutil
import sys
import erfa # noqa
import pytest
import astropy # noqa
if len(sys.argv) == 3 and sys.argv[1] == '--astropy-root':
ROOT = sys.argv[2]
else:
# Make sure we don't allow any arguments to be passed - some tests call
# sys.executable which becomes this script when producing a pyinstaller
# bundle, but we should just error in this case since this is not the
# regular Python interpreter.
if len(sys.argv) > 1:
print("Extra arguments passed, exiting early")
sys.exit(1)
for root, dirnames, files in os.walk(os.path.join(ROOT, 'astropy')):
# NOTE: we can't simply use
# test_root = root.replace('astropy', 'astropy_tests')
# as we only want to change the one which is for the module, so instead
# we search for the last occurrence and replace that.
pos = root.rfind('astropy')
test_root = root[:pos] + 'astropy_tests' + root[pos + 7:]
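    # For instance (an illustrative path), a root of
    # '/builds/astropy/astropy/io' becomes '/builds/astropy/astropy_tests/io';
    # only the final 'astropy' component is rewritten, so any earlier
    # occurrence of 'astropy' in the path is left untouched.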
# Copy over the astropy 'tests' directories and their contents
for dirname in dirnames:
final_dir = os.path.relpath(os.path.join(test_root, dirname), ROOT)
# We only copy over 'tests' directories, but not astropy/tests (only
# astropy/tests/tests) since that is not just a directory with tests.
if dirname == 'tests' and not root.endswith('astropy'):
shutil.copytree(os.path.join(root, dirname), final_dir, dirs_exist_ok=True)
else:
# Create empty __init__.py files so that 'astropy_tests' still
# behaves like a single package, otherwise pytest gets confused
# by the different conftest.py files.
            init_filename = os.path.join(final_dir, '__init__.py')
            if not os.path.exists(init_filename):
                os.makedirs(final_dir, exist_ok=True)
                with open(init_filename, 'w') as f:
                    f.write("#")
# Copy over all conftest.py files
for file in files:
if file == 'conftest.py':
final_file = os.path.relpath(os.path.join(test_root, file), ROOT)
shutil.copy2(os.path.join(root, file), final_file)
# Add the top-level __init__.py file
with open(os.path.join('astropy_tests', '__init__.py'), 'w') as f:
f.write("#")
# Remove test file that tries to import all sub-packages at collection time
os.remove(os.path.join('astropy_tests', 'utils', 'iers', 'tests', 'test_leap_second.py'))
# Remove convolution tests for now as there are issues with the loading of the C extension.
# FIXME: one way to fix this would be to migrate the convolution C extension away from using
# ctypes and using the regular extension mechanism instead.
shutil.rmtree(os.path.join('astropy_tests', 'convolution'))
os.remove(os.path.join('astropy_tests', 'modeling', 'tests', 'test_convolution.py'))
os.remove(os.path.join('astropy_tests', 'modeling', 'tests', 'test_core.py'))
os.remove(os.path.join('astropy_tests', 'visualization', 'tests', 'test_lupton_rgb.py'))
# FIXME: The following tests rely on the fully qualified names of classes,
# which are not the same when running from the astropy_tests copy.
os.remove(os.path.join('astropy_tests', 'table', 'mixins', 'tests', 'test_registry.py'))
# Copy the top-level conftest.py
shutil.copy2(os.path.join(ROOT, 'astropy', 'conftest.py'),
os.path.join('astropy_tests', 'conftest.py'))
# We skip a few tests, which are generally ones that rely on explicitly
# checking the name of the current module (which ends up starting with
# astropy_tests rather than astropy).
SKIP_TESTS = ['test_exception_logging_origin',
'test_log',
'test_configitem',
'test_config_noastropy_fallback',
'test_no_home',
'test_path',
'test_rename_path',
'test_data_name_third_party_package',
'test_pkg_finder',
'test_wcsapi_extension',
'test_find_current_module_bundle',
'test_minversion',
'test_imports',
'test_generate_config',
'test_generate_config2',
'test_create_config_file',
'test_download_parallel_fills_cache']
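# The '-k' expression built below deselects these tests by name; it expands to
# something like (illustrative, truncated):
#     "not test_exception_logging_origin and not test_log and ..."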
# Run the tests!
sys.exit(pytest.main(['astropy_tests',
'-k ' + ' and '.join('not ' + test for test in SKIP_TESTS)],
plugins=['pytest_doctestplus.plugin',
'pytest_openfiles.plugin',
'pytest_remotedata.plugin',
'pytest_astropy_header.display']))
|
f3a0aeb97076b413c5b12bf54c9d21687bfae4f546d809811d8fd5110a7aa5eb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import os
import sys
from .version import version as __version__
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
    # If this __init__.py file is in ./astropy/ then the import is within a
    # source dir. .astropy-root is a file distributed with the source, but
    # that should not be installed.
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'https://docs.astropy.org/en/latest/'
else:
online_docs_root = f'https://docs.astropy.org/en/{__version__}/'
from . import config as _config # noqa: E402
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
# Define a base ScienceState for configuring constants and units
from .utils.state import ScienceState # noqa: E402
class base_constants_version(ScienceState):
"""
Base class for the real version-setters below
"""
_value = 'test'
_versions = dict(test='test')
@classmethod
def validate(cls, value):
if value not in cls._versions:
raise ValueError(f'Must be one of {list(cls._versions.keys())}')
return cls._versions[value]
@classmethod
def set(cls, value):
"""
Set the current constants value.
"""
import sys
if 'astropy.units' in sys.modules:
raise RuntimeError('astropy.units is already imported')
if 'astropy.constants' in sys.modules:
raise RuntimeError('astropy.constants is already imported')
return super().set(value)
class physical_constants(base_constants_version):
"""
The version of physical constants to use
"""
# Maintainers: update when new constants are added
_value = 'codata2018'
_versions = dict(codata2018='codata2018', codata2014='codata2014',
codata2010='codata2010', astropyconst40='codata2018',
astropyconst20='codata2014', astropyconst13='codata2010')
class astronomical_constants(base_constants_version):
"""
The version of astronomical constants to use
"""
# Maintainers: update when new constants are added
_value = 'iau2015'
_versions = dict(iau2015='iau2015', iau2012='iau2012',
astropyconst40='iau2015', astropyconst20='iau2015',
astropyconst13='iau2012')
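# An illustrative use of these science states; as enforced in
# ``base_constants_version.set`` above, this must happen before
# astropy.units or astropy.constants is imported:
#
#     import astropy
#     astropy.physical_constants.set('codata2014')
#     astropy.astronomical_constants.set('iau2012')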
# Create the test() function
from .tests.runner import TestRunner # noqa: E402
test = TestRunner.make_test_runner_in(__path__[0]) # noqa: F821
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
try:
from .utils import _compiler # noqa: F401
except ImportError:
if _is_astropy_source():
raise ImportError('You appear to be trying to import astropy from '
'within a source checkout or from an editable '
'installation without building the extension '
'modules first. Either run:\n\n'
' pip install -e .\n\nor\n\n'
' python setup.py build_ext --inplace\n\n'
'to make sure the extension modules are built ')
else:
# Outright broken installation, just raise standard error
raise
# Set the bibtex entry to the article referenced in CITATION.
def _get_bibtex():
citation_file = os.path.join(os.path.dirname(__file__), 'CITATION')
with open(citation_file, 'r') as citation:
refs = citation.read().split('@ARTICLE')[1:]
if len(refs) == 0:
return ''
bibtexreference = f'@ARTICLE{refs[0]}'
return bibtexreference
__citation__ = __bibtex__ = _get_bibtex()
from .logger import _init_log, _teardown_log # noqa: E402, F401
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page # noqa: E402, F401
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
import webbrowser
from urllib.parse import urlencode
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = f"https://docs.astropy.org/en/{version}/search.html?{urlencode({'q': query})}"
webbrowser.open(url)
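# For example, ``online_help('SkyCoord')`` opens the documentation search
# results for that query in the default web browser.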
__dir_inc__ = ['__version__', '__githash__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf', 'physical_constants',
'astronomical_constants']
from types import ModuleType as __module_type__ # noqa: E402
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir_inc__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
        # The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
|
ab94da6ed51f8e247cb5bf4f29bd54aa85a12e98494aebf149298826f5daa769 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module defines a logging class based on the built-in logging module.
.. note::
This module is meant for internal ``astropy`` usage. For use in other
packages, we recommend implementing your own logger instead.
"""
import inspect
import os
import sys
import logging
import warnings
from contextlib import contextmanager
from . import config as _config
from . import conf as _conf
from .utils import find_current_module
from .utils.exceptions import AstropyWarning, AstropyUserWarning
__all__ = ['Conf', 'conf', 'log', 'AstropyLogger', 'LoggingError']
# import the logging levels from logging so that one can do:
# log.setLevel(log.DEBUG), for example
logging_levels = ['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL',
'FATAL', ]
for level in logging_levels:
globals()[level] = getattr(logging, level)
__all__ += logging_levels
# Initialize by calling _init_log()
log = None
class LoggingError(Exception):
"""
This exception is for various errors that occur in the astropy logger,
typically when activating or deactivating logger-related features.
"""
class _AstLogIPYExc(Exception):
"""
An exception that is used only as a placeholder to indicate to the
IPython exception-catching mechanism that the astropy
exception-capturing is activated. It should not actually be used as
an exception anywhere.
"""
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.logger`.
"""
log_level = _config.ConfigItem(
'INFO',
"Threshold for the logging messages. Logging "
"messages that are less severe than this level "
"will be ignored. The levels are ``'DEBUG'``, "
"``'INFO'``, ``'WARNING'``, ``'ERROR'``.")
log_warnings = _config.ConfigItem(
True,
"Whether to log `warnings.warn` calls.")
log_exceptions = _config.ConfigItem(
False,
"Whether to log exceptions before raising "
"them.")
log_to_file = _config.ConfigItem(
False,
"Whether to always log messages to a log "
"file.")
log_file_path = _config.ConfigItem(
'',
"The file to log messages to. If empty string is given, "
"it defaults to a file ``'astropy.log'`` in "
"the astropy config directory.")
log_file_level = _config.ConfigItem(
'INFO',
"Threshold for logging messages to "
"`log_file_path`.")
log_file_format = _config.ConfigItem(
"%(asctime)r, "
"%(origin)r, %(levelname)r, %(message)r",
"Format for log file entries.")
log_file_encoding = _config.ConfigItem(
'',
"The encoding (e.g., UTF-8) to use for the log file. If empty string "
"is given, it defaults to the platform-preferred encoding.")
conf = Conf()
def _init_log():
"""Initializes the Astropy log--in most circumstances this is called
automatically when importing astropy.
"""
global log
orig_logger_cls = logging.getLoggerClass()
logging.setLoggerClass(AstropyLogger)
try:
log = logging.getLogger('astropy')
log._set_defaults()
finally:
logging.setLoggerClass(orig_logger_cls)
return log
def _teardown_log():
"""Shut down exception and warning logging (if enabled) and clear all
Astropy loggers from the logging module's cache.
    This involves poking some logging module internals, so much of it is 'at
    your own risk' and is allowed to pass silently if any exceptions occur.
"""
global log
if log.exception_logging_enabled():
log.disable_exception_logging()
if log.warnings_logging_enabled():
log.disable_warnings_logging()
del log
# Now for the fun stuff...
try:
logging._acquireLock()
try:
loggerDict = logging.Logger.manager.loggerDict
            for key in list(loggerDict.keys()):
if key == 'astropy' or key.startswith('astropy.'):
del loggerDict[key]
finally:
logging._releaseLock()
except Exception:
pass
Logger = logging.getLoggerClass()
class AstropyLogger(Logger):
'''
This class is used to set up the Astropy logging.
The main functionality added by this class over the built-in
logging.Logger class is the ability to keep track of the origin of the
messages, the ability to enable logging of warnings.warn calls and
exceptions, and the addition of colorized output and context managers to
easily capture messages to a file or list.
'''
def makeRecord(self, name, level, pathname, lineno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
if extra is None:
extra = {}
if 'origin' not in extra:
current_module = find_current_module(1, finddiff=[True, 'logging'])
if current_module is not None:
extra['origin'] = current_module.__name__
else:
extra['origin'] = 'unknown'
return Logger.makeRecord(self, name, level, pathname, lineno, msg,
args, exc_info, func=func, extra=extra,
sinfo=sinfo)
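    # An illustrative call showing that the origin can also be supplied
    # explicitly via ``extra``, bypassing the module lookup above:
    #
    #     log.info('dataset loaded', extra={'origin': 'astropy.io.fits'})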
_showwarning_orig = None
def _showwarning(self, *args, **kwargs):
# Bail out if we are not catching a warning from Astropy
if not isinstance(args[0], AstropyWarning):
return self._showwarning_orig(*args, **kwargs)
warning = args[0]
# Deliberately not using isinstance here: We want to display
# the class name only when it's not the default class,
# AstropyWarning. The name of subclasses of AstropyWarning should
# be displayed.
if type(warning) not in (AstropyWarning, AstropyUserWarning):
message = f'{warning.__class__.__name__}: {args[0]}'
else:
message = str(args[0])
mod_path = args[2]
# Now that we have the module's path, we look through sys.modules to
# find the module object and thus the fully-package-specified module
# name. The module.__file__ is the original source file name.
mod_name = None
mod_path, ext = os.path.splitext(mod_path)
for name, mod in list(sys.modules.items()):
try:
# Believe it or not this can fail in some cases:
# https://github.com/astropy/astropy/issues/2671
path = os.path.splitext(getattr(mod, '__file__', ''))[0]
except Exception:
continue
if path == mod_path:
mod_name = mod.__name__
break
if mod_name is not None:
self.warning(message, extra={'origin': mod_name})
else:
self.warning(message)
def warnings_logging_enabled(self):
return self._showwarning_orig is not None
def enable_warnings_logging(self):
'''
Enable logging of warnings.warn() calls
Once called, any subsequent calls to ``warnings.warn()`` are
redirected to this logger and emitted with level ``WARN``. Note that
this replaces the output from ``warnings.warn``.
This can be disabled with ``disable_warnings_logging``.
'''
if self.warnings_logging_enabled():
raise LoggingError("Warnings logging has already been enabled")
self._showwarning_orig = warnings.showwarning
warnings.showwarning = self._showwarning
def disable_warnings_logging(self):
'''
Disable logging of warnings.warn() calls
Once called, any subsequent calls to ``warnings.warn()`` are no longer
redirected to this logger.
This can be re-enabled with ``enable_warnings_logging``.
'''
if not self.warnings_logging_enabled():
raise LoggingError("Warnings logging has not been enabled")
if warnings.showwarning != self._showwarning:
raise LoggingError("Cannot disable warnings logging: "
"warnings.showwarning was not set by this "
"logger, or has been overridden")
warnings.showwarning = self._showwarning_orig
self._showwarning_orig = None
_excepthook_orig = None
def _excepthook(self, etype, value, traceback):
if traceback is None:
mod = None
else:
tb = traceback
while tb.tb_next is not None:
tb = tb.tb_next
mod = inspect.getmodule(tb)
        # Include the error type in the message.
if len(value.args) > 0:
message = f'{etype.__name__}: {str(value)}'
else:
message = str(etype.__name__)
if mod is not None:
self.error(message, extra={'origin': mod.__name__})
else:
self.error(message)
self._excepthook_orig(etype, value, traceback)
def exception_logging_enabled(self):
'''
Determine if the exception-logging mechanism is enabled.
Returns
-------
exclog : bool
True if exception logging is on, False if not.
'''
try:
ip = get_ipython()
except NameError:
ip = None
if ip is None:
return self._excepthook_orig is not None
else:
return _AstLogIPYExc in ip.custom_exceptions
def enable_exception_logging(self):
'''
Enable logging of exceptions
Once called, any uncaught exceptions will be emitted with level
``ERROR`` by this logger, before being raised.
This can be disabled with ``disable_exception_logging``.
'''
try:
ip = get_ipython()
except NameError:
ip = None
if self.exception_logging_enabled():
raise LoggingError("Exception logging has already been enabled")
if ip is None:
# standard python interpreter
self._excepthook_orig = sys.excepthook
sys.excepthook = self._excepthook
else:
# IPython has its own way of dealing with excepthook
# We need to locally define the function here, because IPython
# actually makes this a member function of their own class
def ipy_exc_handler(ipyshell, etype, evalue, tb, tb_offset=None):
# First use our excepthook
self._excepthook(etype, evalue, tb)
# Now also do IPython's traceback
ipyshell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
# now register the function with IPython
# note that we include _AstLogIPYExc so `disable_exception_logging`
# knows that it's disabling the right thing
ip.set_custom_exc((BaseException, _AstLogIPYExc), ipy_exc_handler)
# and set self._excepthook_orig to a no-op
self._excepthook_orig = lambda etype, evalue, tb: None
def disable_exception_logging(self):
'''
Disable logging of exceptions
Once called, any uncaught exceptions will no longer be emitted by this
logger.
This can be re-enabled with ``enable_exception_logging``.
'''
try:
ip = get_ipython()
except NameError:
ip = None
if not self.exception_logging_enabled():
raise LoggingError("Exception logging has not been enabled")
if ip is None:
# standard python interpreter
if sys.excepthook != self._excepthook:
raise LoggingError("Cannot disable exception logging: "
"sys.excepthook was not set by this logger, "
"or has been overridden")
sys.excepthook = self._excepthook_orig
self._excepthook_orig = None
else:
# IPython has its own way of dealing with exceptions
ip.set_custom_exc(tuple(), None)
def enable_color(self):
'''
Enable colorized output
'''
_conf.use_color = True
def disable_color(self):
'''
Disable colorized output
'''
_conf.use_color = False
@contextmanager
def log_to_file(self, filename, filter_level=None, filter_origin=None):
'''
Context manager to temporarily log messages to a file.
Parameters
----------
filename : str
The file to log messages to.
filter_level : str
If set, any log messages less important than ``filter_level`` will
not be output to the file. Note that this is in addition to the
top-level filtering for the logger, so if the logger has level
'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
will have no effect, since these messages are already filtered
out.
filter_origin : str
If set, only log messages with an origin starting with
``filter_origin`` will be output to the file.
Notes
-----
By default, the logger already outputs log messages to a file set in
the Astropy configuration file. Using this context manager does not
stop log messages from being output to that file, nor does it stop log
messages from being printed to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_file('myfile.log'):
# your code here
'''
encoding = conf.log_file_encoding if conf.log_file_encoding else None
fh = logging.FileHandler(filename, encoding=encoding)
if filter_level is not None:
fh.setLevel(filter_level)
if filter_origin is not None:
fh.addFilter(FilterOrigin(filter_origin))
f = logging.Formatter(conf.log_file_format)
fh.setFormatter(f)
self.addHandler(fh)
yield
fh.close()
self.removeHandler(fh)
@contextmanager
def log_to_list(self, filter_level=None, filter_origin=None):
'''
Context manager to temporarily log messages to a list.
Parameters
----------
        filter_level : str
            If set, any log messages less important than ``filter_level`` will
            not be captured in the list. Note that this is in addition to the
            top-level filtering for the logger, so if the logger has level
            'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
            will have no effect, since these messages are already filtered
            out.
        filter_origin : str
            If set, only log messages with an origin starting with
            ``filter_origin`` will be captured in the list.
Notes
-----
Using this context manager does not stop log messages from being
output to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_list() as log_list:
# your code here
'''
lh = ListHandler()
if filter_level is not None:
lh.setLevel(filter_level)
if filter_origin is not None:
lh.addFilter(FilterOrigin(filter_origin))
self.addHandler(lh)
yield lh.log_list
self.removeHandler(lh)
def _set_defaults(self):
'''
Reset logger to its initial state
'''
# Reset any previously installed hooks
if self.warnings_logging_enabled():
self.disable_warnings_logging()
if self.exception_logging_enabled():
self.disable_exception_logging()
# Remove all previous handlers
for handler in self.handlers[:]:
self.removeHandler(handler)
# Set levels
self.setLevel(conf.log_level)
# Set up the stdout handler
sh = StreamHandler()
self.addHandler(sh)
# Set up the main log file handler if requested (but this might fail if
# configuration directory or log file is not writeable).
if conf.log_to_file:
log_file_path = conf.log_file_path
# "None" as a string because it comes from config
try:
_ASTROPY_TEST_
testing_mode = True
except NameError:
testing_mode = False
try:
if log_file_path == '' or testing_mode:
log_file_path = os.path.join(
_config.get_config_dir('astropy'), "astropy.log")
else:
log_file_path = os.path.expanduser(log_file_path)
encoding = conf.log_file_encoding if conf.log_file_encoding else None
fh = logging.FileHandler(log_file_path, encoding=encoding)
except OSError as e:
warnings.warn(
f'log file {log_file_path!r} could not be opened for writing: {str(e)}',
RuntimeWarning)
else:
formatter = logging.Formatter(conf.log_file_format)
fh.setFormatter(formatter)
fh.setLevel(conf.log_file_level)
self.addHandler(fh)
if conf.log_warnings:
self.enable_warnings_logging()
if conf.log_exceptions:
self.enable_exception_logging()
class StreamHandler(logging.StreamHandler):
"""
A specialized StreamHandler that logs INFO and DEBUG messages to
stdout, and all other messages to stderr. Also provides coloring
of the output, if enabled in the parent logger.
"""
def emit(self, record):
'''
The formatter for stderr
'''
if record.levelno <= logging.INFO:
stream = sys.stdout
else:
stream = sys.stderr
if record.levelno < logging.DEBUG or not _conf.use_color:
print(record.levelname, end='', file=stream)
else:
# Import utils.console only if necessary and at the latest because
# the import takes a significant time [#4649]
from .utils.console import color_print
if record.levelno < logging.INFO:
color_print(record.levelname, 'magenta', end='', file=stream)
elif record.levelno < logging.WARN:
color_print(record.levelname, 'green', end='', file=stream)
elif record.levelno < logging.ERROR:
color_print(record.levelname, 'brown', end='', file=stream)
else:
color_print(record.levelname, 'red', end='', file=stream)
record.message = f"{record.msg} [{record.origin:s}]"
print(": " + record.message, file=stream)
class FilterOrigin:
'''A filter for the record origin'''
def __init__(self, origin):
self.origin = origin
def filter(self, record):
return record.origin.startswith(self.origin)
class ListHandler(logging.Handler):
'''A handler that can be used to capture the records in a list'''
def __init__(self, filter_level=None, filter_origin=None):
logging.Handler.__init__(self)
self.log_list = []
def emit(self, record):
self.log_list.append(record)
|
849d26898fd469a494890a464c47e1076f6d55df28b54c2619cc895ab57340d1 | # NOTE: First try _dev.scm_version if it exists and setuptools_scm is installed
# This file is not included in astropy wheels/tarballs, so otherwise it will
# fall back on the generated _version module.
try:
try:
from ._dev.scm_version import version
except ImportError:
from ._version import version
except Exception:
import warnings
warnings.warn(
f'could not determine {__name__.split(".")[0]} package version; '
f'this indicates a broken installation')
del warnings
version = '0.0.0'
# We use Version to define major, minor, micro, but ignore any suffixes.
def split_version(version):
pieces = [0, 0, 0]
try:
from packaging.version import Version
v = Version(version)
pieces = [v.major, v.minor, v.micro]
except Exception:
pass
return pieces
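# For example (illustrative): split_version('5.1.2.dev123') returns [5, 1, 2],
# while an unparsable string falls back to [0, 0, 0].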
major, minor, bugfix = split_version(version)
del split_version # clean up namespace.
release = 'dev' not in version
|
e3970894c3f13086681508803f5f44c92c2fd9b69c5f237606b04099de0ac0dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
import builtins
import os
import sys
import tempfile
import warnings
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
except ImportError:
PYTEST_HEADER_MODULES = {}
TESTED_VERSIONS = {}
import pytest
from astropy import __version__
# This is needed to silence a warning from matplotlib caused by
# PyInstaller's matplotlib runtime hook. This can be removed once the
# issue is fixed upstream in PyInstaller; it only impacts us when running
# the tests from a PyInstaller bundle.
# See https://github.com/astropy/astropy/issues/10785
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
# The above checks whether we are running in a PyInstaller bundle.
warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*",
category=UserWarning)
# Note: while the filterwarnings is required, this import has to come after the
# filterwarnings above, because this attempts to import matplotlib:
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB # noqa: E402
if HAS_MATPLOTLIB:
import matplotlib
matplotlibrc_cache = {}
@pytest.fixture
def ignore_matplotlibrc():
# This is a fixture for tests that use matplotlib but not pytest-mpl
# (which already handles rcParams)
from matplotlib import pyplot as plt
with plt.style.context({}, after_reset=True):
yield
@pytest.fixture
def fast_thread_switching():
"""Fixture that reduces thread switching interval.
This makes it easier to provoke race conditions.
"""
old = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
yield
sys.setswitchinterval(old)
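# A hypothetical test using this fixture (the name and body are illustrative):
#
#     def test_concurrent_table_access(fast_thread_switching):
#         # with a 1 microsecond switch interval, latent race conditions
#         # between worker threads surface far more readily
#         ...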
def pytest_configure(config):
from astropy.utils.iers import conf as iers_conf
# Disable IERS auto download for testing
iers_conf.auto_download = False
builtins._pytest_running = True
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
matplotlibrc_cache.update(matplotlib.rcParams)
matplotlib.rcdefaults()
matplotlib.use('Agg')
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration. Note that this
# is also set in the test runner, but we need to also set it here for
# things to work properly in parallel mode
builtins._xdg_config_home_orig = os.environ.get('XDG_CONFIG_HOME')
builtins._xdg_cache_home_orig = os.environ.get('XDG_CACHE_HOME')
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))
config.option.astropy_header = True
PYTEST_HEADER_MODULES['PyERFA'] = 'erfa'
PYTEST_HEADER_MODULES['Cython'] = 'cython'
PYTEST_HEADER_MODULES['Scikit-image'] = 'skimage'
PYTEST_HEADER_MODULES['asdf'] = 'asdf'
TESTED_VERSIONS['Astropy'] = __version__
def pytest_unconfigure(config):
from astropy.utils.iers import conf as iers_conf
# Undo IERS auto download setting for testing
iers_conf.reset('auto_download')
builtins._pytest_running = False
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
matplotlib.rcParams.update(matplotlibrc_cache)
matplotlibrc_cache.clear()
if builtins._xdg_config_home_orig is None:
os.environ.pop('XDG_CONFIG_HOME')
else:
os.environ['XDG_CONFIG_HOME'] = builtins._xdg_config_home_orig
if builtins._xdg_cache_home_orig is None:
os.environ.pop('XDG_CACHE_HOME')
else:
os.environ['XDG_CACHE_HOME'] = builtins._xdg_cache_home_orig
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get('failed'):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
'Some tests may fail when run from the IPython prompt; '
'especially, but not limited to tests involving logging and warning '
'handling. Unless you are certain as to the cause of the failure, '
'please check that the failure occurs outside IPython as well. See '
'https://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
'tests-when-running-the-tests-in-ipython for more information.',
yellow=True, bold=True)
|
b083f49c5a623cf28e75b25cd79688276dbe7ed19355a301169da9235586360e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file needs to be included here to make sure commands such
# as ``pytest docs/...`` work, since this
# will ignore the conftest.py file at the root of the repository
# and the one in astropy/conftest.py
import os
import tempfile
import pytest
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
@pytest.fixture(autouse=True)
def _docdir(request):
"""Run doctests in isolated tmpdir so outputs do not end up in repo"""
# Trigger ONLY for doctestplus
doctest_plugin = request.config.pluginmanager.getplugin("doctestplus")
if isinstance(request.node.parent, doctest_plugin._doctest_textfile_item_cls):
# Don't apply this fixture to io.rst. It reads files and doesn't write
if "io.rst" not in request.node.name:
tmpdir = request.getfixturevalue('tmpdir')
with tmpdir.as_cwd():
yield
else:
yield
else:
yield
|
6f600f3b21fec07e8bd0aecdf780f73a4b028ba7cacacb37ef41fecbcb1b874f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.
import os
import sys
import configparser
from datetime import datetime
from importlib import metadata
import doctest
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
# -- Check for missing dependencies -------------------------------------------
missing_requirements = {}
for line in metadata.requires('astropy'):
if 'extra == "docs"' in line:
req = Requirement(line.split(';')[0])
req_package = req.name.lower()
req_specifier = str(req.specifier)
        try:
            version = metadata.version(req_package)
        except metadata.PackageNotFoundError:
            missing_requirements[req_package] = req_specifier
            continue
        if version not in SpecifierSet(req_specifier, prereleases=True):
            missing_requirements[req_package] = req_specifier
if missing_requirements:
print('The following packages could not be found and are required to '
'build the documentation:')
for key, val in missing_requirements.items():
print(f' * {key} {val}')
print('Please install the "docs" requirements.')
sys.exit(1)
from sphinx_astropy.conf.v1 import * # noqa
# -- Plot configuration -------------------------------------------------------
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("X.Y.Z")` here.
check_sphinx_version("1.2.1") # noqa: F405
# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy'] # noqa: F405
# add any custom intersphinx for astropy
intersphinx_mapping['astropy-dev'] = ('https://docs.astropy.org/en/latest/', None) # noqa: F405
intersphinx_mapping['pyerfa'] = ('https://pyerfa.readthedocs.io/en/stable/', None) # noqa: F405
intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/stable/', None) # noqa: F405
intersphinx_mapping['ipython'] = ('https://ipython.readthedocs.io/en/stable/', None) # noqa: F405
intersphinx_mapping['pandas'] = ('https://pandas.pydata.org/pandas-docs/stable/', None) # noqa: F405, E501
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None) # noqa: F405, E501
intersphinx_mapping['packagetemplate'] = ('https://docs.astropy.org/projects/package-template/en/latest/', None) # noqa: F405, E501
intersphinx_mapping['h5py'] = ('https://docs.h5py.org/en/stable/', None) # noqa: F405
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates') # noqa: F405
exclude_patterns.append('changes') # noqa: F405
exclude_patterns.append('_pkgtemplate.rst') # noqa: F405
exclude_patterns.append('**/*.inc.rst')  # .inc.rst files are *include* files; don't have sphinx process them  # noqa: F405, E501
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
extensions += ["sphinx_changelog"] # noqa: F405
# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, 'setup.cfg'))
__minimum_python_version__ = setup_cfg['options']['python_requires'].replace('>=', '')
project = u'Astropy'
min_versions = {}
for line in metadata.requires('astropy'):
req = Requirement(line.split(';')[0])
min_versions[req.name.lower()] = str(req.specifier)
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
with open("common_links.txt", "r") as cl:
rst_epilog += cl.read().format(minimum_python=__minimum_python_version__,
**min_versions)
# Manually register doctest options since matplotlib 3.5 messed up allowing them
# from pytest-doctestplus
IGNORE_OUTPUT = doctest.register_optionflag('IGNORE_OUTPUT')
REMOTE_DATA = doctest.register_optionflag('REMOTE_DATA')
FLOAT_CMP = doctest.register_optionflag('FLOAT_CMP')
# Whether to create cross-references for the parameter types in the
# Parameters, Other Parameters, Returns and Yields sections of the docstring.
numpydoc_xref_param_type = True
# Words not to cross-reference. Most likely, these are common words used in
# parameter type descriptions that may be confused for classes of the same
# name. The base set comes from sphinx-astropy. We add more here.
numpydoc_xref_ignore.update({
"mixin",
"Any", # aka something that would be annotated with `typing.Any`
# needed in subclassing numpy # TODO! revisit
"Arguments", "Path",
# TODO! not need to ignore.
"flag", "bits",
})
# Mappings to fully qualified paths (or correct ReST references) for the
# aliases/shortcuts used when specifying the types of parameters.
# Numpy provides some defaults
# https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94
# and a base set comes from sphinx-astropy.
# so here we mostly need to define Astropy-specific x-refs
numpydoc_xref_aliases.update({
# python & adjacent
"Any": "`~typing.Any`",
"file-like": ":term:`python:file-like object`",
"file": ":term:`python:file object`",
"path-like": ":term:`python:path-like object`",
"module": ":term:`python:module`",
"buffer-like": ":term:buffer-like",
"hashable": ":term:`python:hashable`",
# for matplotlib
"color": ":term:`color`",
# for numpy
"ints": ":class:`python:int`",
# for astropy
"number": ":term:`number`",
"Representation": ":class:`~astropy.coordinates.BaseRepresentation`",
"writable": ":term:`writable file-like object`",
"readable": ":term:`readable file-like object`",
"BaseHDU": ":doc:`HDU </io/fits/api/hdus>`"
})
# Add from sphinx-astropy 1) glossary aliases 2) physical types.
numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases)
# -- Project information ------------------------------------------------------
author = u'The Astropy Developers'
copyright = f'2011–{datetime.utcnow().year}, ' + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = metadata.version(project)
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# Only include dev docs in dev version.
dev = 'dev' in release
if not dev:
exclude_patterns.append('development/*') # noqa: F405
exclude_patterns.append('testhelpers.rst') # noqa: F405
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ['astropy.']
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
# html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
# html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
# html_theme = None
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f'{project} v{release}'
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# A dictionary of values to pass into the template engine's context for all pages.
html_context = {
'to_be_indexed': ['stable', 'latest'],
'is_development': dev
}
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = 'https://github.com/astropy/astropy/issues/'
edit_on_github_branch = 'main'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# This is not used. See docs/nitpick-exceptions file for the actual listing.
nitpick_ignore = []
for line in open('nitpick-exceptions'):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
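# Each line in that file pairs a reference type with a target, e.g.
# (illustrative entries, not necessarily verbatim from the file):
#
#     py:class astropy.utils.collections.HomogeneousList
#     py:obj numpy.ma.MaskedArray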
# -- Options for the Sphinx gallery -------------------------------------------
try:
import warnings
import sphinx_gallery # noqa: F401
extensions += ["sphinx_gallery.gen_gallery"] # noqa: F405
sphinx_gallery_conf = {
'backreferences_dir': 'generated/modules', # path to store the module using example template # noqa: E501
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_" # noqa: E501
'examples_dirs': f'..{os.sep}examples', # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'https://matplotlib.org/stable/',
'numpy': 'https://numpy.org/doc/stable/',
},
'abort_on_example_error': True
}
# Filter out backend-related warnings as described in
# https://github.com/sphinx-gallery/sphinx-gallery/pull/564
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
except ImportError:
sphinx_gallery = None
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = ['https://journals.aas.org/manuscript-preparation/',
'https://maia.usno.navy.mil/',
'https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer',
'https://aa.usno.navy.mil/publications/docs/Circular_179.php',
'http://data.astropy.org',
'https://doi.org/10.1017/S0251107X00002406', # internal server error
'https://doi.org/10.1017/pasa.2013.31', # internal server error
r'https://github\.com/astropy/astropy/(?:issues|pull)/\d+']
linkcheck_timeout = 180
linkcheck_anchors = False
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['robots.txt']
def rstjinja(app, docname, source):
"""Render pages as a jinja template to hide/show dev docs. """
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
files_to_render = ["index", "install"]
if docname in files_to_render:
print(f"Jinja rendering {docname}")
rendered = app.builder.templates.render_string(
source[0], app.config.html_context)
source[0] = rendered
def resolve_astropy_and_dev_reference(app, env, node, contnode):
"""
Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases.
Documentation links in astropy can be set up as intersphinx links so that
affiliate packages do not have to override the docstrings when building
the docs.
If we are building the development docs it is a local ref targeting the
label ``astropy-dev:<label>``, but for stable docs it should be an
intersphinx resolution to the development docs.
See https://github.com/astropy/astropy/issues/11366
"""
# should the node be processed?
reftarget = node.get('reftarget') # str or None
if str(reftarget).startswith('astropy:'):
# This allows Astropy to use intersphinx links to itself and have
# them resolve to local links. Downstream packages will see intersphinx.
# TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented.
process, replace = True, 'astropy:'
elif dev and str(reftarget).startswith('astropy-dev:'):
process, replace = True, 'astropy-dev:'
else:
process, replace = False, ''
# make link local
if process:
reftype = node.get('reftype')
refdoc = node.get('refdoc', app.env.docname)
# convert astropy intersphinx targets to local links.
        # there are a few types of intersphinx link patterns, as described in
# https://docs.readthedocs.io/en/stable/guides/intersphinx.html
reftarget = reftarget.replace(replace, '')
if reftype == "doc": # also need to replace the doc link
node.replace_attr("reftarget", reftarget)
# Delegate to the ref node's original domain/target (typically :ref:)
try:
domain = app.env.domains[node['refdomain']]
return domain.resolve_xref(app.env, refdoc, app.builder,
reftype, reftarget, node, contnode)
except Exception:
pass
# Otherwise return None which should delegate to intersphinx
def setup(app):
if sphinx_gallery is None:
msg = ('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# Generate the page from Jinja template
app.connect("source-read", rstjinja)
# Set this to higher priority than intersphinx; this way when building
# dev docs astropy-dev: targets will go to the local docs instead of the
# intersphinx mapping
app.connect("missing-reference", resolve_astropy_and_dev_reference,
priority=400)
|
c07338c94a815b26b8846754da892de4c0e1b22dde30525e9980609aeaba4861 | # NOTE: this hook should be added to
# https://github.com/pyinstaller/pyinstaller-hooks-contrib
# once that repository is ready for pull requests
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files('skyfield')
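# collect_data_files returns a list of (source, destination) pairs for the
# non-Python data files shipped with the package, so the data files bundled
# with skyfield are included in the frozen application.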
|
bfa48512aada02006c25f249d8b46593f5a541cd7244d7f675f740dab06a9099 | # -*- coding: utf-8 -*-
"""
========================
Title of Example
========================
This example <verb> <active tense> <does something>.
The example uses <packages> to <do something> and <other package> to <do other
thing>. Include links to referenced packages like this: `astropy.io.fits` to
show the full ``astropy.io.fits`` path, or like this `~astropy.io.fits` to show just 'fits'.
*By: <names>*
*License: BSD*
"""
##############################################################################
# Make print work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
# uncomment if including figures:
# import matplotlib.pyplot as plt
# from astropy.visualization import astropy_mpl_style
# plt.style.use(astropy_mpl_style)
##############################################################################
# This code block is executed, although it produces no output. Lines starting
# with a simple hash are code comments and get treated as part of the code
# block. To include this new comment string we started the new block with a
# long line of hashes.
#
# The sphinx-gallery parser will assume that everything after this splitter
# which continues to start with a **comment hash and space** (respecting code
# style) is text that has to be rendered in
# html format. Keep in mind to always keep your comments together under
# the comment hashes. That means to break a paragraph you still need to comment
# that line break.
#
# In this example the next block of code produces some plottable data. The code
# is executed, the figure is saved, and then the code is presented, followed by
# the inlined figure.
x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)
plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')
###########################################################################
# Again it is possible to continue the discussion with a new Python string. This
# time we introduce the next code block, which generates 2 separate figures.
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('hot'))
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none')
##########################################################################
# There are some subtle differences between rendered HTML comment strings and
# code comment strings, which I'll demonstrate below. (Some of this only makes
# sense if you look at the
# :download:`raw Python script <plot_notebook.py>`.)
#
# Comments in comment blocks remain nested in the text.
def dummy():
"""Dummy function to make sure docstrings don't get rendered as text"""
pass
# Code comments not preceded by the hash splitter are left in code blocks.
string = """
Triple-quoted string which tries to break parser but doesn't.
"""
############################################################################
# Output of the script is captured:
print('Some output from Python')
############################################################################
# Finally, I'll call ``show`` at the end just so someone running the Python
# code directly will see the plots; this is not necessary for creating the docs
plt.show()
|
547790e0143fe9a8b1996e4cecd461eb313bd82630ddb4630845753fd1343eef | # -*- coding: utf-8 -*-
r"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy:astropy-coordinates-design` and
the docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example,
we will define a coordinate system defined by the plane of orbit of the
Sagittarius Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003).
The Sgr coordinate system is often referred to in terms of two angular
coordinates, :math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
https://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `~astropy.coordinates.Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : `~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : `~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
radial_velocity : `~astropy.units.Quantity`, optional, keyword-only
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
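##############################################################################
# As a quick, optional sanity check (a minimal sketch, not part of the
# original example): the combined matrix should be orthogonal -- ``A`` only
# flips the sign of the z-axis -- so its transpose must act as its inverse:
assert np.allclose(matrix_product(SGR_MATRIX, matrix_transpose(SGR_MATRIX)),
                   np.eye(3))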
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs')
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian, frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr,
frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(
fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]")
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(
fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]")
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(
fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]")
plt.show()
|
169d08766b27d0e8c3edf0512170a6796246d917db295c0b3463aa7bfb1c4330 | # -*- coding: utf-8 -*-
"""
================================================================
Convert a radial velocity to the Galactic Standard of Rest (GSR)
================================================================
Radial or line-of-sight velocities of sources are often reported in a
Heliocentric or Solar-system barycentric reference frame. A common
transformation incorporates the projection of the Sun's motion along the
line-of-sight to the target, hence transforming it to a Galactic rest frame
instead (sometimes referred to as the Galactic Standard of Rest, GSR). This
transformation depends on the assumptions about the orientation of the Galactic
frame relative to the bary- or Heliocentric frame. It also depends on the
assumed solar velocity vector. Here we'll demonstrate how to perform this
transformation using a sky position and barycentric radial-velocity.
*By: Adrian Price-Whelan*
*License: BSD*
"""
################################################################################
# Import the required Astropy packages:
import astropy.units as u
import astropy.coordinates as coord
################################################################################
# Use the latest convention for the Galactocentric coordinates
coord.galactocentric_frame_defaults.set('latest')
################################################################################
# For this example, let's work with the coordinates and barycentric radial
# velocity of the star HD 155967, as obtained from
# `Simbad <https://simbad.u-strasbg.fr/simbad/>`_:
icrs = coord.SkyCoord(ra=258.58356362*u.deg, dec=14.55255619*u.deg,
radial_velocity=-16.1*u.km/u.s, frame='icrs')
################################################################################
# We next need to decide on the velocity of the Sun in the assumed GSR frame.
# We'll use the same velocity vector as used in the
# `~astropy.coordinates.Galactocentric` frame, and convert it to a
# `~astropy.coordinates.CartesianRepresentation` object using the
# ``.to_cartesian()`` method of the
# `~astropy.coordinates.CartesianDifferential` object ``galcen_v_sun``:
v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()
################################################################################
# We now need to get a unit vector in the assumed Galactic frame from the sky
# position in the ICRS frame above. We'll use this unit vector to project the
# solar velocity onto the line-of-sight:
gal = icrs.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
################################################################################
# Now we project the solar velocity using this unit vector:
v_proj = v_sun.dot(unit_vector)
################################################################################
# Finally, we add the projection of the solar velocity to the radial velocity
# to get a GSR radial velocity:
rv_gsr = icrs.radial_velocity + v_proj
print(rv_gsr)
################################################################################
# We could wrap this in a function so we can control the solar velocity and
# re-use the above code:
def rv_to_gsr(c, v_sun=None):
"""Transform a barycentric radial velocity to the Galactic Standard of Rest
(GSR).
    The input radial velocity must be passed in as part of the coordinate
    object ``c`` (e.g., via the ``radial_velocity`` keyword of
    `~astropy.coordinates.SkyCoord`).
Parameters
----------
c : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
        The radial velocity, associated with a sky coordinate, to be
transformed.
v_sun : `~astropy.units.Quantity`, optional
The 3D velocity of the solar system barycenter in the GSR frame.
Defaults to the same solar motion as in the
`~astropy.coordinates.Galactocentric` frame.
Returns
-------
v_gsr : `~astropy.units.Quantity`
The input radial velocity transformed to a GSR frame.
"""
if v_sun is None:
v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()
gal = c.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
v_proj = v_sun.dot(unit_vector)
return c.radial_velocity + v_proj
rv_gsr = rv_to_gsr(icrs)
print(rv_gsr)
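##############################################################################
# The ``v_sun`` keyword makes it easy to explore the sensitivity to the
# assumed solar motion. As a minimal sketch (the velocity components below
# are illustrative values, not a recommendation):
v_sun_alt = coord.CartesianRepresentation([11.1, 232.24, 7.25] * u.km / u.s)
print(rv_to_gsr(icrs, v_sun=v_sun_alt))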
|
9035692fe1ca2a75b94864183c99f6dfb14e7c446c8fa7bdbb53ec46b965569a | # -*- coding: utf-8 -*-
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
*By: Adrian Price-Whelan*
*License: BSD*
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <https://simbad.u-strasbg.fr/simbad/>`_ database:
c1 = coord.SkyCoord(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s,
frame='icrs')
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
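##############################################################################
# The total Galactocentric speed is then just the quadrature sum of the three
# Cartesian components (a short optional check):
print(np.sqrt(gc1.v_x**2 + gc1.v_y**2 + gc1.v_z**2))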
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun can be specified as an
# `~astropy.units.Quantity` object with velocity units and is interpreted as a
# Cartesian velocity, as in the example below. Note that, as with the positions,
# the Galactocentric frame is a right-handed system (i.e., the Sun is at negative
# x values) so ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = [11.1, 244, 7.25] * (u.km / u.s) # [vx, vy, vz]
gc_frame = coord.Galactocentric(
galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11.1 km/s:
vx = 11.1 * u.km/u.s
v_sun2 = u.Quantity([vx, vy, vz]) # List of Quantity -> a single Quantity
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=v_sun2,
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.SkyCoord(ring_rep, frame=coord.Galactocentric)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel(f"$v_x$ [{(u.km / u.s).to_string('latex_inline')}]")
axes[1].set_ylabel(f"$v_y$ [{(u.km / u.s).to_string('latex_inline')}]")
fig.tight_layout()
plt.show()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(fr'$\mu_l \, \cos b$ [{(u.mas/u.yr).to_string("latex_inline")}]')
ax.legend()
plt.show()
|
12bef7dc219d41692d2cbc9fe75a49900ea1f0df6e831e5d70e413d075a0fb12 | # -*- coding: utf-8 -*-
"""
===================================================================
Determining and plotting the altitude/azimuth of a celestial object
===================================================================
This example demonstrates coordinate transformations and the creation of
visibility curves to assist with observing run planning.
In this example, we make a `~astropy.coordinates.SkyCoord` instance for M33.
The altitude-azimuth coordinates are then found using
`astropy.coordinates.EarthLocation` and `astropy.time.Time` objects.
This example is meant to demonstrate the capabilities of the
`astropy.coordinates` package. For more convenient and/or complex observation
planning, consider the `astroplan <https://astroplan.readthedocs.org/>`_
package.
*By: Erik Tollerud, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Let's suppose you are planning to visit picturesque Bear Mountain State Park
# in New York, USA. You're bringing your telescope with you (of course), and
# someone told you M33 is a great target to observe there. You happen to know
# you're free at 11:00 pm local time, and you want to know if it will be up.
# Astropy can answer that.
#
# Import numpy and matplotlib. For the latter, use a nicer set of plot
# parameters and set up support for plotting/converting quantities.
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style, quantity_support
plt.style.use(astropy_mpl_style)
quantity_support()
##############################################################################
# Import the packages necessary for finding coordinates and making
# coordinate transformations
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
##############################################################################
# `astropy.coordinates.SkyCoord.from_name` uses Simbad to resolve object
# names and retrieve coordinates.
#
# Get the coordinates of M33:
m33 = SkyCoord.from_name('M33')
##############################################################################
# Use `astropy.coordinates.EarthLocation` to provide the location of Bear
# Mountain and set the time to 11pm EDT on 2012 July 12:
bear_mountain = EarthLocation(lat=41.3*u.deg, lon=-74*u.deg, height=390*u.m)
utcoffset = -4*u.hour # Eastern Daylight Time
time = Time('2012-7-12 23:00:00') - utcoffset
##############################################################################
# `astropy.coordinates.EarthLocation.get_site_names` and
# `~astropy.coordinates.EarthLocation.of_site` can be used to get
# locations of major observatories.
#
# Use `astropy.coordinates` to find the Alt, Az coordinates of M33 as
# observed from Bear Mountain at 11pm on 2012 July 12.
m33altaz = m33.transform_to(AltAz(obstime=time,location=bear_mountain))
print(f"M33's Altitude = {m33altaz.alt:.2}")
##############################################################################
# This is helpful since it turns out M33 is barely above the horizon at this
# time. It's more informative to find M33's airmass over the course of
# the night.
#
# Find the alt,az coordinates of M33 at 100 times evenly spaced between 10pm
# and 7am EDT:
midnight = Time('2012-7-13 00:00:00') - utcoffset
delta_midnight = np.linspace(-2, 10, 100)*u.hour
frame_July13night = AltAz(obstime=midnight+delta_midnight,
location=bear_mountain)
m33altazs_July13night = m33.transform_to(frame_July13night)
##############################################################################
# Convert alt, az to airmass with the `~astropy.coordinates.AltAz.secz` attribute:
m33airmasss_July13night = m33altazs_July13night.secz
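##############################################################################
# As an optional sketch, we can bracket the window during which M33 stays
# below an airmass limit (the limit of 2 here is arbitrary):
good = (m33airmasss_July13night > 1) & (m33airmasss_July13night < 2)
print(delta_midnight[good].min(), delta_midnight[good].max())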
##############################################################################
# Plot the airmass as a function of time:
plt.plot(delta_midnight, m33airmasss_July13night)
plt.xlim(-2, 10)
plt.ylim(1, 4)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Airmass [Sec(z)]')
plt.show()
##############################################################################
# Use `~astropy.coordinates.get_sun` to find the location of the Sun at 1000
# evenly spaced times between noon on July 12 and noon on July 13:
from astropy.coordinates import get_sun
delta_midnight = np.linspace(-12, 12, 1000)*u.hour
times_July12_to_13 = midnight + delta_midnight
frame_July12_to_13 = AltAz(obstime=times_July12_to_13, location=bear_mountain)
sunaltazs_July12_to_13 = get_sun(times_July12_to_13).transform_to(frame_July12_to_13)
##############################################################################
# Do the same with `~astropy.coordinates.get_moon` to find when the moon is
# up. Be aware that this will need to download a 10MB file from the internet
# to get a precise location of the moon.
from astropy.coordinates import get_moon
moon_July12_to_13 = get_moon(times_July12_to_13)
moonaltazs_July12_to_13 = moon_July12_to_13.transform_to(frame_July12_to_13)
##############################################################################
# Find the alt,az coordinates of M33 at those same times:
m33altazs_July12_to_13 = m33.transform_to(frame_July12_to_13)
##############################################################################
# Make a beautiful figure illustrating nighttime and the altitudes of M33 and
# the Sun over that time:
plt.plot(delta_midnight, sunaltazs_July12_to_13.alt, color='r', label='Sun')
plt.plot(delta_midnight, moonaltazs_July12_to_13.alt, color=[0.75]*3, ls='--', label='Moon')
plt.scatter(delta_midnight, m33altazs_July12_to_13.alt,
c=m33altazs_July12_to_13.az, label='M33', lw=0, s=8,
cmap='viridis')
plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg,
sunaltazs_July12_to_13.alt < -0*u.deg, color='0.5', zorder=0)
plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg,
sunaltazs_July12_to_13.alt < -18*u.deg, color='k', zorder=0)
plt.colorbar().set_label('Azimuth [deg]')
plt.legend(loc='upper left')
plt.xlim(-12*u.hour, 12*u.hour)
plt.xticks((np.arange(13)*2-12)*u.hour)
plt.ylim(0*u.deg, 90*u.deg)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Altitude [deg]')
plt.show()
|
fbb510c5e0186d5e16961c613d3831dbd37a4d215fe262121af70c4b3d053210 | # -*- coding: utf-8 -*-
"""
==================
Edit a FITS header
==================
This example describes how to edit a value in a FITS header
using `astropy.io.fits`.
*By: Adrian Price-Whelan*
*License: BSD*
"""
from astropy.io import fits
##############################################################################
# Download a FITS file:
from astropy.utils.data import get_pkg_data_filename
fits_file = get_pkg_data_filename('tutorials/FITS-Header/input_file.fits')
##############################################################################
# Look at contents of the FITS file
fits.info(fits_file)
##############################################################################
# Look at the headers of the two extensions:
print("Before modifications:")
print()
print("Extension 0:")
print(repr(fits.getheader(fits_file, 0)))
print()
print("Extension 1:")
print(repr(fits.getheader(fits_file, 1)))
##############################################################################
# `astropy.io.fits` provides an object-oriented interface for reading and
# interacting with FITS files, but for small operations (like this example) it
# is often easier to use the
# `convenience functions <https://docs.astropy.org/en/latest/io/fits/index.html#convenience-functions>`_.
#
# To edit a single header value in the header for extension 0, use the
# `~astropy.io.fits.setval()` function. For example, set the OBJECT keyword
# to 'M31':
fits.setval(fits_file, 'OBJECT', value='M31')
##############################################################################
# With no extra arguments, this will modify the header for extension 0, but
# this can be changed using the ``ext`` keyword argument. For example, we can
# specify extension 1 instead:
fits.setval(fits_file, 'OBJECT', value='M31', ext=1)
##############################################################################
# This can also be used to create a new keyword-value pair ("card" in FITS
# lingo):
fits.setval(fits_file, 'ANEWKEY', value='some value')
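##############################################################################
# ``setval`` also accepts a ``comment`` argument, so the new card can carry a
# human-readable note (the text below is just an example):
fits.setval(fits_file, 'ANEWKEY', value='some value', comment='an example card')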
##############################################################################
# Again, this is useful for one-off modifications, but can be inefficient
# for operations like editing multiple headers in the same file
# because `~astropy.io.fits.setval()` loads the whole file each time it
# is called. To make several modifications, it's better to load the file once:
with fits.open(fits_file, 'update') as f:
for hdu in f:
hdu.header['OBJECT'] = 'CAT'
print("After modifications:")
print()
print("Extension 0:")
print(repr(fits.getheader(fits_file, 0)))
print()
print("Extension 1:")
print(repr(fits.getheader(fits_file, 1)))
|
f060aa9837a0df597dbb32aa17d9db8fda514facc6f269012d3fc9611751b903 | # -*- coding: utf-8 -*-
"""
=====================================================
Create a multi-extension FITS (MEF) file from scratch
=====================================================
This example demonstrates how to create a multi-extension FITS (MEF)
file from scratch using `astropy.io.fits`.
*By: Erik Bray*
*License: BSD*
"""
import os
##############################################################################
# HDUList objects are used to hold all the HDUs in a FITS file. This
# ``HDUList`` class is a subclass of Python's builtin `list` and can be
# created from scratch. For example, to create a FITS file with
# three extensions:
from astropy.io import fits
new_hdul = fits.HDUList()
new_hdul.append(fits.ImageHDU())
new_hdul.append(fits.ImageHDU())
##############################################################################
# Write out the new file to disk:
new_hdul.writeto('test.fits')
##############################################################################
# Alternatively, the HDU instances can be created first (or read from an
# existing FITS file).
#
# Create a multi-extension FITS file with two empty IMAGE extensions (a
# default PRIMARY HDU is prepended automatically if one is not specified;
# we use ``overwrite=True`` to overwrite the file if it already exists):
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
new_hdul = fits.HDUList([hdu1, hdu2])
new_hdul.writeto('test.fits', overwrite=True)
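##############################################################################
# As a quick optional check, display the structure of the file we just wrote;
# a PRIMARY HDU followed by one IMAGE extension is expected:
fits.info('test.fits')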
##############################################################################
# Finally, we'll remove the file we created:
os.remove('test.fits')
|
f8e82631004deaaab5e0800e207f27d16652fb1494aaeb6bec42c040fe4f5c35 | # -*- coding: utf-8 -*-
"""
=====================================================================
Accessing data stored as a table in a multi-extension FITS (MEF) file
=====================================================================
FITS files can often contain large amounts of multi-dimensional data and
tables. This example opens a FITS file with information
from Chandra's HETG-S instrument.
The example uses `astropy.utils.data` to download a multi-extension FITS (MEF)
file, `astropy.io.fits` to investigate the header, and
`astropy.table.Table` to explore the data.
*By: Lia Corrales, Adrian Price-Whelan, and Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Use `astropy.utils.data` subpackage to download the FITS file used in this
# example. Also import `~astropy.table.Table` from the `astropy.table` subpackage
# and `astropy.io.fits`
from astropy.utils.data import get_pkg_data_filename
from astropy.table import Table
from astropy.io import fits
##############################################################################
# Download a FITS file
event_filename = get_pkg_data_filename('tutorials/FITS-tables/chandra_events.fits')
##############################################################################
# Display information about the contents of the FITS file.
fits.info(event_filename)
##############################################################################
# Extension 1, EVENTS, is a Table that contains information about each X-ray
# photon that hit Chandra's HETG-S detector.
#
# Use `~astropy.table.Table` to read the table
events = Table.read(event_filename, hdu=1)
##############################################################################
# Print the column names of the Events Table.
print(events.columns)
##############################################################################
# If a column contains unit information, it will have an associated
# `astropy.units` object.
print(events['energy'].unit)
##############################################################################
# Print the data stored in the Energy column.
print(events['energy'])
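##############################################################################
# Columns that carry units can also be converted to `~astropy.units.Quantity`
# objects. A minimal sketch, assuming the energy column is in eV as reported
# above:
print(events['energy'].quantity.to('keV'))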
|
78cf5c60f74125abbfa26c0a951f75863b38c484d06ca630714937cedada6e19 | # -*- coding: utf-8 -*-
"""
=======================================
Read and plot an image from a FITS file
=======================================
This example opens an image stored in a FITS file and displays it to the screen.
This example uses `astropy.utils.data` to download the file, `astropy.io.fits` to open
the file, and `matplotlib.pyplot` to display the image.
*By: Lia R. Corrales, Adrian Price-Whelan, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Download the example FITS files used by this example:
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits')
##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:
fits.info(image_file)
##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:
image_data = fits.getdata(image_file, ext=0)
##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:
print(image_data.shape)
##############################################################################
# Display the image data:
plt.figure()
plt.imshow(image_data, cmap='gray')
plt.colorbar()
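##############################################################################
# As an optional extra, a histogram of the pixel values is a quick way to
# pick sensible display limits for ``imshow``:
plt.figure()
plt.hist(image_data.flatten(), bins=100)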
|
042abe04a704fa7bcd949b1e95727d07e411096873c00d61d30b3d539d20354a | # -*- coding: utf-8 -*-
"""
==========================================
Create a very large FITS file from scratch
==========================================
This example demonstrates how to create a large file (larger than will fit in
memory) from scratch using `astropy.io.fits`.
*By: Erik Bray*
*License: BSD*
"""
##############################################################################
# Normally to create a single image FITS file one would do something like:
import os
import numpy as np
from astropy.io import fits
data = np.zeros((40000, 40000), dtype=np.float64)
hdu = fits.PrimaryHDU(data=data)
##############################################################################
# Then use the `astropy.io.fits.writeto()` method to write out the new
# file to disk
hdu.writeto('large.fits')
##############################################################################
# However, a 40000 x 40000 array of doubles is nearly twelve gigabytes! Most
# systems won't be able to create that in memory just to write out to disk. In
# order to create such a large file efficiently requires a little extra work,
# and a few assumptions.
#
# First, it is helpful to anticipate roughly how large (as in, how many
# keywords) the header will be. FITS headers must be written in 2880 byte
# blocks, large enough for 36 keywords per block (including the END keyword in
# the final block). Typical headers have somewhere between 1 and 4 blocks,
# though sometimes more.
#
# Since the first thing we write to a FITS file is the header, we want to write
# enough header blocks so that there is plenty of padding in which to add new
# keywords without having to resize the whole file. Say you want the header to
# use 4 blocks by default. Then, excluding the END card which Astropy will add
# automatically, create the header and pad it out to 36 * 4 cards.
#
# Create a stub array to initialize the HDU; its
# exact size is irrelevant, as long as it has the desired number of
# dimensions
data = np.zeros((100, 100), dtype=np.float64)
hdu = fits.PrimaryHDU(data=data)
header = hdu.header
while len(header) < (36 * 4 - 1):
header.append() # Adds a blank card to the end
##############################################################################
# Now adjust the NAXISn keywords to the desired size of the array, and write
# only the header out to a file. Using the ``hdu.writeto()`` method will cause
# astropy to "helpfully" reset the NAXISn keywords to match the size of the
# dummy array. That is because it works hard to ensure that only valid FITS
# files are written. Instead, we can write just the header to a file using the
# `astropy.io.fits.Header.tofile` method:
header['NAXIS1'] = 40000
header['NAXIS2'] = 40000
header.tofile('large.fits')
##############################################################################
# Finally, grow out the end of the file to match the length of the
# data (plus the length of the header). This can be done very efficiently on
# most systems by seeking past the end of the file and writing a single byte,
# like so:
with open('large.fits', 'rb+') as fobj:
# Seek past the length of the header, plus the length of the
    # data we want to write.
# 8 is the number of bytes per value, i.e. abs(header['BITPIX'])/8
# (this example is assuming a 64-bit float)
# The -1 is to account for the final byte that we are about to
# write:
fobj.seek(len(header.tostring()) + (40000 * 40000 * 8) - 1)
fobj.write(b'\0')
##############################################################################
# More generally, this can be written:
shape = tuple(header[f'NAXIS{ii}'] for ii in range(1, header['NAXIS']+1))
with open('large.fits', 'rb+') as fobj:
    fobj.seek(len(header.tostring()) + (np.prod(shape) * np.abs(header['BITPIX']//8)) - 1)
fobj.write(b'\0')
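##############################################################################
# As an optional sanity check (a minimal sketch), the size of the file on
# disk should now equal the header length plus the full data section:
print(os.path.getsize('large.fits'),
      len(header.tostring()) + np.prod(shape) * np.abs(header['BITPIX'] // 8))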
##############################################################################
# On modern operating systems this will cause the file (past the header) to be
# filled with zeros out to the ~12GB needed to hold a 40000 x 40000 image. On
# filesystems that support sparse file creation (most Linux filesystems, but not
# the HFS+ filesystem used by most Macs) this is a very fast, efficient
# operation. On other systems your mileage may vary.
#
# This isn't the only way to build up a large file, but probably one of the
# safest. This method can also be used to create large multi-extension FITS
# files, with a little care.
##############################################################################
# Finally, we'll remove the file we created:
os.remove('large.fits')
|
93f258becc4ec5da3768b6fd02d801854a4cb29f419856cfe08914b80cd312c8 | # -*- coding: utf-8 -*-
"""
=====================================================
Convert a 3-color image (JPG) to separate FITS images
=====================================================
This example opens an RGB JPEG image and writes out each channel as a separate
FITS (image) file.
This example uses `pillow <https://python-pillow.org>`_ to read the image,
`matplotlib.pyplot` to display the image, and `astropy.io.fits` to save FITS files.
*By: Erik Bray, Adrian Price-Whelan*
*License: BSD*
"""
import numpy as np
from PIL import Image
from astropy.io import fits
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Load and display the original 3-color jpeg image:
image = Image.open('Hs-2009-14-a-web.jpg')
xsize, ysize = image.size
print(f"Image size: {ysize} x {xsize}")
print(f"Image bands: {image.getbands()}")
ax = plt.imshow(image)
##############################################################################
# Split the three channels (RGB) and get the data as Numpy arrays. The arrays
# are flattened, so they are 1-dimensional:
r, g, b = image.split()
r_data = np.array(r.getdata()) # data is now an array of length ysize*xsize
g_data = np.array(g.getdata())
b_data = np.array(b.getdata())
print(r_data.shape)
##############################################################################
# Reshape the image arrays to be 2-dimensional:
r_data = r_data.reshape(ysize, xsize) # data is now a matrix (ysize, xsize)
g_data = g_data.reshape(ysize, xsize)
b_data = b_data.reshape(ysize, xsize)
print(r_data.shape)
##############################################################################
# Write out the channels as separate FITS images.
# Add and visualize header info
red = fits.PrimaryHDU(data=r_data)
red.header['LATOBS'] = "32:11:56" # add spurious header info
red.header['LONGOBS'] = "110:56"
red.writeto('red.fits')
green = fits.PrimaryHDU(data=g_data)
green.header['LATOBS'] = "32:11:56"
green.header['LONGOBS'] = "110:56"
green.writeto('green.fits')
blue = fits.PrimaryHDU(data=b_data)
blue.header['LATOBS'] = "32:11:56"
blue.header['LONGOBS'] = "110:56"
blue.writeto('blue.fits')
from pprint import pprint
pprint(red.header)
##############################################################################
# Delete the files created
import os
os.remove('red.fits')
os.remove('green.fits')
os.remove('blue.fits')
|
39925d73352c68f7255c1d0d924280ef6398a56b59b5ccb3c0347e3e2b53f09f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for defining and converting
between different physical units.
This code is adapted from the `pynbody
<https://github.com/pynbody/pynbody>`_ units module written by Andrew
Pontzen, who has granted the Astropy project permission to use the
code under a BSD license.
"""
# Lots of things to import - go from more basic to advanced, so that
# whatever advanced ones need generally has been imported already;
# this helps prevent circular imports and makes it easier to understand
# where most time is spent (e.g., using python -X importtime).
from .core import *
from .quantity import *
from . import si
from . import cgs
from . import astrophys
from . import photometric
from . import misc
from .function import units as function_units
from .si import *
from .astrophys import *
from .photometric import *
from .cgs import *
from .physical import *
from .function.units import *
from .misc import *
from .equivalencies import *
from .function.core import *
from .function.logarithmic import *
from .structured import *
from .decorators import *
del bases
# Enable the set of default units. This notably does *not* include
# Imperial units.
set_enabled_units([si, cgs, astrophys, function_units, misc, photometric])
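# A minimal sketch (illustrative, not executed at import time) of how
# downstream code can opt in to the excluded unit sets:
#     from astropy.units import imperial
#     imperial.enable()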
# -------------------------------------------------------------------------
def __getattr__(attr):
if attr == "littleh":
from astropy.units.astrophys import littleh
return littleh
elif attr == "with_H0":
from astropy.units.equivalencies import with_H0
return with_H0
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
a671682046f0a87fb4fee08f99dc39dc03225cc8bc17033841e75048b23dbe9d | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the astrophysics-specific units. They are also
available in the `astropy.units` namespace.
"""
from . import si
from astropy.constants import si as _si
from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes,
set_enabled_units)
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# LENGTH
def_unit((['AU', 'au'], ['astronomical_unit']), _si.au, namespace=_ns, prefixes=True,
doc="astronomical unit: approximately the mean Earth--Sun "
"distance.")
def_unit(['pc', 'parsec'], _si.pc, namespace=_ns, prefixes=True,
doc="parsec: approximately 3.26 light-years.")
def_unit(['solRad', 'R_sun', 'Rsun'], _si.R_sun, namespace=_ns,
doc="Solar radius", prefixes=False,
format={'latex': r'R_{\odot}', 'unicode': 'R\N{SUN}'})
def_unit(['jupiterRad', 'R_jup', 'Rjup', 'R_jupiter', 'Rjupiter'],
_si.R_jup, namespace=_ns, prefixes=False, doc="Jupiter radius",
# LaTeX jupiter symbol requires wasysym
format={'latex': r'R_{\rm J}', 'unicode': 'R\N{JUPITER}'})
def_unit(['earthRad', 'R_earth', 'Rearth'], _si.R_earth, namespace=_ns,
prefixes=False, doc="Earth radius",
# LaTeX earth symbol requires wasysym
         format={'latex': r'R_{\oplus}', 'unicode': 'R⊕'})
def_unit(['lyr', 'lightyear'], (_si.c * si.yr).to(si.m),
namespace=_ns, prefixes=True, doc="Light year")
def_unit(['lsec', 'lightsecond'], (_si.c * si.s).to(si.m),
namespace=_ns, prefixes=False, doc="Light second")
###########################################################################
# MASS
def_unit(['solMass', 'M_sun', 'Msun'], _si.M_sun, namespace=_ns,
prefixes=False, doc="Solar mass",
format={'latex': r'M_{\odot}', 'unicode': 'M\N{SUN}'})
def_unit(['jupiterMass', 'M_jup', 'Mjup', 'M_jupiter', 'Mjupiter'],
_si.M_jup, namespace=_ns, prefixes=False, doc="Jupiter mass",
# LaTeX jupiter symbol requires wasysym
format={'latex': r'M_{\rm J}', 'unicode': 'M\N{JUPITER}'})
def_unit(['earthMass', 'M_earth', 'Mearth'], _si.M_earth, namespace=_ns,
prefixes=False, doc="Earth mass",
# LaTeX earth symbol requires wasysym
         format={'latex': r'M_{\oplus}', 'unicode': 'M⊕'})
##########################################################################
# ENERGY
# Here, explicitly convert the planck constant to 'eV s' since the constant
# can override that to give a more precise value that takes into account
# covariances between e and h. Eventually, this may also be replaced with
# just `_si.Ryd.to(eV)`.
def_unit(['Ry', 'rydberg'],
(_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV),
namespace=_ns, prefixes=True,
doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg "
"constant",
         format={'latex': r'R_{\infty}', 'unicode': 'R∞'})
###########################################################################
# ILLUMINATION
def_unit(['solLum', 'L_sun', 'Lsun'], _si.L_sun, namespace=_ns,
prefixes=False, doc="Solar luminance",
format={'latex': r'L_{\odot}', 'unicode': 'L\N{SUN}'})
###########################################################################
# SPECTRAL DENSITY
def_unit((['ph', 'photon'], ['photon']),
format={'ogip': 'photon', 'vounit': 'photon'},
namespace=_ns, prefixes=True)
def_unit(['Jy', 'Jansky', 'jansky'], 1e-26 * si.W / si.m ** 2 / si.Hz,
namespace=_ns, prefixes=True,
doc="Jansky: spectral flux density")
def_unit(['R', 'Rayleigh', 'rayleigh'],
(1e10 / (4 * _numpy.pi)) *
ph * si.m ** -2 * si.s ** -1 * si.sr ** -1,
namespace=_ns, prefixes=True,
doc="Rayleigh: photon flux")
###########################################################################
# EVENTS
def_unit((['ct', 'count'], ['count']),
format={'fits': 'count', 'ogip': 'count', 'vounit': 'count'},
namespace=_ns, prefixes=True, exclude_prefixes=['p'])
def_unit(['adu'], namespace=_ns, prefixes=True)
def_unit(['DN', 'dn'], namespace=_ns, prefixes=False)
###########################################################################
# MISCELLANEOUS
# Some of these are very FITS-specific and perhaps considered a mistake.
# Maybe they should be moved into the FITS format class?
# TODO: This is defined by the FITS standard as "relative to the sun".
# Is that mass, volume, what?
def_unit(['Sun'], namespace=_ns)
def_unit(['chan'], namespace=_ns, prefixes=True)
def_unit(['bin'], namespace=_ns, prefixes=True)
def_unit(['beam'], namespace=_ns, prefixes=True)
def_unit(['electron'], doc="Number of electrons", namespace=_ns,
         format={'latex': r'e^{-}', 'unicode': 'e⁻'})
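# A minimal usage sketch (illustrative) of the units defined above, once the
# `astropy.units` namespace is loaded:
#     >>> import astropy.units as u
#     >>> (1 * u.pc).to(u.lyr)  # doctest: +FLOAT_CMP
#     <Quantity 3.26156378 lyr>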
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
# -------------------------------------------------------------------------
def __getattr__(attr):
if attr == "littleh":
import warnings
from astropy.cosmology.units import littleh
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
("`littleh` is deprecated from module `astropy.units.astrophys` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.littleh` instead."),
AstropyDeprecationWarning)
return littleh
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
0b2781faaca6b6933abaa13f2824ea15230bbd10fdeb9f957443315a055f1d91 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Support for ``typing`` py3.9+ features while min version is py3.8.
"""
from typing import *
try: # py 3.9+
from typing import Annotated
except (ImportError, ModuleNotFoundError): # optional dependency
try:
from typing_extensions import Annotated
except (ImportError, ModuleNotFoundError):
Annotated = NotImplemented
else:
from typing_extensions import * # override typing
HAS_ANNOTATED = Annotated is not NotImplemented
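# A minimal usage sketch (illustrative): downstream code can guard
# ``Annotated``-based annotations on this flag, e.g.:
#     if HAS_ANNOTATED:
#         Metres = Annotated[float, "length in metres"]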
|
46d21d999a60a43886c5596edc5958a66299b51124ae7af43c8397e3ab1ac805 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Defines the physical types that correspond to different units."""
import numbers
import warnings
from . import core
from . import si
from . import astrophys
from . import cgs
from . import imperial # Need this for backward namespace compat, see issues 11975 and 11977 # noqa
from . import misc
from . import quantity
from astropy.utils.exceptions import AstropyDeprecationWarning
__all__ = ["def_physical_type", "get_physical_type", "PhysicalType"]
_units_and_physical_types = [
(core.dimensionless_unscaled, "dimensionless"),
(si.m, "length"),
(si.m ** 2, "area"),
(si.m ** 3, "volume"),
(si.s, "time"),
(si.rad, "angle"),
(si.sr, "solid angle"),
(si.m / si.s, {"speed", "velocity"}),
(si.m / si.s ** 2, "acceleration"),
(si.Hz, "frequency"),
(si.g, "mass"),
(si.mol, "amount of substance"),
(si.K, "temperature"),
(si.W * si.m ** -1 * si.K ** -1, "thermal conductivity"),
(si.J * si.K ** -1, {"heat capacity", "entropy"}),
(si.J * si.K ** -1 * si.kg ** -1, {"specific heat capacity", "specific entropy"}),
(si.N, "force"),
(si.J, {"energy", "work", "torque"}),
(si.J * si.m ** -2 * si.s ** -1, {"energy flux", "irradiance"}),
(si.Pa, {"pressure", "energy density", "stress"}),
(si.W, {"power", "radiant flux"}),
(si.kg * si.m ** -3, "mass density"),
(si.m ** 3 / si.kg, "specific volume"),
(si.mol / si.m ** 3, "molar concentration"),
(si.m ** 3 / si.mol, "molar volume"),
(si.kg * si.m / si.s, {"momentum", "impulse"}),
(si.kg * si.m ** 2 / si.s, {"angular momentum", "action"}),
(si.rad / si.s, {"angular speed", "angular velocity", "angular frequency"}),
(si.rad / si.s ** 2, "angular acceleration"),
(si.rad / si.m, "plate scale"),
(si.g / (si.m * si.s), "dynamic viscosity"),
(si.m ** 2 / si.s, {"diffusivity", "kinematic viscosity"}),
(si.m ** -1, "wavenumber"),
(si.m ** -2, "column density"),
(si.A, "electrical current"),
(si.C, "electrical charge"),
(si.V, "electrical potential"),
(si.Ohm, {"electrical resistance", "electrical impedance", "electrical reactance"}),
(si.Ohm * si.m, "electrical resistivity"),
(si.S, "electrical conductance"),
(si.S / si.m, "electrical conductivity"),
(si.F, "electrical capacitance"),
(si.C * si.m, "electrical dipole moment"),
(si.A / si.m ** 2, "electrical current density"),
(si.V / si.m, "electrical field strength"),
(si.C / si.m ** 2,
{"electrical flux density", "surface charge density", "polarization density"},
),
(si.C / si.m ** 3, "electrical charge density"),
(si.F / si.m, "permittivity"),
(si.Wb, "magnetic flux"),
(si.T, "magnetic flux density"),
(si.A / si.m, "magnetic field strength"),
(si.m ** 2 * si.A, "magnetic moment"),
(si.H / si.m, {"electromagnetic field strength", "permeability"}),
(si.H, "inductance"),
(si.cd, "luminous intensity"),
(si.lm, "luminous flux"),
(si.lx, {"luminous emittance", "illuminance"}),
(si.W / si.sr, "radiant intensity"),
(si.cd / si.m ** 2, "luminance"),
(si.m ** -3 * si.s ** -1, "volumetric rate"),
(astrophys.Jy, "spectral flux density"),
    (si.N / si.m, "surface tension"),
(si.J * si.m ** -3 * si.s ** -1, {"spectral flux density wav", "power density"}),
(astrophys.photon / si.Hz / si.cm ** 2 / si.s, "photon flux density"),
(astrophys.photon / si.AA / si.cm ** 2 / si.s, "photon flux density wav"),
(astrophys.R, "photon flux"),
(misc.bit, "data quantity"),
(misc.bit / si.s, "bandwidth"),
(cgs.Franklin, "electrical charge (ESU)"),
(cgs.statampere, "electrical current (ESU)"),
(cgs.Biot, "electrical current (EMU)"),
(cgs.abcoulomb, "electrical charge (EMU)"),
(si.m * si.s ** -3, {"jerk", "jolt"}),
(si.m * si.s ** -4, {"snap", "jounce"}),
(si.m * si.s ** -5, "crackle"),
(si.m * si.s ** -6, {"pop", "pounce"}),
(si.K / si.m, "temperature gradient"),
(si.J / si.kg, "specific energy"),
(si.mol * si.m ** -3 * si.s ** -1, "reaction rate"),
(si.kg * si.m ** 2, "moment of inertia"),
(si.mol / si.s, "catalytic activity"),
(si.J * si.K ** -1 * si.mol ** -1, "molar heat capacity"),
(si.mol / si.kg, "molality"),
(si.m * si.s, "absement"),
(si.m * si.s ** 2, "absity"),
(si.m ** 3 / si.s, "volumetric flow rate"),
(si.s ** -2, "frequency drift"),
(si.Pa ** -1, "compressibility"),
(astrophys.electron * si.m ** -3, "electron density"),
(astrophys.electron * si.m ** -2 * si.s ** -1, "electron flux"),
(si.kg / si.m ** 2, "surface mass density"),
(si.W / si.m ** 2 / si.sr, "radiance"),
(si.J / si.mol, "chemical potential"),
(si.kg / si.m, "linear density"),
(si.H ** -1, "magnetic reluctance"),
(si.W / si.K, "thermal conductance"),
(si.K / si.W, "thermal resistance"),
(si.K * si.m / si.W, "thermal resistivity"),
(si.N / si.s, "yank"),
(si.S * si.m ** 2 / si.mol, "molar conductivity"),
(si.m ** 2 / si.V / si.s, "electrical mobility"),
(si.lumen / si.W, "luminous efficacy"),
(si.m ** 2 / si.kg, {"opacity", "mass attenuation coefficient"}),
(si.kg * si.m ** -2 * si.s ** -1, {"mass flux", "momentum density"}),
(si.m ** -3, "number density"),
(si.m ** -2 * si.s ** -1, "particle flux"),
]
_physical_unit_mapping = {}
_unit_physical_mapping = {}
_name_physical_mapping = {}
# mapping from attribute-accessible name (no spaces, etc.) to the actual name.
_attrname_physical_mapping = {}
def _physical_type_from_str(name):
"""
Return the `PhysicalType` instance associated with the name of a
physical type.
"""
if name == "unknown":
raise ValueError("cannot uniquely identify an 'unknown' physical type.")
elif name in _attrname_physical_mapping:
return _attrname_physical_mapping[name] # convert attribute-accessible
elif name in _name_physical_mapping:
return _name_physical_mapping[name]
else:
raise ValueError(f"{name!r} is not a known physical type.")
def _replace_temperatures_with_kelvin(unit):
"""
If a unit contains a temperature unit besides kelvin, then replace
that unit with kelvin.
Temperatures cannot be converted directly between K, °F, °C, and
°Ra, in particular since there would be different conversions for
T and ΔT. However, each of these temperature units represents the
same physical type. Replacing the different temperature units with
kelvin allows the physical type to be treated consistently.
"""
physical_type_id = unit._get_physical_type_id()
physical_type_id_components = []
substitution_was_made = False
for base, power in physical_type_id:
if base in ["deg_F", "deg_C", "deg_R"]:
base = "K"
substitution_was_made = True
physical_type_id_components.append((base, power))
if substitution_was_made:
return core.Unit._from_physical_type_id(tuple(physical_type_id_components))
else:
return unit
def _standardize_physical_type_names(physical_type_input):
"""
Convert a string or `set` of strings into a `set` containing
string representations of physical types.
The strings provided in ``physical_type_input`` can each contain
multiple physical types that are separated by a regular slash.
Underscores are treated as spaces so that variable names could
be identical to physical type names.
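Examples
--------
For example, a slash-separated string is split apart and underscores
become spaces::
>>> sorted(_standardize_physical_type_names("energy_density/pressure"))
['energy density', 'pressure']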
"""
if isinstance(physical_type_input, str):
physical_type_input = {physical_type_input}
standardized_physical_types = set()
for ptype_input in physical_type_input:
if not isinstance(ptype_input, str):
raise ValueError(f"expecting a string, but got {ptype_input}")
input_set = set(ptype_input.split("/"))
processed_set = {s.strip().replace("_", " ") for s in input_set}
standardized_physical_types |= processed_set
return standardized_physical_types
class PhysicalType:
"""
Represents the physical type(s) that are dimensionally compatible
with a set of units.
Instances of this class should be accessed through either
`get_physical_type` or by using the
`~astropy.units.core.UnitBase.physical_type` attribute of units.
This class is not intended to be instantiated directly in user code.
Parameters
----------
unit : `~astropy.units.Unit`
The unit to be represented by the physical type.
physical_types : `str` or `set` of `str`
A `str` representing the name of the physical type of the unit,
or a `set` containing strings that represent one or more names
of physical types.
Notes
-----
A physical type will be considered equal to an equivalent
`PhysicalType` instance (recommended) or a string that contains a
name of the physical type. The latter method is not recommended
in packages, as the names of some physical types may change in the
future.
To maintain backwards compatibility, two physical type names may be
included in one string if they are separated with a slash (e.g.,
``"momentum/impulse"``). String representations of physical types
may include underscores instead of spaces.
Examples
--------
`PhysicalType` instances may be accessed via the
`~astropy.units.core.UnitBase.physical_type` attribute of units.
>>> import astropy.units as u
>>> u.meter.physical_type
PhysicalType('length')
`PhysicalType` instances may also be accessed by calling
`get_physical_type`. This function will accept a unit, a string
containing the name of a physical type, or the number one.
>>> u.get_physical_type(u.m ** -3)
PhysicalType('number density')
>>> u.get_physical_type("volume")
PhysicalType('volume')
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
Some units are dimensionally compatible with multiple physical types.
A pascal is intended to represent pressure and stress, but the unit
decomposition is equivalent to that of energy density.
>>> pressure = u.get_physical_type("pressure")
>>> pressure
PhysicalType({'energy density', 'pressure', 'stress'})
>>> 'energy density' in pressure
True
Physical types can be tested for equality against other physical
type objects or against strings that may contain the name of a
physical type.
>>> area = (u.m ** 2).physical_type
>>> area == u.barn.physical_type
True
>>> area == "area"
True
Multiplication, division, and exponentiation are enabled so that
physical types may be used for dimensional analysis.
>>> length = u.pc.physical_type
>>> area = (u.cm ** 2).physical_type
>>> length * area
PhysicalType('volume')
>>> area / length
PhysicalType('length')
>>> length ** 3
PhysicalType('volume')
Dimensional analysis may also be performed using a string that
contains the name of a physical type.
>>> "length" * area
PhysicalType('volume')
>>> "area" / length
PhysicalType('length')
Unknown physical types are labelled as ``"unknown"``.
>>> (u.s ** 13).physical_type
PhysicalType('unknown')
Dimensional analysis may be performed for unknown physical types too.
>>> length_to_19th_power = (u.m ** 19).physical_type
>>> length_to_20th_power = (u.m ** 20).physical_type
>>> length_to_20th_power / length_to_19th_power
PhysicalType('length')
"""
def __init__(self, unit, physical_types):
self._unit = _replace_temperatures_with_kelvin(unit)
self._physical_type_id = self._unit._get_physical_type_id()
self._physical_type = _standardize_physical_type_names(physical_types)
self._physical_type_list = sorted(self._physical_type)
def __iter__(self):
yield from self._physical_type_list
def __getattr__(self, attr):
# TODO: remove this whole method when accessing str attributes from
# physical types is no longer supported
# short circuit attribute accessed in __str__ to prevent recursion
if attr == '_physical_type_list':
super().__getattribute__(attr)
self_str_attr = getattr(str(self), attr, None)
if hasattr(str(self), attr):
warning_message = (
f"support for accessing str attributes such as {attr!r} "
"from PhysicalType instances is deprecated since 4.3 "
"and will be removed in a subsequent release.")
warnings.warn(warning_message, AstropyDeprecationWarning)
return self_str_attr
else:
super().__getattribute__(attr) # to get standard error message
def __eq__(self, other):
"""
Return `True` if ``other`` represents a physical type that is
consistent with the physical type of the `PhysicalType` instance.
"""
if isinstance(other, PhysicalType):
return self._physical_type_id == other._physical_type_id
elif isinstance(other, str):
other = _standardize_physical_type_names(other)
return other.issubset(self._physical_type)
else:
return NotImplemented
def __ne__(self, other):
equality = self.__eq__(other)
return not equality if isinstance(equality, bool) else NotImplemented
def _name_string_as_ordered_set(self):
return "{" + str(self._physical_type_list)[1:-1] + "}"
def __repr__(self):
if len(self._physical_type) == 1:
names = "'" + self._physical_type_list[0] + "'"
else:
names = self._name_string_as_ordered_set()
return f"PhysicalType({names})"
def __str__(self):
return "/".join(self._physical_type_list)
@staticmethod
def _dimensionally_compatible_unit(obj):
"""
Return a unit that corresponds to the provided argument.
If a unit is passed in, return that unit. If a physical type
(or a `str` with the name of a physical type) is passed in,
return a unit that corresponds to that physical type. If the
number equal to ``1`` is passed in, return a dimensionless unit.
Otherwise, return `NotImplemented`.
"""
if isinstance(obj, core.UnitBase):
return _replace_temperatures_with_kelvin(obj)
elif isinstance(obj, PhysicalType):
return obj._unit
elif isinstance(obj, numbers.Real) and obj == 1:
return core.dimensionless_unscaled
elif isinstance(obj, str):
return _physical_type_from_str(obj)._unit
else:
return NotImplemented
def _dimensional_analysis(self, other, operation):
other_unit = self._dimensionally_compatible_unit(other)
if other_unit is NotImplemented:
return NotImplemented
other_unit = _replace_temperatures_with_kelvin(other_unit)
new_unit = getattr(self._unit, operation)(other_unit)
return new_unit.physical_type
def __mul__(self, other):
return self._dimensional_analysis(other, "__mul__")
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._dimensional_analysis(other, "__truediv__")
def __rtruediv__(self, other):
other = self._dimensionally_compatible_unit(other)
if other is NotImplemented:
return NotImplemented
return other.physical_type._dimensional_analysis(self, "__truediv__")
def __pow__(self, power):
return (self._unit ** power).physical_type
def __hash__(self):
return hash(self._physical_type_id)
def __len__(self):
return len(self._physical_type)
# We need to prevent operations in which a Unit instance
# left-multiplies a PhysicalType instance from returning a `Quantity`
# instance with a PhysicalType as the value. We can do this by
# preventing np.array from casting a PhysicalType instance as
# an object array.
__array__ = None
def def_physical_type(unit, name):
"""
Add a mapping between a unit and the corresponding physical type(s).
If a physical type already exists for a unit, add new physical type
names so long as those names are not already in use for other
physical types.
Parameters
----------
unit : `~astropy.units.Unit`
The unit to be represented by the physical type.
name : `str` or `set` of `str`
A `str` representing the name of the physical type of the unit,
or a `set` containing strings that represent one or more names
of physical types.
Raises
------
ValueError
If a physical type name is already in use for another unit, or
if attempting to name a unit as ``"unknown"``.
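Examples
--------
For example, one may register a name for an otherwise unknown unit
(the unit and name below are purely illustrative)::
>>> import astropy.units as u
>>> u.def_physical_type(u.m ** 7, "hypervolume")  # doctest: +SKIP
>>> (u.m ** 7).physical_type  # doctest: +SKIP
PhysicalType('hypervolume')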
"""
physical_type_id = unit._get_physical_type_id()
physical_type_names = _standardize_physical_type_names(name)
if "unknown" in physical_type_names:
raise ValueError("cannot uniquely define an unknown physical type")
names_for_other_units = set(_unit_physical_mapping.keys()).difference(
_physical_unit_mapping.get(physical_type_id, {}))
names_already_in_use = physical_type_names & names_for_other_units
if names_already_in_use:
raise ValueError(
f"the following physical type names are already in use: "
f"{names_already_in_use}.")
unit_already_in_use = physical_type_id in _physical_unit_mapping
if unit_already_in_use:
physical_type = _physical_unit_mapping[physical_type_id]
physical_type_names |= set(physical_type)
physical_type.__init__(unit, physical_type_names)
else:
physical_type = PhysicalType(unit, physical_type_names)
_physical_unit_mapping[physical_type_id] = physical_type
for ptype in physical_type:
_unit_physical_mapping[ptype] = physical_type_id
for ptype_name in physical_type_names:
_name_physical_mapping[ptype_name] = physical_type
# attribute-accessible name
attr_name = ptype_name.replace(' ', '_').replace('(', '').replace(')', '')
_attrname_physical_mapping[attr_name] = physical_type
def get_physical_type(obj):
"""
Return the physical type that corresponds to a unit (or another
physical type representation).
Parameters
----------
obj : quantity-like or `~astropy.units.PhysicalType`-like
An object that (implicitly or explicitly) has a corresponding
physical type. This object may be a unit, a
`~astropy.units.Quantity`, an object that can be converted to a
`~astropy.units.Quantity` (such as a number or array), a string
that contains a name of a physical type, or a
`~astropy.units.PhysicalType` instance.
Returns
-------
`~astropy.units.PhysicalType`
A representation of the physical type(s) of the unit.
Examples
--------
The physical type may be retrieved from a unit or a
`~astropy.units.Quantity`.
>>> import astropy.units as u
>>> u.get_physical_type(u.meter ** -2)
PhysicalType('column density')
>>> u.get_physical_type(0.62 * u.barn * u.Mpc)
PhysicalType('volume')
The physical type may also be retrieved by providing a `str` that
contains the name of a physical type.
>>> u.get_physical_type("energy")
PhysicalType({'energy', 'torque', 'work'})
Numbers and arrays of numbers correspond to a dimensionless physical
type.
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
"""
if isinstance(obj, PhysicalType):
return obj
if isinstance(obj, str):
return _physical_type_from_str(obj)
try:
unit = obj if isinstance(obj, core.UnitBase) else quantity.Quantity(obj, copy=False).unit
except TypeError as exc:
raise TypeError(f"{obj} does not correspond to a physical type.") from exc
unit = _replace_temperatures_with_kelvin(unit)
physical_type_id = unit._get_physical_type_id()
unit_has_known_physical_type = physical_type_id in _physical_unit_mapping
if unit_has_known_physical_type:
return _physical_unit_mapping[physical_type_id]
else:
return PhysicalType(unit, "unknown")
# ------------------------------------------------------------------------------
# Script section creating the physical types and the documentation
# define the physical types
for unit, physical_type in _units_and_physical_types:
def_physical_type(unit, physical_type)
# For getting the physical types.
def __getattr__(name):
"""Checks for physical types using lazy import.
This also allows user-defined physical types to be accessible from the
:mod:`astropy.units.physical` module.
See `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_
Parameters
----------
name : str
The name of the attribute in this module. If it is already defined,
then this function is not called.
Returns
-------
ptype : `~astropy.units.physical.PhysicalType`
Raises
------
AttributeError
If the ``name`` does not correspond to a physical type
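Examples
--------
>>> from astropy.units import physical
>>> physical.speed
PhysicalType({'speed', 'velocity'})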
"""
if name in _attrname_physical_mapping:
return _attrname_physical_mapping[name]
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
def __dir__():
"""Return contents directory (__all__ + all physical type names)."""
return list(set(__all__) | set(_attrname_physical_mapping.keys()))
# This generates a docstring addition for this module that describes all of the
# standard physical types defined here.
if __doc__ is not None:
doclines = [
".. list-table:: Defined Physical Types",
" :header-rows: 1",
" :widths: 30 10 50",
"",
" * - Physical type",
" - Unit",
" - Other physical type(s) with same unit"]
for name in sorted(_name_physical_mapping.keys()):
physical_type = _name_physical_mapping[name]
doclines.extend([
f" * - _`{name}`",
f" - :math:`{physical_type._unit.to_string('latex')[1:-1]}`",
f" - {', '.join([n for n in physical_type if n != name])}"])
__doc__ += '\n\n' + '\n'.join(doclines)
del unit, physical_type
|
993c714f2008288a714601027ad005de4d23245e3e460918c66069eb7966a3f1 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines miscellaneous units. They are also
available in the `astropy.units` namespace.
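The units defined here convert like any other unit; for example::
>>> import astropy.units as u
>>> u.bar.to(u.Pa)
100000.0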
"""
from . import si
from astropy.constants import si as _si
from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes,
set_enabled_units)
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# AREAS
def_unit(['barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True,
doc="barn: unit of area used in HEP")
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad,
namespace=_ns, prefixes=False,
doc="cycle: angular measurement, a full turn or rotation")
def_unit(['spat', 'sp'], 4.0 * _numpy.pi * si.sr,
namespace=_ns, prefixes=False,
doc="spat: the solid angle of the sphere, 4pi sr")
##########################################################################
# PRESSURE
def_unit(['bar'], 1e5 * si.Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="bar: pressure")
# The torr is almost the same as mmHg but not quite.
# See https://en.wikipedia.org/wiki/Torr
# Define the unit here despite it not being an astrophysical unit.
# It may be moved if more similar units are created later.
def_unit(['Torr', 'torr'], _si.atm.value/760. * si.Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="Unit of pressure based on an absolute scale, now defined as "
"exactly 1/760 of a standard atmosphere")
###########################################################################
# MASS
def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass",
format={'latex': r'M_{p}', 'unicode': 'Mₚ'})
def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass",
format={'latex': r'M_{e}', 'unicode': 'Mₑ'})
# Unified atomic mass unit
def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns,
prefixes=True, exclude_prefixes=['a', 'da'],
doc="Unified atomic mass unit")
###########################################################################
# COMPUTER
def_unit((['bit', 'b'], ['bit']), namespace=_ns,
prefixes=si_prefixes + binary_prefixes)
def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns,
format={'vounit': 'byte'},
prefixes=si_prefixes + binary_prefixes,
exclude_prefixes=['d'])
def_unit((['pix', 'pixel'], ['pixel']),
format={'ogip': 'pixel', 'vounit': 'pixel'},
namespace=_ns, prefixes=True)
def_unit((['vox', 'voxel'], ['voxel']),
format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'},
namespace=_ns, prefixes=True)
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
010acb8739268afb39b57f68a20f26d00f041fc45ee6f0591da35fd4805716f5 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines SI prefixed units that are required by the VOUnit standard
but that are rarely used in practice and liable to lead to confusion (such as
``msolMass`` for milli-solar mass). They are in a separate module from
`astropy.units.deprecated` because they need to be enabled by default for
`astropy.units` to parse compliant VOUnit strings. As a result, e.g.,
``Unit('msolMass')`` will just work, but to access the unit directly, use
``astropy.units.required_by_vounit.msolMass`` instead of the more typical idiom
possible for the non-prefixed unit, ``astropy.units.solMass``.
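For example (output shown for illustration)::
>>> from astropy.units import Unit
>>> Unit('msolMass')  # doctest: +SKIP
Unit("msolMass")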
"""
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
from . import cgs
from . import astrophys
from .core import def_unit, _add_prefixes
_add_prefixes(astrophys.solMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.solRad, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.solLum, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import (generate_unit_summary as _generate_unit_summary,
generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary)
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
__doc__ += _generate_prefixonly_unit_summary(globals())
def _enable():
"""
Enable the VOUnit-required extra units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')``
idiom.
"""
# Local import to avoid cyclical import
from .core import add_enabled_units
# Local import to avoid polluting namespace
import inspect
return add_enabled_units(inspect.getmodule(_enable))
# Because these are VOUnit mandated units, they start enabled (which is why the
# function is hidden).
_enable()
|
98d90b12739822c658236023dbfa4ba1e70bcecfe2b6c8ce4758d9dac28d0ff6 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the CGS units. They are also available in the
top-level `astropy.units` namespace.
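For example::
>>> from astropy import units as u
>>> u.Gal.to(u.m / u.s ** 2)
0.01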
"""
from fractions import Fraction
from . import si
from .core import UnitBase, def_unit
_ns = globals()
def_unit(['cm', 'centimeter'], si.cm, namespace=_ns, prefixes=False)
g = si.g
s = si.s
C = si.C
rad = si.rad
sr = si.sr
cd = si.cd
K = si.K
deg_C = si.deg_C
mol = si.mol
##########################################################################
# ACCELERATION
def_unit(['Gal', 'gal'], cm / s ** 2, namespace=_ns, prefixes=True,
doc="Gal: CGS unit of acceleration")
##########################################################################
# ENERGY
# Use CGS definition of erg
def_unit(['erg'], g * cm ** 2 / s ** 2, namespace=_ns, prefixes=True,
doc="erg: CGS unit of energy")
##########################################################################
# FORCE
def_unit(['dyn', 'dyne'], g * cm / s ** 2, namespace=_ns,
prefixes=True,
doc="dyne: CGS unit of force")
##########################################################################
# PRESSURE
def_unit(['Ba', 'Barye', 'barye'], g / (cm * s ** 2), namespace=_ns,
prefixes=True,
doc="Barye: CGS unit of pressure")
##########################################################################
# DYNAMIC VISCOSITY
def_unit(['P', 'poise'], g / (cm * s), namespace=_ns,
prefixes=True,
doc="poise: CGS unit of dynamic viscosity")
##########################################################################
# KINEMATIC VISCOSITY
def_unit(['St', 'stokes'], cm ** 2 / s, namespace=_ns,
prefixes=True,
doc="stokes: CGS unit of kinematic viscosity")
##########################################################################
# WAVENUMBER
def_unit(['k', 'Kayser', 'kayser'], cm ** -1, namespace=_ns,
prefixes=True,
doc="kayser: CGS unit of wavenumber")
###########################################################################
# ELECTRICAL
def_unit(['D', 'Debye', 'debye'], Fraction(1, 3) * 1e-29 * C * si.m,
namespace=_ns, prefixes=True,
doc="Debye: CGS unit of electric dipole moment")
def_unit(['Fr', 'Franklin', 'statcoulomb', 'statC', 'esu'],
g ** Fraction(1, 2) * cm ** Fraction(3, 2) * s ** -1,
namespace=_ns,
doc='Franklin: CGS (ESU) unit of charge')
def_unit(['statA', 'statampere'], Fr * s ** -1, namespace=_ns,
doc='statampere: CGS (ESU) unit of current')
def_unit(['Bi', 'Biot', 'abA', 'abampere'],
g ** Fraction(1, 2) * cm ** Fraction(1, 2) * s ** -1, namespace=_ns,
doc='Biot: CGS (EMU) unit of current')
def_unit(['abC', 'abcoulomb'], Bi * s, namespace=_ns,
doc='abcoulomb: CGS (EMU) of charge')
###########################################################################
# MAGNETIC
def_unit(['G', 'Gauss', 'gauss'], 1e-4 * si.T, namespace=_ns, prefixes=True,
doc="Gauss: CGS unit for magnetic field")
###########################################################################
# BASES
bases = {cm, g, s, rad, cd, K, mol}
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
del Fraction
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
803f838d8eb7c8a0c705c85801a5d3a7910d261cd3ec6b0c93f820d7a20e9f56 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines deprecated units.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.deprecated`
module::
>>> from astropy.units import deprecated
>>> q = 10. * deprecated.emu # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import deprecated
>>> deprecated.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
from . import cgs
from . import astrophys
from .core import def_unit, _add_prefixes
def_unit(['emu'], cgs.Bi, namespace=_ns,
doc='Biot: CGS (EMU) unit of current')
# Add only some *prefixes* as deprecated units.
_add_prefixes(astrophys.jupiterMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.earthMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.jupiterRad, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.earthRad, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import (generate_unit_summary as _generate_unit_summary,
generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary)
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
__doc__ += _generate_prefixonly_unit_summary(globals())
def enable():
"""
Enable deprecated units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`.
This may be used with the ``with`` statement to enable deprecated
units only temporarily.
"""
# Local import to avoid cyclical import
from .core import add_enabled_units
# Local import to avoid polluting namespace
import inspect
return add_enabled_units(inspect.getmodule(enable))
|
798e04161d76df505a2134d2a0639c178eeb668445a39d09c62316f3f6122d68 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Miscellaneous utilities for `astropy.units`.
None of the functions in the module are meant for use outside of the
package.
"""
import io
import re
from fractions import Fraction
import numpy as np
from numpy import finfo
_float_finfo = finfo(float)
# take float here to ensure comparison with another float is fast
# give a little margin since often multiple calculations happened
_JUST_BELOW_UNITY = float(1.-4.*_float_finfo.epsneg)
_JUST_ABOVE_UNITY = float(1.+4.*_float_finfo.eps)
def _get_first_sentence(s):
"""
Get the first sentence from a string and remove any carriage
returns.
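For example::
>>> _get_first_sentence("Barye: CGS unit of pressure. Rarely used.")
'Barye: CGS unit of pressure. '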
"""
x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace('\n', ' ')
def _iter_unit_summary(namespace):
"""
Generates the ``(unit, doc, represents, aliases, prefixes)``
tuple used to format the unit summary docs in `generate_unit_summary`.
"""
from . import core
# Get all of the units, and keep track of which ones have SI
# prefixes
units = []
has_prefixes = set()
for key, val in namespace.items():
# Skip non-unit items
if not isinstance(val, core.UnitBase):
continue
# Skip aliases
if key != val.name:
continue
if isinstance(val, core.PrefixUnit):
# This will return the root unit that is scaled by the prefix
# attached to it
has_prefixes.add(val._represents.bases[0].name)
else:
units.append(val)
# Sort alphabetically, case insensitive
units.sort(key=lambda x: x.name.lower())
for unit in units:
doc = _get_first_sentence(unit.__doc__).strip()
represents = ''
if isinstance(unit, core.Unit):
represents = f":math:`{unit._represents.to_string('latex')[1:-1]}`"
aliases = ', '.join(f'``{x}``' for x in unit.aliases)
yield (unit, doc, represents, aliases, 'Yes' if unit.name in has_prefixes else 'No')
def generate_unit_summary(namespace):
"""
Generates a summary of units from a given namespace. This is used
to generate the docstring for the modules that define the actual
units.
Parameters
----------
namespace : dict
A namespace containing units.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
docstring = io.StringIO()
docstring.write("""
.. list-table:: Available Units
:header-rows: 1
:widths: 10 20 20 20 1
* - Unit
- Description
- Represents
- Aliases
- SI Prefixes
""")
for unit_summary in _iter_unit_summary(namespace):
docstring.write("""
* - ``{}``
- {}
- {}
- {}
- {}
""".format(*unit_summary))
return docstring.getvalue()
def generate_prefixonly_unit_summary(namespace):
"""
Generates table entries for units in a namespace that are just prefixes
without the base unit. Note that this is intended to be used *after*
`generate_unit_summary` and therefore does not include the table header.
Parameters
----------
namespace : dict
A namespace containing units that are prefixes but do *not* have the
base unit in their namespace.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
from . import PrefixUnit
faux_namespace = {}
for nm, unit in namespace.items():
if isinstance(unit, PrefixUnit):
base_unit = unit.represents.bases[0]
faux_namespace[base_unit.name] = base_unit
docstring = io.StringIO()
for unit_summary in _iter_unit_summary(faux_namespace):
docstring.write("""
* - Prefixes for ``{}``
- {} prefixes
- {}
- {}
- Only
""".format(*unit_summary))
return docstring.getvalue()
def is_effectively_unity(value):
"""Check whether ``value`` equals unity to within a few ulp (real or complex)."""
# value is *almost* always real, except, e.g., for u.mag**0.5, when
# it will be complex. Use try/except to ensure normal case is fast
try:
return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
except TypeError: # value is complex
return (_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY and
_JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY)
def sanitize_scale(scale):
"""Normalize a unit scale factor.
Collapses values that are effectively unity to 1.0, converts numpy
scalars to Python scalars, and drops a negligible real or imaginary
part of a complex scale.
"""
if is_effectively_unity(scale):
return 1.0
# Maximum speed for regular case where scale is a float.
if scale.__class__ is float:
return scale
# We cannot have numpy scalars, since they don't autoconvert to
# complex if necessary. They are also slower.
if hasattr(scale, 'dtype'):
scale = scale.item()
# All classes that scale can be (int, float, complex, Fraction)
# have an "imag" attribute.
if scale.imag:
if abs(scale.real) > abs(scale.imag):
if is_effectively_unity(scale.imag/scale.real + 1):
return scale.real
elif is_effectively_unity(scale.real/scale.imag + 1):
return complex(0., scale.imag)
return scale
else:
return scale.real
def maybe_simple_fraction(p, max_denominator=100):
"""Fraction very close to x with denominator at most max_denominator.
The fraction has to be such that fraction/x is unity to within 4 ulp.
If such a fraction does not exist, returns the float number.
The algorithm is that of `fractions.Fraction.limit_denominator`, but
sped up by not creating a fraction to start with.
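For example::
>>> maybe_simple_fraction(1/3)
Fraction(1, 3)
>>> maybe_simple_fraction(0.12345)
0.12345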
"""
if p == 0 or p.__class__ is int:
return p
n, d = p.as_integer_ratio()
a = n // d
# Normally, start with 0,1 and 1,0; here we have applied first iteration.
n0, d0 = 1, 0
n1, d1 = a, 1
while d1 <= max_denominator:
if _JUST_BELOW_UNITY <= n1/(d1*p) <= _JUST_ABOVE_UNITY:
return Fraction(n1, d1)
n, d = d, n-a*d
a = n // d
n0, n1 = n1, n0+a*n1
d0, d1 = d1, d0+a*d1
return p
def validate_power(p):
"""Convert a power to a floating point value, an integer, or a Fraction.
If a fractional power can be represented exactly as a floating point
number, convert it to a float, to make the math much faster; otherwise,
retain it as a `fractions.Fraction` object to avoid losing precision.
Conversely, if the value is indistinguishable from a rational number with a
low-numbered denominator, convert to a Fraction object.
Parameters
----------
p : float, int, Rational, Fraction
Power to be converted
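Examples
--------
Exactly representable powers simplify to plain Python types, while
inexact thirds are kept as fractions::
>>> validate_power(4.0)
4
>>> validate_power(1/3)
Fraction(1, 3)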
"""
denom = getattr(p, 'denominator', None)
if denom is None:
try:
p = float(p)
except Exception:
if not np.isscalar(p):
raise ValueError("Quantities and Units may only be raised "
"to a scalar power")
else:
raise
# This returns either a (simple) Fraction or the same float.
p = maybe_simple_fraction(p)
# If still a float, nothing more to be done.
if isinstance(p, float):
return p
# Otherwise, check for simplifications.
denom = p.denominator
if denom == 1:
p = p.numerator
elif (denom & (denom - 1)) == 0:
# Above is a bit-twiddling hack to see if denom is a power of two.
# If so, float does not lose precision and will speed things up.
p = float(p)
return p
def resolve_fractions(a, b):
"""
If either input is a Fraction, convert the other to a Fraction
(at least if it does not have a ridiculous denominator).
This ensures that any operation involving a Fraction will use
rational arithmetic and preserve precision.
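For example::
>>> from fractions import Fraction
>>> resolve_fractions(Fraction(1, 3), 0.25)
(Fraction(1, 3), Fraction(1, 4))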
"""
# We short-circuit on the most common cases of int and float, since
# isinstance(a, Fraction) is very slow for any non-Fraction instances.
a_is_fraction = (a.__class__ is not int and a.__class__ is not float and
isinstance(a, Fraction))
b_is_fraction = (b.__class__ is not int and b.__class__ is not float and
isinstance(b, Fraction))
if a_is_fraction and not b_is_fraction:
b = maybe_simple_fraction(b)
elif not a_is_fraction and b_is_fraction:
a = maybe_simple_fraction(a)
return a, b
def quantity_asanyarray(a, dtype=None):
from .quantity import Quantity
if not isinstance(a, np.ndarray) and not np.isscalar(a) and any(isinstance(x, Quantity) for x in a):
return Quantity(a, dtype=dtype)
else:
return np.asanyarray(a, dtype=dtype)
|
6e22b6f296084fcb81cf2a1e4acd1bbb4aa695238cf7a9cee400daa246530a39 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines colloquially used Imperial units. They are
available in the `astropy.units.imperial` namespace, but not in the
top-level `astropy.units` namespace, e.g.::
>>> import astropy.units as u
>>> mph = u.imperial.mile / u.hour
>>> mph
Unit("mi / h")
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> import astropy.units as u
>>> u.imperial.enable() # doctest: +SKIP
"""
from .core import UnitBase, def_unit
from . import si
_ns = globals()
###########################################################################
# LENGTH
def_unit(['inch'], 2.54 * si.cm, namespace=_ns,
doc="International inch")
def_unit(['ft', 'foot'], 12 * inch, namespace=_ns,
doc="International foot")
def_unit(['yd', 'yard'], 3 * ft, namespace=_ns,
doc="International yard")
def_unit(['mi', 'mile'], 5280 * ft, namespace=_ns,
doc="International mile")
def_unit(['mil', 'thou'], 0.001 * inch, namespace=_ns,
doc="Thousandth of an inch")
def_unit(['nmi', 'nauticalmile', 'NM'], 1852 * si.m, namespace=_ns,
doc="Nautical mile")
def_unit(['fur', 'furlong'], 660 * ft, namespace=_ns,
doc="Furlong")
###########################################################################
# AREAS
def_unit(['ac', 'acre'], 43560 * ft ** 2, namespace=_ns,
doc="International acre")
###########################################################################
# VOLUMES
def_unit(['gallon'], si.liter / 0.264172052, namespace=_ns,
doc="U.S. liquid gallon")
def_unit(['quart'], gallon / 4, namespace=_ns,
doc="U.S. liquid quart")
def_unit(['pint'], quart / 2, namespace=_ns,
doc="U.S. liquid pint")
def_unit(['cup'], pint / 2, namespace=_ns,
doc="U.S. customary cup")
def_unit(['foz', 'fluid_oz', 'fluid_ounce'], cup / 8, namespace=_ns,
doc="U.S. fluid ounce")
def_unit(['tbsp', 'tablespoon'], foz / 2, namespace=_ns,
doc="U.S. customary tablespoon")
def_unit(['tsp', 'teaspoon'], tbsp / 3, namespace=_ns,
doc="U.S. customary teaspoon")
###########################################################################
# MASS
def_unit(['oz', 'ounce'], 28.349523125 * si.g, namespace=_ns,
doc="International avoirdupois ounce: mass")
def_unit(['lb', 'lbm', 'pound'], 16 * oz, namespace=_ns,
doc="International avoirdupois pound: mass")
def_unit(['st', 'stone'], 14 * lb, namespace=_ns,
doc="International avoirdupois stone: mass")
def_unit(['ton'], 2000 * lb, namespace=_ns,
doc="International avoirdupois ton: mass")
def_unit(['slug'], 32.174049 * lb, namespace=_ns,
doc="slug: mass")
###########################################################################
# SPEED
def_unit(['kn', 'kt', 'knot', 'NMPH'], nmi / si.h, namespace=_ns,
doc="nautical unit of speed: 1 nmi per hour")
###########################################################################
# FORCE
def_unit('lbf', slug * ft * si.s**-2, namespace=_ns,
doc="Pound: force")
def_unit(['kip', 'kilopound'], 1000 * lbf, namespace=_ns,
doc="Kilopound: force")
##########################################################################
# ENERGY
def_unit(['BTU', 'btu'], 1.05505585 * si.kJ, namespace=_ns,
doc="British thermal unit")
def_unit(['cal', 'calorie'], 4.184 * si.J, namespace=_ns,
doc="Thermochemical calorie: pre-SI metric unit of energy")
def_unit(['kcal', 'Cal', 'Calorie', 'kilocal', 'kilocalorie'],
1000 * cal, namespace=_ns,
doc="Calorie: colloquial definition of Calorie")
##########################################################################
# PRESSURE
def_unit('psi', lbf * inch ** -2, namespace=_ns,
doc="Pound per square inch: pressure")
###########################################################################
# POWER
# Imperial units
def_unit(['hp', 'horsepower'], si.W / 0.00134102209, namespace=_ns,
doc="Mechanical horsepower")
###########################################################################
# TEMPERATURE
def_unit(['deg_F', 'Fahrenheit'], namespace=_ns, doc='Degrees Fahrenheit',
format={'latex': r'{}^{\circ}F', 'unicode': '°F'})
def_unit(['deg_R', 'Rankine'], namespace=_ns, doc='Rankine scale: absolute scale of thermodynamic temperature')
###########################################################################
# CLEANUP
del UnitBase
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable Imperial units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`.
This may be used with the ``with`` statement to enable Imperial
units only temporarily.
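For example (sketch; output omitted)::
>>> import astropy.units as u
>>> with u.imperial.enable():  # doctest: +SKIP
...     u.kg.find_equivalent_units()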
"""
# Local import to avoid cyclical import
from .core import add_enabled_units
# Local import to avoid polluting namespace
import inspect
return add_enabled_units(inspect.getmodule(enable))
|
da2c0c67aa0ef24cf668a71f1e193d77284f17db328cafe168a9477cec0b0494 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines structured units and quantities.
"""
# Standard library
import operator
import numpy as np
from .core import Unit, UnitBase, UNITY
__all__ = ['StructuredUnit']
DTYPE_OBJECT = np.dtype('O')
def _names_from_dtype(dtype):
"""Recursively extract field names from a dtype."""
names = []
for name in dtype.names:
subdtype = dtype.fields[name][0]
if subdtype.names:
names.append([name, _names_from_dtype(subdtype)])
else:
names.append(name)
return tuple(names)
def _normalize_names(names):
"""Recursively normalize, inferring upper level names for unadorned tuples.
Generally, we want the field names to be organized like dtypes, as in
``(['pv', ('p', 'v')], 't')``. But we automatically infer upper
field names if the list is absent from items like ``(('p', 'v'), 't')``,
by concatenating the names inside the tuple.
"""
result = []
for name in names:
if isinstance(name, str) and len(name) > 0:
result.append(name)
elif (isinstance(name, list)
and len(name) == 2
and isinstance(name[0], str) and len(name[0]) > 0
and isinstance(name[1], tuple) and len(name[1]) > 0):
result.append([name[0], _normalize_names(name[1])])
elif isinstance(name, tuple) and len(name) > 0:
new_tuple = _normalize_names(name)
result.append([''.join([(i[0] if isinstance(i, list) else i)
for i in new_tuple]), new_tuple])
else:
raise ValueError(f'invalid entry {name!r}. Should be a name, '
'tuple of names, or 2-element list of the '
'form [name, tuple of names].')
return tuple(result)
class StructuredUnit:
"""Container for units for a structured Quantity.
Parameters
----------
units : unit-like, tuple of unit-like, or `~astropy.units.StructuredUnit`
Tuples can be nested. If a `~astropy.units.StructuredUnit` is passed
in, it will be returned unchanged unless different names are requested.
names : tuple of str, tuple or list; `~numpy.dtype`; or `~astropy.units.StructuredUnit`, optional
Field names for the units, possibly nested. Can be inferred from a
structured `~numpy.dtype` or another `~astropy.units.StructuredUnit`.
For nested tuples, by default the name of the upper entry will be the
concatenation of the names of the lower levels. One can pass in a
list with the upper-level name and a tuple of lower-level names to
avoid this. For tuples, not all levels have to be given; for any level
not passed in, default field names of 'f0', 'f1', etc., will be used.
Notes
-----
It is recommended to initialize the class indirectly, using
`~astropy.units.Unit`. E.g., ``u.Unit('AU,AU/day')``.
When combined with a structured array to produce a structured
`~astropy.units.Quantity`, array field names will take precedence.
Generally, passing in ``names`` is needed only if the unit is used
unattached to a `~astropy.units.Quantity` and one needs to access its
fields.
Examples
--------
Various ways to initialize a `~astropy.units.StructuredUnit`::
>>> import astropy.units as u
>>> su = u.Unit('(AU,AU/day),yr')
>>> su
Unit("((AU, AU / d), yr)")
>>> su.field_names
(['f0', ('f0', 'f1')], 'f1')
>>> su['f1']
Unit("yr")
>>> su2 = u.StructuredUnit(((u.AU, u.AU/u.day), u.yr), names=(('p', 'v'), 't'))
>>> su2 == su
True
>>> su2.field_names
(['pv', ('p', 'v')], 't')
>>> su3 = u.StructuredUnit((su2['pv'], u.day), names=(['p_v', ('p', 'v')], 't'))
>>> su3.field_names
(['p_v', ('p', 'v')], 't')
>>> su3.keys()
('p_v', 't')
>>> su3.values()
(Unit("(AU, AU / d)"), Unit("d"))
Structured units share most methods with regular units::
>>> su.physical_type
((PhysicalType('length'), PhysicalType({'speed', 'velocity'})), PhysicalType('time'))
>>> su.si
Unit("((1.49598e+11 m, 1.73146e+06 m / s), 3.15576e+07 s)")
"""
def __new__(cls, units, names=None):
dtype = None
if names is not None:
if isinstance(names, StructuredUnit):
dtype = names._units.dtype
names = names.field_names
elif isinstance(names, np.dtype):
if not names.fields:
raise ValueError('dtype should be structured, with fields.')
dtype = np.dtype([(name, DTYPE_OBJECT) for name in names.names])
names = _names_from_dtype(names)
else:
if not isinstance(names, tuple):
names = (names,)
names = _normalize_names(names)
if not isinstance(units, tuple):
units = Unit(units)
if isinstance(units, StructuredUnit):
# Avoid constructing a new StructuredUnit if no field names
# are given, or if all field names are the same already anyway.
if names is None or units.field_names == names:
return units
# Otherwise, turn (the upper level) into a tuple, for renaming.
units = units.values()
else:
# Single regular unit: make a tuple for iteration below.
units = (units,)
if names is None:
names = tuple(f'f{i}' for i in range(len(units)))
elif len(units) != len(names):
raise ValueError("lengths of units and field names must match.")
converted = []
for unit, name in zip(units, names):
if isinstance(name, list):
# For list, the first item is the name of our level,
# and the second another tuple of names, i.e., we recurse.
unit = cls(unit, name[1])
name = name[0]
else:
# We are at the lowest level. Check unit.
unit = Unit(unit)
if dtype is not None and isinstance(unit, StructuredUnit):
raise ValueError("units do not match in depth with field "
"names from dtype or structured unit.")
converted.append(unit)
self = super().__new__(cls)
if dtype is None:
dtype = np.dtype([((name[0] if isinstance(name, list) else name),
DTYPE_OBJECT) for name in names])
# Decay array to void so we can access by field name and number.
self._units = np.array(tuple(converted), dtype)[()]
return self
def __getnewargs__(self):
"""When de-serializing, e.g. pickle, start with a blank structure."""
return (), None
@property
def field_names(self):
"""Possibly nested tuple of the field names of the parts."""
return tuple(([name, unit.field_names]
if isinstance(unit, StructuredUnit) else name)
for name, unit in self.items())
# Allow StructuredUnit to be treated as an (ordered) mapping.
def __len__(self):
return len(self._units.dtype.names)
def __getitem__(self, item):
# Since we are based on np.void, indexing by field number works too.
return self._units[item]
def values(self):
return self._units.item()
def keys(self):
return self._units.dtype.names
def items(self):
return tuple(zip(self._units.dtype.names, self._units.item()))
def __iter__(self):
yield from self._units.dtype.names
# Helpers for methods below.
def _recursively_apply(self, func, cls=None):
"""Apply func recursively.
Parameters
----------
func : callable
Function to apply to all parts of the structured unit,
recursing as needed.
cls : type, optional
If given, should be a subclass of `~numpy.void`. By default,
will return a new `~astropy.units.StructuredUnit` instance.
"""
results = np.array(tuple([func(part) for part in self.values()]),
self._units.dtype)[()]
if cls is not None:
return results.view((cls, results.dtype))
# Short-cut; no need to interpret field names, etc.
result = super().__new__(self.__class__)
result._units = results
return result
def _recursively_get_dtype(self, value, enter_lists=True):
"""Get structured dtype according to value, using our field names.
This is useful since ``np.array(value)`` would treat tuples as lower
levels of the array, rather than as elements of a structured array.
The routine does presume that the type of the first tuple is
representative of the rest. Used in ``_get_converter``.
For the special value of ``UNITY``, all fields are assumed to be 1.0,
and hence this will return an all-float dtype.
"""
if enter_lists:
while isinstance(value, list):
value = value[0]
if value is UNITY:
value = (UNITY,) * len(self)
elif not isinstance(value, tuple) or len(self) != len(value):
raise ValueError(f"cannot interpret value {value} for unit {self}.")
descr = []
for (name, unit), part in zip(self.items(), value):
if isinstance(unit, StructuredUnit):
descr.append(
(name, unit._recursively_get_dtype(part, enter_lists=False)))
else:
# Got a part associated with a regular unit. Get its dtype.
# Like for Quantity, we cast integers to float.
part = np.array(part)
part_dtype = part.dtype
if part_dtype.kind in 'iu':
part_dtype = np.dtype(float)
descr.append((name, part_dtype, part.shape))
return np.dtype(descr)
@property
def si(self):
"""The `StructuredUnit` instance in SI units."""
return self._recursively_apply(operator.attrgetter('si'))
@property
def cgs(self):
"""The `StructuredUnit` instance in cgs units."""
return self._recursively_apply(operator.attrgetter('cgs'))
# Needed to pass through Unit initializer, so might as well use it.
def _get_physical_type_id(self):
return self._recursively_apply(
operator.methodcaller('_get_physical_type_id'), cls=Structure)
@property
def physical_type(self):
"""Physical types of all the fields."""
return self._recursively_apply(
operator.attrgetter('physical_type'), cls=Structure)
def decompose(self, bases=set()):
"""The `StructuredUnit` composed of only irreducible units.
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
`~astropy.units.StructuredUnit`
With the unit for each field containing only irreducible units.
"""
return self._recursively_apply(
operator.methodcaller('decompose', bases=bases))
def is_equivalent(self, other, equivalencies=[]):
"""`True` if all fields are equivalent to the other's fields.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The structured unit to compare with, or what can initialize one.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
The list will be applied to all fields.
Returns
-------
bool
"""
try:
other = StructuredUnit(other)
except Exception:
return False
if len(self) != len(other):
return False
for self_part, other_part in zip(self.values(), other.values()):
if not self_part.is_equivalent(other_part,
equivalencies=equivalencies):
return False
return True
def _get_converter(self, other, equivalencies=[]):
if not isinstance(other, type(self)):
other = self.__class__(other, names=self)
converters = [self_part._get_converter(other_part,
equivalencies=equivalencies)
for (self_part, other_part) in zip(self.values(),
other.values())]
def converter(value):
if not hasattr(value, 'dtype'):
value = np.array(value, self._recursively_get_dtype(value))
result = np.empty_like(value)
for name, converter_ in zip(result.dtype.names, converters):
result[name] = converter_(value[name])
# Index with empty tuple to decay array scalars to numpy void.
return result if result.shape else result[()]
return converter
def to(self, other, value=np._NoValue, equivalencies=[]):
"""Return values converted to the specified unit.
Parameters
----------
other : `~astropy.units.StructuredUnit`
The unit to convert to. If necessary, will be converted to
a `~astropy.units.StructuredUnit` using the dtype of ``value``.
value : array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If a sequence, the first element must have
entries of the correct type to represent all elements (i.e.,
not have, e.g., a ``float`` where other elements have ``complex``).
If not given, assumed to have 1. in all fields.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s).
Raises
------
UnitsError
If units are inconsistent
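Examples
--------
A minimal sketch; the structured unit and values are illustrative::
>>> import astropy.units as u
>>> su = u.Unit('(km,km/s)')
>>> su.to(u.Unit('(m,m/s)'), (1., 2.))  # doctest: +SKIP
(1000., 2000.)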
"""
if value is np._NoValue:
# We do not have UNITY as a default, since then the docstring
# would list 1.0 as default, yet one could not pass that in.
value = UNITY
return self._get_converter(other, equivalencies=equivalencies)(value)
def to_string(self, format='generic'):
"""Output the unit in the given format as a string.
Units are separated by commas.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
Notes
-----
Structured units can be written to all formats, but can be
re-read only with 'generic'.
"""
parts = [part.to_string(format) for part in self.values()]
out_fmt = '({})' if len(self) > 1 else '({},)'
if format == 'latex':
# Strip $ from parts and add them on the outside.
parts = [part[1:-1] for part in parts]
out_fmt = '$' + out_fmt + '$'
return out_fmt.format(', '.join(parts))
def _repr_latex_(self):
return self.to_string('latex')
__array_ufunc__ = None
def __mul__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict='silent')
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part * other for part in self.values())
return self.__class__(new_units, names=self)
if isinstance(other, StructuredUnit):
return NotImplemented
# Anything not like a unit, try initialising as a structured quantity.
try:
from .quantity import Quantity
return Quantity(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, str):
try:
other = Unit(other, parse_strict='silent')
except Exception:
return NotImplemented
if isinstance(other, UnitBase):
new_units = tuple(part / other for part in self.values())
return self.__class__(new_units, names=self)
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __str__(self):
return self.to_string()
def __repr__(self):
return f'Unit("{self.to_string()}")'
def __eq__(self, other):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() == other.values()
def __ne__(self, other):
if not isinstance(other, type(self)):
try:
other = StructuredUnit(other)
except Exception:
return NotImplemented
return self.values() != other.values()
class Structure(np.void):
"""Single element structure for physical type IDs, etc.
Behaves like a `~numpy.void` and thus mostly like a tuple which can also
be indexed with field names, but overrides ``__eq__`` and ``__ne__`` to
compare only the contents, not the field names. Furthermore, this way no
`FutureWarning` about comparisons is given.
"""
# Note that it is important for physical type IDs to not be stored in a
# tuple, since then the physical types would be treated as alternatives in
# :meth:`~astropy.units.UnitBase.is_equivalent`. (Of course, in that
# case, they could also not be indexed by name.)
def __eq__(self, other):
if isinstance(other, np.void):
other = other.item()
return self.item() == other
def __ne__(self, other):
if isinstance(other, np.void):
other = other.item()
return self.item() != other
|
9665f54dbb8af288bd88ee6f2c2320e696aaf7df49826bfbd45b255a44782732 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units used in the CDS format, both the units
defined in `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical Catalogues 2.0
<http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_ format and the `complete
set of supported units <https://vizier.u-strasbg.fr/viz-bin/Unit>`_.
This format is used by VOTable up to version 1.2.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.cds`
module::
>>> from astropy.units import cds
>>> q = 10. * cds.lyr # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import cds
>>> cds.enable() # doctest: +SKIP
"""
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
import numpy as np
from . import core
from astropy import units as u
from astropy.constants import si as _si
# The CDS format also supports power-of-2 prefixes as defined here:
# http://physics.nist.gov/cuu/Units/binary.html
prefixes = core.si_prefixes + core.binary_prefixes
# CDS only uses the short prefixes
prefixes = [(short, short, factor) for (short, long, factor) in prefixes]
# The following units are defined in alphabetical order, directly from
# here: https://vizier.u-strasbg.fr/viz-bin/Unit
mapping = [
(['A'], u.A, "Ampere"),
(['a'], u.a, "year", ['P']),
(['a0'], _si.a0, "Bohr radius"),
(['al'], u.lyr, "Light year", ['c', 'd']),
(['lyr'], u.lyr, "Light year"),
(['alpha'], _si.alpha, "Fine structure constant"),
((['AA', 'Å'], ['Angstrom', 'Angstroem']), u.AA, "Angstrom"),
(['arcmin', 'arcm'], u.arcminute, "minute of arc"),
(['arcsec', 'arcs'], u.arcsecond, "second of arc"),
(['atm'], _si.atm, "atmosphere"),
(['AU', 'au'], u.au, "astronomical unit"),
(['bar'], u.bar, "bar"),
(['barn'], u.barn, "barn"),
(['bit'], u.bit, "bit"),
(['byte'], u.byte, "byte"),
(['C'], u.C, "Coulomb"),
(['c'], _si.c, "speed of light", ['p']),
(['cal'], 4.1854 * u.J, "calorie"),
(['cd'], u.cd, "candela"),
(['ct'], u.ct, "count"),
(['D'], u.D, "Debye (dipole)"),
(['d'], u.d, "Julian day", ['c']),
        ((['deg', '°'], ['degree']), u.degree, "degree"),
(['dyn'], u.dyn, "dyne"),
(['e'], _si.e, "electron charge", ['m']),
(['eps0'], _si.eps0, "electric constant"),
(['erg'], u.erg, "erg"),
(['eV'], u.eV, "electron volt"),
(['F'], u.F, "Farad"),
(['G'], _si.G, "Gravitation constant"),
(['g'], u.g, "gram"),
(['gauss'], u.G, "Gauss"),
(['geoMass', 'Mgeo'], u.M_earth, "Earth mass"),
(['H'], u.H, "Henry"),
(['h'], u.h, "hour", ['p']),
(['hr'], u.h, "hour"),
(['\\h'], _si.h, "Planck constant"),
(['Hz'], u.Hz, "Hertz"),
(['inch'], 0.0254 * u.m, "inch"),
(['J'], u.J, "Joule"),
(['JD'], u.d, "Julian day", ['M']),
(['jovMass', 'Mjup'], u.M_jup, "Jupiter mass"),
(['Jy'], u.Jy, "Jansky"),
(['K'], u.K, "Kelvin"),
(['k'], _si.k_B, "Boltzmann"),
(['l'], u.l, "litre", ['a']),
(['lm'], u.lm, "lumen"),
(['Lsun', 'solLum'], u.solLum, "solar luminosity"),
(['lx'], u.lx, "lux"),
(['m'], u.m, "meter"),
(['mag'], u.mag, "magnitude"),
(['me'], _si.m_e, "electron mass"),
(['min'], u.minute, "minute"),
(['MJD'], u.d, "Julian day"),
(['mmHg'], 133.322387415 * u.Pa, "millimeter of mercury"),
(['mol'], u.mol, "mole"),
(['mp'], _si.m_p, "proton mass"),
(['Msun', 'solMass'], u.solMass, "solar mass"),
        ((['mu0', 'µ0'], []), _si.mu0, "magnetic constant"),
(['muB'], _si.muB, "Bohr magneton"),
(['N'], u.N, "Newton"),
(['Ohm'], u.Ohm, "Ohm"),
(['Pa'], u.Pa, "Pascal"),
(['pc'], u.pc, "parsec"),
(['ph'], u.ph, "photon"),
        (['pi'], u.Unit(np.pi), "π"),
(['pix'], u.pix, "pixel"),
(['ppm'], u.Unit(1e-6), "parts per million"),
(['R'], _si.R, "gas constant"),
(['rad'], u.radian, "radian"),
(['Rgeo'], _si.R_earth, "Earth equatorial radius"),
(['Rjup'], _si.R_jup, "Jupiter equatorial radius"),
(['Rsun', 'solRad'], u.solRad, "solar radius"),
(['Ry'], u.Ry, "Rydberg"),
(['S'], u.S, "Siemens"),
(['s', 'sec'], u.s, "second"),
(['sr'], u.sr, "steradian"),
(['Sun'], u.Sun, "solar unit"),
(['T'], u.T, "Tesla"),
(['t'], 1e3 * u.kg, "metric tonne", ['c']),
(['u'], _si.u, "atomic mass", ['da', 'a']),
(['V'], u.V, "Volt"),
(['W'], u.W, "Watt"),
(['Wb'], u.Wb, "Weber"),
(['yr'], u.a, "year"),
]
for entry in mapping:
if len(entry) == 3:
names, unit, doc = entry
excludes = []
else:
names, unit, doc, excludes = entry
core.def_unit(names, unit, prefixes=prefixes, namespace=_ns, doc=doc,
exclude_prefixes=excludes)
    core.def_unit(['µas'], u.microarcsecond,
doc="microsecond of arc", namespace=_ns)
core.def_unit(['mas'], u.milliarcsecond,
doc="millisecond of arc", namespace=_ns)
core.def_unit(['---', '-'], u.dimensionless_unscaled,
doc="dimensionless and unscaled", namespace=_ns)
core.def_unit(['%'], u.percent,
doc="percent", namespace=_ns)
# The Vizier "standard" defines this in units of "kg s-3", but
# that may not make a whole lot of sense, so here we just define
# it as its own new disconnected unit.
core.def_unit(['Crab'], prefixes=prefixes, namespace=_ns,
doc="Crab (X-ray) flux")
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable CDS units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`. This will disable
all of the "default" `astropy.units` units, since there
are some namespace clashes between the two.
This may be used with the ``with`` statement to enable CDS
units only temporarily.
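    Examples
    --------
    Illustrative only (the result depends on the registry state, so the
    doctest is skipped)::
        >>> from astropy.units import cds
        >>> with cds.enable():  # doctest: +SKIP
        ...     cds.pc.find_equivalent_units()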
"""
# Local import to avoid cyclical import
from .core import set_enabled_units
# Local import to avoid polluting namespace
import inspect
return set_enabled_units(inspect.getmodule(enable))
|
b7eb194e81a6fa696fa02e0d473040aa4a8081a84082d9f7696d577596c75616 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from .utils import (is_effectively_unity, sanitize_scale, validate_power,
resolve_fractions)
from . import format as unit_format
__all__ = [
'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError',
'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit',
'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry',
'set_enabled_units', 'add_enabled_units',
'set_enabled_equivalencies', 'add_enabled_equivalencies',
'set_enabled_aliases', 'add_enabled_aliases',
'dimensionless_unscaled', 'one',
]
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(
f"Invalid equivalence entry {i}: {equiv!r}")
if not (funit is Unit(funit) and
(tunit is None or tunit is Unit(tunit)) and
callable(a) and
callable(b)):
raise ValueError(
f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((funit, tunit, a, b))
return normalized
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[], aliases={}):
if isinstance(init, _UnitRegistry):
# If passed another registry we don't need to rebuild everything.
# but because these are mutable types we don't want to create
# conflicts so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._aliases = init._aliases.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {k: v.copy() for k, v in
init._by_physical_type.items()}
else:
self._reset_units()
self._reset_equivalencies()
self._reset_aliases()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
self.add_enabled_aliases(aliases)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
def _reset_aliases(self):
self._aliases = {}
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if (st in self._registry and unit != self._registry[st]):
raise ValueError(
"Object with name {!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them.".format(st))
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
@property
def aliases(self):
return self._aliases
def set_enabled_aliases(self, aliases):
"""
Set aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
self._reset_aliases()
self.add_enabled_aliases(aliases)
def add_enabled_aliases(self, aliases):
"""
Add aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
for alias, unit in aliases.items():
if alias in self._registry and unit != self._registry[alias]:
raise ValueError(
f"{alias} already means {self._registry[alias]}, so "
f"cannot be used as an alias for {unit}.")
if alias in self._aliases and unit != self._aliases[alias]:
raise ValueError(
f"{alias} already is an alias for {self._aliases[alias]}, so "
f"cannot be used as an alias for {unit}.")
for alias, unit in aliases.items():
if alias not in self._registry and alias not in self._aliases:
self._aliases[alias] = unit
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(
_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(
equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
        >>> import numpy as np
        >>> from astropy import units as u
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +FLOAT_CMP
<Quantity -1.+1.2246468e-16j>
"""
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
def set_enabled_aliases(aliases):
"""
Set aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().set_enabled_aliases(aliases)
return context
def add_enabled_aliases(aliases):
"""
Add aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Since no aliases are enabled by default, generally it is recommended
to use `set_enabled_aliases`.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_aliases(aliases)
return context
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
Used to catch the errors involving scaled units,
which are not recognized by FITS format.
"""
pass
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
    E.g., this would be raised if the unit of an
    `~astropy.coordinates.Angle` instance were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase:
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
_hash = None
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self).encode('unicode_escape')
def __str__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return f'Unit("{string}")'
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
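        For example, ``(u.m / u.s)._get_physical_type_id()`` gives
        ``(('m', 1), ('s', -1))`` (illustrative; the base ordering is
        whatever `decompose` produces).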
"""
unit = self.decompose()
r = zip([x.name for x in unit.bases], unit.powers)
# bases and powers are already sorted in a unique way
# r.sort()
r = tuple(r)
return r
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. "
"Perhaps you meant to_string()?")
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic):
"""
Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
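        Examples
        --------
        A minimal example using the default generic format::
            >>> from astropy import units as u
            >>> (u.m / u.s).to_string()
            'm / s'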
"""
f = unit_format.get_format(format)
return f.to_string(self)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
@staticmethod
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or None
Returns
-------
A normalized list, including possible global defaults set by, e.g.,
        `set_enabled_equivalencies`, except when ``equivalencies=None``,
in which case the returned list is always empty.
Raises
------
ValueError if an equivalency cannot be interpreted
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
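        # E.g., ``u.m ** 2`` yields CompositeUnit(1, [u.m], [2]);
        # validate_power normalizes the exponent (e.g., an exponent that is
        # effectively an integer or a simple fraction).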
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __truediv__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rtruediv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self**(-1))
except TypeError:
return NotImplemented
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, self)
except TypeError:
return NotImplemented
def __rlshift__(self, m):
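        # Implements ``value << unit``, e.g. ``5. << u.s`` creates a
        # Quantity without copying the value.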
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __rrshift__(self, m):
warnings.warn(">> is not implemented. Did you mean to convert "
"to a Quantity with unit {} using '<<'?".format(self),
AstropyWarning)
return NotImplemented
def __hash__(self):
if self._hash is None:
parts = ([str(self.scale)] +
[x.name for x in self.bases] +
[str(x) for x in self.powers])
self._hash = hash(tuple(parts))
return self._hash
def __getstate__(self):
# If we get pickled, we should *not* store the memoized hash since
# hashes of strings vary between sessions.
state = self.__dict__.copy()
state.pop('_hash', None)
return state
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1. or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1. or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, str, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
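        Examples
        --------
        A couple of simple checks (both follow from the definitions)::
            >>> from astropy import units as u
            >>> u.m.is_equivalent(u.pc)
            True
            >>> u.m.is_equivalent(u.s)
            False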
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other = Unit(other, parse_strict='silent')
return self._is_equivalent(other, equivalencies)
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if (self._get_physical_type_id() ==
other._get_physical_type_id()):
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other/unit).decompose([a])
return True
except Exception:
pass
else:
                    if (a._is_equivalent(unit) and b._is_equivalent(other) or
                            b._is_equivalent(unit) and a._is_equivalent(other)):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
try:
ratio_in_funit = (other.decompose() /
unit.decompose()).decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string('unscaled')
physical_type = unit.physical_type
if physical_type != 'unknown':
unit_str = f"'{unit_str}' ({physical_type})"
else:
unit_str = f"'{unit_str}'"
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(
f"{unit_str} and {other_str} are not convertible")
def _get_converter(self, other, equivalencies=[]):
"""Get a converter for values in ``self`` to ``other``.
If no conversion is necessary, returns ``unit_scale_converter``
(which is used as a check in quantity helpers).
"""
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
if scale == 1.:
return unit_scale_converter
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies))
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, 'equivalencies'):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
return lambda v: b(self._get_converter(
tunit, equivalencies=equivalencies)(v))
except Exception:
pass
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
            if (self_decomposed.powers == other_decomposed.powers and
                    all(self_base is other_base for (self_base, other_base)
                        in zip(self_decomposed.bases, other_decomposed.bases))):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(
f"'{self!r}' is not a scaled version of '{other!r}'")
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit-like
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
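        Examples
        --------
        A minimal scalar conversion (the scale is exact by definition)::
            >>> from astropy import units as u
            >>> u.km.to(u.m)
            1000.0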
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(Unit(other),
equivalencies=equivalencies)(value)
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(
other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
unit : `~astropy.units.CompositeUnit`
New object containing only irreducible unit objects.
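        Examples
        --------
        Illustrative only (the printed scale may be formatted differently,
        so the doctest is skipped)::
            >>> from astropy import units as u
            >>> (u.km / u.h).decompose()  # doctest: +SKIP
            Unit("0.277778 m / s")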
"""
raise NotImplementedError()
def _compose(self, equivalencies=[], namespace=[], max_depth=2, depth=0,
cached_results=None):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
for base in unit.bases:
if base not in namespace:
return False
return True
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [set([unit]), set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit ** power
tunit_decomposed = tunit_decomposed ** power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append(
(len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth, depth=depth + 1,
cached_results=cached_results)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append(
(len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
f"Cannot represent unit {self} in terms of the given units")
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(self, equivalencies=[], units=None, max_depth=2,
include_prefix_units=None):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of `~astropy.units.Unit`, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
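        Examples
        --------
        Illustrative only (the candidate list depends on the enabled
        units, so the doctest is skipped)::
            >>> from astropy import units as u
            >>> (u.kg * u.m**2 / u.s**2).compose()  # doctest: +SKIP
            [Unit("J"), ...]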
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (isinstance(tunit, UnitBase) and
(include_prefix_units or
not isinstance(tunit, PrefixUnit)) and
has_bases_in_common_with_equiv(
decomposed, tunit.decompose())):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(
equivalencies=equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(self._compose(
equivalencies=equivalencies, namespace=units,
max_depth=max_depth, depth=0, cached_results={}))
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
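        Examples
        --------
        Illustrative only (several ranked candidates may be returned, so
        the doctest is skipped)::
            >>> from astropy import units as u
            >>> u.Pa.to_system(u.cgs)  # doctest: +SKIP
            [Unit("10 Ba"), ...]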
"""
bases = set(system.bases)
def score(compose):
# In case that compose._bases has no elements we return
# 'np.inf' as 'score value'. It does not really matter which
# number we would return. This case occurs for instance for
# dimensionless quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
@property
def physical_type(self):
"""
Physical type(s) dimensionally compatible with the unit.
Returns
-------
`~astropy.units.physical.PhysicalType`
A representation of the physical type(s) of a unit.
Examples
--------
>>> from astropy import units as u
>>> u.m.physical_type
PhysicalType('length')
>>> (u.m ** 2 / u.s).physical_type
PhysicalType({'diffusivity', 'kinematic viscosity'})
Physical types can be compared to other physical types
(recommended in packages) or to strings.
>>> area = (u.m ** 2).physical_type
>>> area == u.m.physical_type ** 2
True
>>> area == "area"
True
`~astropy.units.physical.PhysicalType` objects can be used for
dimensional analysis.
>>> number_density = u.m.physical_type ** -3
>>> velocity = (u.m / u.s).physical_type
>>> number_density * velocity
PhysicalType('particle flux')
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also pull options from.
See :ref:`astropy:unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(
unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(
unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
HEADING_NAMES = ('Primary name', 'Unit definition', 'Aliases')
ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant
NO_EQUIV_UNITS_MSG = 'There are no equivalent units'
def __repr__(self):
if len(self) == 0:
return self.NO_EQUIV_UNITS_MSG
else:
lines = self._process_equivalent_units(self)
lines.insert(0, self.HEADING_NAMES)
widths = [0] * self.ROW_LEN
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = (lines[0:1] +
['['] +
[f'{x} ,' for x in lines[1:]] +
[']'])
return '\n'.join(lines)
def _repr_html_(self):
"""
Outputs a HTML table representation within Jupyter notebooks.
"""
if len(self) == 0:
return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>"
else:
# HTML tags to use to compose the table in HTML
blank_table = '<table style="width:50%">{}</table>'
blank_row_container = "<tr>{}</tr>"
heading_row_content = "<th>{}</th>" * self.ROW_LEN
data_row_content = "<td>{}</td>" * self.ROW_LEN
# The HTML will be rendered & the table is simple, so don't
# bother to include newlines & indentation for the HTML code.
heading_row = blank_row_container.format(
heading_row_content.format(*self.HEADING_NAMES))
data_rows = self._process_equivalent_units(self)
all_rows = heading_row
for row in data_rows:
html_row = blank_row_container.format(
data_row_content.format(*row))
all_rows += html_row
return blank_table.format(all_rows)
@staticmethod
def _process_equivalent_units(equiv_units_data):
"""
Extract attributes, and sort, the equivalent units pre-formatting.
"""
processed_equiv_units = []
for u in equiv_units_data:
irred = u.decompose().to_string()
if irred == u.name:
irred = 'irreducible'
processed_equiv_units.append(
(u.name, irred, ', '.join(u.aliases)))
processed_equiv_units.sort()
return processed_equiv_units
def find_equivalent_units(self, equivalencies=[], units=None,
include_prefix_units=False):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of `~astropy.units.Unit`, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
            A list of unit objects that match ``self``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
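        Examples
        --------
        Illustrative only (output depends on the enabled registry, so the
        doctest is skipped)::
            >>> from astropy import units as u
            >>> u.g.find_equivalent_units()  # doctest: +SKIP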
"""
results = self.compose(
equivalencies=equivalencies, units=units, max_depth=1,
include_prefix_units=include_prefix_units)
results = set(
x.bases[0] for x in results if len(x.bases) == 1)
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError(
"st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return "{1} ({0})".format(*names[:2])
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
"Object with name {!r} already exists in "
"given namespace ({!r}).".format(
name, namespace[name]))
for name in self._names:
namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
    only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__getstate__())
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1],
_error_check=False)
raise UnitConversionError(
f"Unit {self} can not be decomposed into the requested bases")
return self
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return f"UnrecognizedUnit({str(self)})"
def __bytes__(self):
return self.name.encode('ascii', 'replace')
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
"The unit {!r} is unrecognized, so all arithmetic operations "
"with it are invalid.".format(self.name))
__pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = __lt__ = \
__gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator
def __eq__(self, other):
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
"The unit {!r} is unrecognized. It can not be converted "
"to other units.".format(self.name))
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
class _UnitMetaClass(type):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(self, s="", represents=None, format=None, namespace=None,
doc=None, parse_strict='raise'):
# Short-circuit if we're already a unit
if hasattr(s, '_get_physical_type_id'):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
represents = CompositeUnit(represents.value *
represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers,
_error_check=False)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers,
_error_check=False)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc)
# or interpret a Quantity (now became unit), string or number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode('ascii')
try:
return f.parse(s)
except NotImplementedError:
raise
except Exception as e:
if parse_strict == 'silent':
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + ' '
else:
format_clause = ''
msg = ("'{}' did not parse as {}unit: {} "
"If this is meant to be a custom unit, "
"define it with 'u.def_unit'. To have it "
"recognized inside a file reader or other code, "
"enable it with 'u.add_enabled_units'. "
"For details, see "
"https://docs.astropy.org/en/latest/units/combining_and_defining.html"
.format(s, format_clause, str(e)))
if parse_strict == 'raise':
raise ValueError(msg)
elif parse_strict == 'warn':
warnings.warn(msg, UnitsWarning)
else:
raise ValueError("'parse_strict' must be 'warn', "
"'raise' or 'silent'")
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [], _error_check=False)
elif isinstance(s, tuple):
from .structured import StructuredUnit
return StructuredUnit(s)
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError(f"{s} can not be converted to a Unit")
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
    There are a number of different ways to construct a Unit, but the
    constructor always returns a `UnitBase` instance. If the arguments
    refer to an already-existing unit, that existing unit instance is
    returned, rather than a new one.
- From a string::
        Unit(s, format=None, parse_strict='raise')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From no arguments::
Unit()
Returns the dimensionless unit.
    - The last form, which creates a new `Unit`, is described in detail
      below.
See also: https://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
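    Examples
    --------
    A few representative constructions (illustrative)::

        >>> from astropy import units as u
        >>> u.Unit('km / s')      # parse a compound unit string
        Unit("km / s")
        >>> u.Unit(0.5)           # a dimensionless unit with a scale
        Unit(dimensionless with a scale of 0.5)
        >>> u.Unit(u.m) is u.m    # an existing unit is passed through
        True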
"""
def __init__(self, st, represents=None, doc=None,
format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc,
format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self._represents))
return self._hash
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers,
_error_check=False)
return unit
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
"""
_decomposed_cache = None
def __init__(self, scale, bases, powers, decompose=False,
decompose_bases=set(), _error_check=True):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError(
"bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
if not decompose and len(bases) == 1 and powers[0] >= 0:
# Short-cut; with one unit there's nothing to expand and gather,
# as that has happened already when creating the unit. But do only
# positive powers, since for negative powers we need to re-sort.
unit = bases[0]
power = powers[0]
if power == 1:
scale *= unit.scale
self._bases = unit.bases
self._powers = unit.powers
elif power == 0:
self._bases = []
self._powers = []
else:
scale *= unit.scale ** power
self._bases = unit.bases
self._powers = [operator.mul(*resolve_fractions(p, power))
for p in unit.powers]
self._scale = sanitize_scale(scale)
else:
# Regular case: use inputs as preliminary scale, bases, and powers,
# then "expand and gather" identical bases, sanitize the scale, &c.
self._scale = scale
self._bases = bases
self._powers = powers
self._expand_and_gather(decompose=decompose,
bases=decompose_bases)
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return f'Unit(dimensionless with a scale of {self._scale})'
else:
return 'Unit(dimensionless)'
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale ** p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], 'name', '')))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if (not isinstance(base, IrreducibleUnit) or
(len(bases) and base not in bases)):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(self.scale, self.bases, self.powers, decompose=True,
decompose_bases=bases)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
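# An illustrative sketch (assumes the public ``astropy.units`` namespace):
# composite units normally arise from arithmetic on existing units rather
# than from calling ``CompositeUnit`` directly::
#
#     >>> from astropy import units as u
#     >>> v = u.km / u.s
#     >>> v.bases, v.powers
#     ([Unit("km"), Unit("s")], [1, -1])
#     >>> v.decompose()
#     Unit("1000 m / s")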
si_prefixes = [
(['Y'], ['yotta'], 1e24),
(['Z'], ['zetta'], 1e21),
(['E'], ['exa'], 1e18),
(['P'], ['peta'], 1e15),
(['T'], ['tera'], 1e12),
(['G'], ['giga'], 1e9),
(['M'], ['mega'], 1e6),
(['k'], ['kilo'], 1e3),
(['h'], ['hecto'], 1e2),
(['da'], ['deka', 'deca'], 1e1),
(['d'], ['deci'], 1e-1),
(['c'], ['centi'], 1e-2),
(['m'], ['milli'], 1e-3),
(['u'], ['micro'], 1e-6),
(['n'], ['nano'], 1e-9),
(['p'], ['pico'], 1e-12),
(['f'], ['femto'], 1e-15),
(['a'], ['atto'], 1e-18),
(['z'], ['zepto'], 1e-21),
(['y'], ['yocto'], 1e-24)
]
binary_prefixes = [
(['Ki'], ['kibi'], 2. ** 10),
(['Mi'], ['mebi'], 2. ** 20),
(['Gi'], ['gibi'], 2. ** 30),
(['Ti'], ['tebi'], 2. ** 40),
(['Pi'], ['pebi'], 2. ** 50),
(['Ei'], ['exbi'], 2. ** 60)
]
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
            (short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == 'u':
format['latex'] = r'\mu ' + u.get_format_name('latex')
format['unicode'] = '\N{MICRO SIGN}' + u.get_format_name('unicode')
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(names, CompositeUnit(factor, [u], [1],
_error_check=False),
namespace=namespace, format=format)
def def_unit(s, represents=None, doc=None, format=None, prefixes=False,
exclude_prefixes=[], namespace=None):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, `format` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
unit as well. For example, for a given unit ``m``, will
generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
            (short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `~astropy.units.UnitBase`
The newly-defined unit, or a matching unit that was already
defined.
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc,
format=format)
else:
result = IrreducibleUnit(
s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(result, excludes=exclude_prefixes, namespace=namespace,
prefixes=prefixes)
return result
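# A minimal usage sketch ('bakers_fortnight' is a made-up unit, used purely
# for illustration)::
#
#     >>> from astropy import units as u
#     >>> ns = {}
#     >>> bf = u.def_unit('bakers_fortnight', represents=13 * u.day,
#     ...                 namespace=ns)
#     >>> ns['bakers_fortnight'] is bf
#     True
#     >>> bf.to(u.h)
#     312.0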
def _condition_arg(value):
"""
Validate value is acceptable for conversion purposes.
    Converts the value into a numpy array if it is not a scalar and can
    be converted into one.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
"""
if isinstance(value, (np.ndarray, float, int, complex, np.void)):
return value
avalue = np.array(value)
if avalue.dtype.kind not in ['i', 'f', 'c']:
raise ValueError("Value not scalar compatible or convertible to "
"an int, float, or complex array")
return avalue
def unit_scale_converter(val):
"""Function that just multiplies the value by unity.
This is a separate function so it can be recognized and
discarded in unit conversion.
"""
return 1. * _condition_arg(val)
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
371fafa4888342a351cf03ba1364af408b431c316fa826efc66ab9ead0f5a738 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the SI units. They are also available in the
`astropy.units` namespace.
"""
from astropy.constants import si as _si
from .core import UnitBase, Unit, def_unit
import numpy as _numpy
_ns = globals()
###########################################################################
# DIMENSIONLESS
def_unit(['percent', 'pct'], Unit(0.01), namespace=_ns, prefixes=False,
doc="percent: one hundredth of unity, factor 0.01",
format={'generic': '%', 'console': '%', 'cds': '%',
'latex': r'\%', 'unicode': '%'})
###########################################################################
# LENGTH
def_unit(['m', 'meter'], namespace=_ns, prefixes=True,
doc="meter: base unit of length in SI")
def_unit(['micron'], um, namespace=_ns,
doc="micron: alias for micrometer (um)",
format={'latex': r'\mu m', 'unicode': '\N{MICRO SIGN}m'})
def_unit(['Angstrom', 'AA', 'angstrom'], 0.1 * nm, namespace=_ns,
         doc="ångström: 10 ** -10 m",
         prefixes=[(['m', 'milli'], ['milli', 'm'], 1.e-3)],
         format={'latex': r'\mathring{A}', 'unicode': 'Å',
                 'vounit': 'Angstrom'})
###########################################################################
# VOLUMES
def_unit((['l', 'L'], ['liter']), 1000 * cm ** 3.0, namespace=_ns, prefixes=True,
         format={'latex': r'\mathcal{l}', 'unicode': 'ℓ'},
doc="liter: metric unit of volume")
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(['rad', 'radian'], namespace=_ns, prefixes=True,
doc="radian: angular measurement of the ratio between the length "
"on an arc and its radius")
def_unit(['deg', 'degree'], _numpy.pi / 180.0 * rad, namespace=_ns,
prefixes=True,
doc="degree: angular measurement 1/360 of full rotation",
         format={'latex': r'{}^{\circ}', 'unicode': '°'})
def_unit(['hourangle'], 15.0 * deg, namespace=_ns, prefixes=False,
doc="hour angle: angular measurement with 24 in a full circle",
         format={'latex': r'{}^{h}', 'unicode': 'ʰ'})
def_unit(['arcmin', 'arcminute'], 1.0 / 60.0 * deg, namespace=_ns,
prefixes=True,
doc="arc minute: angular measurement",
         format={'latex': r'{}^{\prime}', 'unicode': '′'})
def_unit(['arcsec', 'arcsecond'], 1.0 / 3600.0 * deg, namespace=_ns,
prefixes=True,
doc="arc second: angular measurement")
# These special formats should only be used for the non-prefix versions
arcsec._format = {'latex': r'{}^{\prime\prime}', 'unicode': '″'}
def_unit(['mas'], 0.001 * arcsec, namespace=_ns,
doc="milli arc second: angular measurement")
def_unit(['uas'], 0.000001 * arcsec, namespace=_ns,
doc="micro arc second: angular measurement",
         format={'latex': r'\mu as', 'unicode': 'μas'})
def_unit(['sr', 'steradian'], rad ** 2, namespace=_ns, prefixes=True,
doc="steradian: unit of solid angle in SI")
###########################################################################
# TIME
def_unit(['s', 'second'], namespace=_ns, prefixes=True,
exclude_prefixes=['a'],
doc="second: base unit of time in SI.")
def_unit(['min', 'minute'], 60 * s, prefixes=True, namespace=_ns)
def_unit(['h', 'hour', 'hr'], 3600 * s, namespace=_ns, prefixes=True,
exclude_prefixes=['p'])
def_unit(['d', 'day'], 24 * h, namespace=_ns, prefixes=True,
exclude_prefixes=['c', 'y'])
def_unit(['sday'], 86164.09053 * s, namespace=_ns,
doc="Sidereal day (sday) is the time of one rotation of the Earth.")
def_unit(['wk', 'week'], 7 * day, namespace=_ns)
def_unit(['fortnight'], 2 * wk, namespace=_ns)
def_unit(['a', 'annum'], 365.25 * d, namespace=_ns, prefixes=True,
exclude_prefixes=['P'])
def_unit(['yr', 'year'], 365.25 * d, namespace=_ns, prefixes=True)
###########################################################################
# FREQUENCY
def_unit(['Hz', 'Hertz', 'hertz'], 1 / s, namespace=_ns, prefixes=True,
doc="Frequency")
###########################################################################
# MASS
def_unit(['kg', 'kilogram'], namespace=_ns,
doc="kilogram: base unit of mass in SI.")
def_unit(['g', 'gram'], 1.0e-3 * kg, namespace=_ns, prefixes=True,
exclude_prefixes=['k', 'kilo'])
def_unit(['t', 'tonne'], 1000 * kg, namespace=_ns,
doc="Metric tonne")
###########################################################################
# AMOUNT OF SUBSTANCE
def_unit(['mol', 'mole'], namespace=_ns, prefixes=True,
doc="mole: amount of a chemical substance in SI.")
###########################################################################
# TEMPERATURE
def_unit(
['K', 'Kelvin'], namespace=_ns, prefixes=True,
doc="Kelvin: temperature with a null point at absolute zero.")
def_unit(
['deg_C', 'Celsius'], namespace=_ns, doc='Degrees Celsius',
    format={'latex': r'{}^{\circ}C', 'unicode': '°C'})
###########################################################################
# FORCE
def_unit(['N', 'Newton', 'newton'], kg * m * s ** -2, namespace=_ns,
prefixes=True, doc="Newton: force")
##########################################################################
# ENERGY
def_unit(['J', 'Joule', 'joule'], N * m, namespace=_ns, prefixes=True,
doc="Joule: energy")
def_unit(['eV', 'electronvolt'], _si.e.value * J, namespace=_ns, prefixes=True,
doc="Electron Volt")
##########################################################################
# PRESSURE
def_unit(['Pa', 'Pascal', 'pascal'], J * m ** -3, namespace=_ns, prefixes=True,
doc="Pascal: pressure")
###########################################################################
# POWER
def_unit(['W', 'Watt', 'watt'], J / s, namespace=_ns, prefixes=True,
doc="Watt: power")
###########################################################################
# ELECTRICAL
def_unit(['A', 'ampere', 'amp'], namespace=_ns, prefixes=True,
doc="ampere: base unit of electric current in SI")
def_unit(['C', 'coulomb'], A * s, namespace=_ns, prefixes=True,
doc="coulomb: electric charge")
def_unit(['V', 'Volt', 'volt'], J * C ** -1, namespace=_ns, prefixes=True,
doc="Volt: electric potential or electromotive force")
def_unit((['Ohm', 'ohm'], ['Ohm']), V * A ** -1, namespace=_ns, prefixes=True,
doc="Ohm: electrical resistance",
         format={'latex': r'\Omega', 'unicode': 'Ω'})
def_unit(['S', 'Siemens', 'siemens'], A * V ** -1, namespace=_ns,
prefixes=True, doc="Siemens: electrical conductance")
def_unit(['F', 'Farad', 'farad'], C * V ** -1, namespace=_ns, prefixes=True,
doc="Farad: electrical capacitance")
###########################################################################
# MAGNETIC
def_unit(['Wb', 'Weber', 'weber'], V * s, namespace=_ns, prefixes=True,
doc="Weber: magnetic flux")
def_unit(['T', 'Tesla', 'tesla'], Wb * m ** -2, namespace=_ns, prefixes=True,
doc="Tesla: magnetic flux density")
def_unit(['H', 'Henry', 'henry'], Wb * A ** -1, namespace=_ns, prefixes=True,
doc="Henry: inductance")
###########################################################################
# ILLUMINATION
def_unit(['cd', 'candela'], namespace=_ns, prefixes=True,
doc="candela: base unit of luminous intensity in SI")
def_unit(['lm', 'lumen'], cd * sr, namespace=_ns, prefixes=True,
doc="lumen: luminous flux")
def_unit(['lx', 'lux'], lm * m ** -2, namespace=_ns, prefixes=True,
doc="lux: luminous emittance")
###########################################################################
# RADIOACTIVITY
def_unit(['Bq', 'becquerel'], 1 / s, namespace=_ns, prefixes=False,
doc="becquerel: unit of radioactivity")
def_unit(['Ci', 'curie'], Bq * 3.7e10, namespace=_ns, prefixes=False,
doc="curie: unit of radioactivity")
###########################################################################
# BASES
bases = set([m, s, kg, A, cd, rad, K, mol])
###########################################################################
# CLEANUP
del UnitBase
del Unit
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
538a507c81dd48451201514cf90b447e6712ca63b6aa4d962c08bcf52642b898 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines magnitude zero points and related photometric quantities.
The corresponding magnitudes are given in the description of each unit
(the actual definitions are in `~astropy.units.function.logarithmic`).
"""
import numpy as _numpy
from .core import UnitBase, def_unit, Unit
from astropy.constants import si as _si
from . import cgs, si, astrophys
_ns = globals()
def_unit(['Bol', 'L_bol'], _si.L_bol0, namespace=_ns, prefixes=False,
doc="Luminosity corresponding to absolute bolometric magnitude zero "
"(magnitude ``M_bol``).")
def_unit(['bol', 'f_bol'], _si.L_bol0 / (4 * _numpy.pi * (10.*astrophys.pc)**2),
         namespace=_ns, prefixes=False, doc="Irradiance corresponding to "
         "apparent bolometric magnitude zero (magnitude ``m_bol``).")
def_unit(['AB', 'ABflux'], 10.**(48.6/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.Hz,
namespace=_ns, prefixes=False,
doc="AB magnitude zero flux density (magnitude ``ABmag``).")
def_unit(['ST', 'STflux'], 10.**(21.1/-2.5) * cgs.erg * cgs.cm**-2 / si.s / si.AA,
namespace=_ns, prefixes=False,
doc="ST magnitude zero flux density (magnitude ``STmag``).")
def_unit(['mgy', 'maggy'],
namespace=_ns, prefixes=[(['n'], ['nano'], 1e-9)],
doc="Maggies - a linear flux unit that is the flux for a mag=0 object."
"To tie this onto a specific calibrated unit system, the "
"zero_point_flux equivalency should be used.")
def zero_point_flux(flux0):
"""
An equivalency for converting linear flux units ("maggys") defined relative
to a standard source into a standardized system.
Parameters
----------
flux0 : `~astropy.units.Quantity`
The flux of a magnitude-0 object in the "maggy" system.
"""
flux_unit0 = Unit(flux0)
return [(maggy, flux_unit0)]
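# An illustrative sketch (the 3631 Jy AB zero point is only an example
# value, not something defined by this module)::
#
#     >>> from astropy import units as u
#     >>> eq = u.zero_point_flux(3631 * u.Jy)
#     >>> (1e-9 * u.mgy).to(u.nJy, eq)  # approximately 3631 nJy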
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del cgs, si, astrophys
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
|
0061167b3e4bd1f6fb17d8d99978a1e33af18cbfab566d5b5f3324597cd0cc4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""A set of standard astronomical equivalencies."""
from collections import UserList
import warnings
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy.constants import si as _si
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import isiterable
from . import si
from . import cgs
from . import astrophys
from . import misc
from .function import units as function_units
from . import dimensionless_unscaled
from .core import UnitsError, Unit
__all__ = ['parallax', 'spectral', 'spectral_density', 'doppler_radio',
'doppler_optical', 'doppler_relativistic', 'doppler_redshift', 'mass_energy',
'brightness_temperature', 'thermodynamic_temperature',
'beam_angular_area', 'dimensionless_angles', 'logarithmic',
'temperature', 'temperature_energy', 'molar_mass_amu',
'pixel_scale', 'plate_scale', "Equivalency"]
class Equivalency(UserList):
"""
A container for a units equivalency.
Attributes
----------
    name : str
        The name of the equivalency.
    kwargs : dict
        Any positional or keyword arguments used to make the equivalency.
"""
def __init__(self, equiv_list, name='', kwargs=None):
self.data = equiv_list
self.name = [name]
self.kwargs = [kwargs] if kwargs is not None else [dict()]
def __add__(self, other):
if isinstance(other, Equivalency):
new = super().__add__(other)
new.name = self.name[:] + other.name
new.kwargs = self.kwargs[:] + other.kwargs
return new
else:
return self.data.__add__(other)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.name == other.name and
self.kwargs == other.kwargs)
def dimensionless_angles():
"""Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1).
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the angle is raised,
and independent of whether it is part of a more complicated unit.
"""
return Equivalency([(si.radian, None)], "dimensionless_angles")
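# Illustrative use (a sketch, assuming the public ``astropy.units``
# namespace)::
#
#     >>> from astropy import units as u
#     >>> (1.0 * u.rad).to(u.dimensionless_unscaled, u.dimensionless_angles())
#     <Quantity 1.>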
def logarithmic():
"""Allow logarithmic units to be converted to dimensionless fractions"""
return Equivalency([
(dimensionless_unscaled, function_units.dex,
np.log10, lambda x: 10.**x)
], "logarithmic")
def parallax():
"""
Returns a list of equivalence pairs that handle the conversion
between parallax angle and distance.
"""
def parallax_converter(x):
x = np.asanyarray(x)
d = 1 / x
if isiterable(d):
d[d < 0] = np.nan
return d
else:
if d < 0:
return np.array(np.nan)
else:
return d
return Equivalency([
(si.arcsecond, astrophys.parsec, parallax_converter)
], "parallax")
def spectral():
"""
Returns a list of equivalence pairs that handle spectral
wavelength, wave number, frequency, and energy equivalencies.
Allows conversions between wavelength units, wave number units,
frequency units, and energy units as they relate to light.
There are two types of wave number:
* spectroscopic - :math:`1 / \\lambda` (per meter)
* angular - :math:`2 \\pi / \\lambda` (radian per meter)
"""
hc = _si.h.value * _si.c.value
two_pi = 2.0 * np.pi
inv_m_spec = si.m ** -1
inv_m_ang = si.radian / si.m
return Equivalency([
(si.m, si.Hz, lambda x: _si.c.value / x),
(si.m, si.J, lambda x: hc / x),
(si.Hz, si.J, lambda x: _si.h.value * x, lambda x: x / _si.h.value),
(si.m, inv_m_spec, lambda x: 1.0 / x),
(si.Hz, inv_m_spec, lambda x: x / _si.c.value,
lambda x: _si.c.value * x),
(si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),
(inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),
(si.m, inv_m_ang, lambda x: two_pi / x),
(si.Hz, inv_m_ang, lambda x: two_pi * x / _si.c.value,
lambda x: _si.c.value * x / two_pi),
(si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi)
], "spectral")
def spectral_density(wav, factor=None):
"""
Returns a list of equivalence pairs that handle spectral density
with regard to wavelength and frequency.
Parameters
----------
wav : `~astropy.units.Quantity`
`~astropy.units.Quantity` associated with values being converted
(e.g., wavelength or frequency).
Notes
-----
The ``factor`` argument is left for backward-compatibility with the syntax
``spectral_density(unit, factor)`` but users are encouraged to use
``spectral_density(factor * unit)`` instead.
"""
from .core import UnitBase
if isinstance(wav, UnitBase):
if factor is None:
raise ValueError(
'If `wav` is specified as a unit, `factor` should be set')
wav = factor * wav # Convert to Quantity
c_Aps = _si.c.to_value(si.AA / si.s) # Angstrom/s
h_cgs = _si.h.cgs.value # erg * s
hc = c_Aps * h_cgs
# flux density
f_la = cgs.erg / si.angstrom / si.cm ** 2 / si.s
f_nu = cgs.erg / si.Hz / si.cm ** 2 / si.s
nu_f_nu = cgs.erg / si.cm ** 2 / si.s
la_f_la = nu_f_nu
phot_f_la = astrophys.photon / (si.cm ** 2 * si.s * si.AA)
phot_f_nu = astrophys.photon / (si.cm ** 2 * si.s * si.Hz)
la_phot_f_la = astrophys.photon / (si.cm ** 2 * si.s)
# luminosity density
L_nu = cgs.erg / si.s / si.Hz
L_la = cgs.erg / si.s / si.angstrom
nu_L_nu = cgs.erg / si.s
la_L_la = nu_L_nu
phot_L_la = astrophys.photon / (si.s * si.AA)
phot_L_nu = astrophys.photon / (si.s * si.Hz)
# surface brightness (flux equiv)
S_la = cgs.erg / si.angstrom / si.cm ** 2 / si.s / si.sr
S_nu = cgs.erg / si.Hz / si.cm ** 2 / si.s / si.sr
nu_S_nu = cgs.erg / si.cm ** 2 / si.s / si.sr
la_S_la = nu_S_nu
phot_S_la = astrophys.photon / (si.cm ** 2 * si.s * si.AA * si.sr)
phot_S_nu = astrophys.photon / (si.cm ** 2 * si.s * si.Hz * si.sr)
# surface brightness (luminosity equiv)
SL_nu = cgs.erg / si.s / si.Hz / si.sr
SL_la = cgs.erg / si.s / si.angstrom / si.sr
nu_SL_nu = cgs.erg / si.s / si.sr
la_SL_la = nu_SL_nu
phot_SL_la = astrophys.photon / (si.s * si.AA * si.sr)
phot_SL_nu = astrophys.photon / (si.s * si.Hz * si.sr)
def converter(x):
return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def iconverter(x):
return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)
def converter_f_nu_to_nu_f_nu(x):
return x * wav.to_value(si.Hz, spectral())
def iconverter_f_nu_to_nu_f_nu(x):
return x / wav.to_value(si.Hz, spectral())
def converter_f_la_to_la_f_la(x):
return x * wav.to_value(si.AA, spectral())
def iconverter_f_la_to_la_f_la(x):
return x / wav.to_value(si.AA, spectral())
def converter_phot_f_la_to_f_la(x):
return hc * x / wav.to_value(si.AA, spectral())
def iconverter_phot_f_la_to_f_la(x):
return x * wav.to_value(si.AA, spectral()) / hc
def converter_phot_f_la_to_f_nu(x):
return h_cgs * x * wav.to_value(si.AA, spectral())
def iconverter_phot_f_la_to_f_nu(x):
return x / (wav.to_value(si.AA, spectral()) * h_cgs)
def converter_phot_f_la_phot_f_nu(x):
return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps
def iconverter_phot_f_la_phot_f_nu(x):
return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2
converter_phot_f_nu_to_f_nu = converter_phot_f_la_to_f_la
iconverter_phot_f_nu_to_f_nu = iconverter_phot_f_la_to_f_la
def converter_phot_f_nu_to_f_la(x):
return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3
def iconverter_phot_f_nu_to_f_la(x):
return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps)
# for luminosity density
converter_L_nu_to_nu_L_nu = converter_f_nu_to_nu_f_nu
iconverter_L_nu_to_nu_L_nu = iconverter_f_nu_to_nu_f_nu
converter_L_la_to_la_L_la = converter_f_la_to_la_f_la
iconverter_L_la_to_la_L_la = iconverter_f_la_to_la_f_la
converter_phot_L_la_to_L_la = converter_phot_f_la_to_f_la
iconverter_phot_L_la_to_L_la = iconverter_phot_f_la_to_f_la
converter_phot_L_la_to_L_nu = converter_phot_f_la_to_f_nu
iconverter_phot_L_la_to_L_nu = iconverter_phot_f_la_to_f_nu
converter_phot_L_la_phot_L_nu = converter_phot_f_la_phot_f_nu
iconverter_phot_L_la_phot_L_nu = iconverter_phot_f_la_phot_f_nu
converter_phot_L_nu_to_L_nu = converter_phot_f_nu_to_f_nu
iconverter_phot_L_nu_to_L_nu = iconverter_phot_f_nu_to_f_nu
converter_phot_L_nu_to_L_la = converter_phot_f_nu_to_f_la
iconverter_phot_L_nu_to_L_la = iconverter_phot_f_nu_to_f_la
return Equivalency([
# flux
(f_la, f_nu, converter, iconverter),
(f_nu, nu_f_nu, converter_f_nu_to_nu_f_nu, iconverter_f_nu_to_nu_f_nu),
(f_la, la_f_la, converter_f_la_to_la_f_la, iconverter_f_la_to_la_f_la),
(phot_f_la, f_la, converter_phot_f_la_to_f_la, iconverter_phot_f_la_to_f_la),
(phot_f_la, f_nu, converter_phot_f_la_to_f_nu, iconverter_phot_f_la_to_f_nu),
(phot_f_la, phot_f_nu, converter_phot_f_la_phot_f_nu, iconverter_phot_f_la_phot_f_nu),
(phot_f_nu, f_nu, converter_phot_f_nu_to_f_nu, iconverter_phot_f_nu_to_f_nu),
(phot_f_nu, f_la, converter_phot_f_nu_to_f_la, iconverter_phot_f_nu_to_f_la),
# integrated flux
(la_phot_f_la, la_f_la, converter_phot_f_la_to_f_la, iconverter_phot_f_la_to_f_la),
# luminosity
(L_la, L_nu, converter, iconverter),
(L_nu, nu_L_nu, converter_L_nu_to_nu_L_nu, iconverter_L_nu_to_nu_L_nu),
(L_la, la_L_la, converter_L_la_to_la_L_la, iconverter_L_la_to_la_L_la),
(phot_L_la, L_la, converter_phot_L_la_to_L_la, iconverter_phot_L_la_to_L_la),
(phot_L_la, L_nu, converter_phot_L_la_to_L_nu, iconverter_phot_L_la_to_L_nu),
(phot_L_la, phot_L_nu, converter_phot_L_la_phot_L_nu, iconverter_phot_L_la_phot_L_nu),
(phot_L_nu, L_nu, converter_phot_L_nu_to_L_nu, iconverter_phot_L_nu_to_L_nu),
(phot_L_nu, L_la, converter_phot_L_nu_to_L_la, iconverter_phot_L_nu_to_L_la),
# surface brightness (flux equiv)
(S_la, S_nu, converter, iconverter),
(S_nu, nu_S_nu, converter_f_nu_to_nu_f_nu, iconverter_f_nu_to_nu_f_nu),
(S_la, la_S_la, converter_f_la_to_la_f_la, iconverter_f_la_to_la_f_la),
(phot_S_la, S_la, converter_phot_f_la_to_f_la, iconverter_phot_f_la_to_f_la),
(phot_S_la, S_nu, converter_phot_f_la_to_f_nu, iconverter_phot_f_la_to_f_nu),
(phot_S_la, phot_S_nu, converter_phot_f_la_phot_f_nu, iconverter_phot_f_la_phot_f_nu),
(phot_S_nu, S_nu, converter_phot_f_nu_to_f_nu, iconverter_phot_f_nu_to_f_nu),
(phot_S_nu, S_la, converter_phot_f_nu_to_f_la, iconverter_phot_f_nu_to_f_la),
# surface brightness (luminosity equiv)
(SL_la, SL_nu, converter, iconverter),
(SL_nu, nu_SL_nu, converter_L_nu_to_nu_L_nu, iconverter_L_nu_to_nu_L_nu),
(SL_la, la_SL_la, converter_L_la_to_la_L_la, iconverter_L_la_to_la_L_la),
(phot_SL_la, SL_la, converter_phot_L_la_to_L_la, iconverter_phot_L_la_to_L_la),
(phot_SL_la, SL_nu, converter_phot_L_la_to_L_nu, iconverter_phot_L_la_to_L_nu),
(phot_SL_la, phot_SL_nu, converter_phot_L_la_phot_L_nu, iconverter_phot_L_la_phot_L_nu),
(phot_SL_nu, SL_nu, converter_phot_L_nu_to_L_nu, iconverter_phot_L_nu_to_L_nu),
(phot_SL_nu, SL_la, converter_phot_L_nu_to_L_la, iconverter_phot_L_nu_to_L_la),
], "spectral_density", {'wav': wav, 'factor': factor})
def doppler_radio(rest):
r"""
Return the equivalency pairs for the radio convention for velocity.
The radio convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f_0} ; f(V) = f_0 ( 1 - V/c )`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> radio_CO_equiv = u.doppler_radio(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv)
>>> radio_velocity # doctest: +FLOAT_CMP
<Quantity -31.209092088877583 km / s>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value('km/s')
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return (restfreq-x) / (restfreq) * ckms
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x/ckms
return restfreq * (1-voverc)
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return (x-restwav) / (x) * ckms
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return restwav * ckms / (ckms-x)
def to_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
return (resten-x) / (resten) * ckms
def from_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
voverc = x/ckms
return resten * (1-voverc)
return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km/si.s, to_vel_en, from_vel_en),
], "doppler_radio", {'rest': rest})
def doppler_optical(rest):
r"""
Return the equivalency pairs for the optical convention for velocity.
The optical convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0 - f}{f } ; f(V) = f_0 ( 1 + V/c )^{-1}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> optical_CO_equiv = u.doppler_optical(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv)
>>> optical_velocity # doctest: +FLOAT_CMP
<Quantity -31.20584348799674 km / s>
"""
assert_is_spectral_unit(rest)
ckms = _si.c.to_value('km/s')
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return ckms * (restfreq-x) / x
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x/ckms
return restfreq / (1+voverc)
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return ckms * (x/restwav-1)
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
voverc = x/ckms
return restwav * (1+voverc)
def to_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
return ckms * (resten-x) / x
def from_vel_en(x):
resten = rest.to_value(si.eV, equivalencies=spectral())
voverc = x/ckms
return resten / (1+voverc)
return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km/si.s, to_vel_en, from_vel_en),
], "doppler_optical", {'rest': rest})
def doppler_relativistic(rest):
r"""
Return the equivalency pairs for the relativistic convention for velocity.
The full relativistic convention for the relation between velocity and frequency is:
:math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ; f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}`
Parameters
----------
rest : `~astropy.units.Quantity`
Any quantity supported by the standard spectral equivalencies
(wavelength, energy, frequency, wave number).
References
----------
`NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_
Examples
--------
>>> import astropy.units as u
>>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz
>>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq)
>>> measured_freq = 115.2832*u.GHz
>>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv)
>>> relativistic_velocity # doctest: +FLOAT_CMP
<Quantity -31.207467619351537 km / s>
>>> measured_velocity = 1250 * u.km/u.s
>>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv)
>>> relativistic_frequency # doctest: +FLOAT_CMP
<Quantity 114.79156866993588 GHz>
>>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv)
>>> relativistic_wavelength # doctest: +FLOAT_CMP
<Quantity 2.6116243681798923 mm>
""" # noqa: E501
assert_is_spectral_unit(rest)
ckms = _si.c.to_value('km/s')
def to_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
return (restfreq**2-x**2) / (restfreq**2+x**2) * ckms
def from_vel_freq(x):
restfreq = rest.to_value(si.Hz, equivalencies=spectral())
voverc = x/ckms
return restfreq * ((1-voverc) / (1+(voverc)))**0.5
def to_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
return (x**2-restwav**2) / (restwav**2+x**2) * ckms
def from_vel_wav(x):
restwav = rest.to_value(si.AA, spectral())
voverc = x/ckms
return restwav * ((1+voverc) / (1-voverc))**0.5
def to_vel_en(x):
resten = rest.to_value(si.eV, spectral())
return (resten**2-x**2) / (resten**2+x**2) * ckms
def from_vel_en(x):
resten = rest.to_value(si.eV, spectral())
voverc = x/ckms
return resten * ((1-voverc) / (1+(voverc)))**0.5
return Equivalency([(si.Hz, si.km/si.s, to_vel_freq, from_vel_freq),
(si.AA, si.km/si.s, to_vel_wav, from_vel_wav),
(si.eV, si.km/si.s, to_vel_en, from_vel_en),
], "doppler_relativistic", {'rest': rest})
def doppler_redshift():
"""
Returns the equivalence between Doppler redshift (unitless) and radial velocity.
.. note::
This equivalency is not compatible with cosmological
redshift in `astropy.cosmology.units`.
"""
rv_unit = si.km / si.s
C_KMS = _si.c.to_value(rv_unit)
def convert_z_to_rv(z):
zponesq = (1 + z) ** 2
return C_KMS * (zponesq - 1) / (zponesq + 1)
def convert_rv_to_z(rv):
beta = rv / C_KMS
return np.sqrt((1 + beta) / (1 - beta)) - 1
return Equivalency([(dimensionless_unscaled, rv_unit, convert_z_to_rv, convert_rv_to_z)],
"doppler_redshift")
def molar_mass_amu():
"""
Returns the equivalence between amu and molar mass.
"""
return Equivalency([
(si.g/si.mol, misc.u)
], "molar_mass_amu")
def mass_energy():
"""
Returns a list of equivalence pairs that handle the conversion
between mass and energy.
"""
return Equivalency([(si.kg, si.J, lambda x: x * _si.c.value ** 2,
lambda x: x / _si.c.value ** 2),
(si.kg / si.m ** 2, si.J / si.m ** 2,
lambda x: x * _si.c.value ** 2,
lambda x: x / _si.c.value ** 2),
(si.kg / si.m ** 3, si.J / si.m ** 3,
lambda x: x * _si.c.value ** 2,
lambda x: x / _si.c.value ** 2),
(si.kg / si.s, si.J / si.s, lambda x: x * _si.c.value ** 2,
lambda x: x / _si.c.value ** 2),
], "mass_energy")
def brightness_temperature(frequency, beam_area=None):
r"""
Defines the conversion between Jy/sr and "brightness temperature",
:math:`T_B`, in Kelvins. The brightness temperature is a unit very
commonly used in radio astronomy. See, e.g., "Tools of Radio Astronomy"
(Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on `google
books
<https://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__).
:math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)`
If the input is in Jy/beam or Jy (assuming it came from a single beam), the
beam area is essential for this computation: the brightness temperature is
inversely proportional to the beam area.
Parameters
----------
frequency : `~astropy.units.Quantity`
The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g.,
frequency or wavelength). The variable is named 'frequency' because it
is more commonly used in radio astronomy.
BACKWARD COMPATIBILITY NOTE: previous versions of the brightness
temperature equivalency used the keyword ``disp``, which is no longer
supported.
beam_area : `~astropy.units.Quantity` ['solid angle']
Beam area in angular units, i.e. steradian equivalent
Examples
--------
Arecibo C-band beam::
>>> import numpy as np
>>> from astropy import units as u
>>> beam_sigma = 50*u.arcsec
>>> beam_area = 2*np.pi*(beam_sigma)**2
>>> freq = 5*u.GHz
>>> equiv = u.brightness_temperature(freq)
>>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 3.526295144567176 K>
VLA synthetic beam::
>>> bmaj = 15*u.arcsec
>>> bmin = 15*u.arcsec
>>> fwhm_to_sigma = 1./(8*np.log(2))**0.5
>>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2)
>>> freq = 5*u.GHz
>>> equiv = u.brightness_temperature(freq)
>>> (u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 217.2658703625732 K>
Any generic surface brightness:
>>> surf_brightness = 1e6*u.MJy/u.sr
>>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz)) # doctest: +FLOAT_CMP
<Quantity 130.1931904778803 K>
""" # noqa: E501
if frequency.unit.is_equivalent(si.sr):
if not beam_area.unit.is_equivalent(si.Hz):
raise ValueError("The inputs to `brightness_temperature` are "
"frequency and angular area.")
warnings.warn("The inputs to `brightness_temperature` have changed. "
"Frequency is now the first input, and angular area "
"is the second, optional input.",
AstropyDeprecationWarning)
frequency, beam_area = beam_area, frequency
nu = frequency.to(si.GHz, spectral())
if beam_area is not None:
beam = beam_area.to_value(si.sr)
def convert_Jy_to_K(x_jybm):
factor = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
return (x_jybm / beam / factor)
def convert_K_to_Jy(x_K):
factor = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
return (x_K * beam / factor)
return Equivalency([(astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy),
(astrophys.Jy/astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy)],
"brightness_temperature", {'frequency': frequency, 'beam_area': beam_area}) # noqa: E501
else:
def convert_JySr_to_K(x_jysr):
factor = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value
return (x_jysr / factor)
def convert_K_to_JySr(x_K):
factor = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value
return (x_K / factor) # multiplied by 1x for 1 steradian
return Equivalency([(astrophys.Jy/si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)],
"brightness_temperature", {'frequency': frequency, 'beam_area': beam_area}) # noqa: E501
def beam_angular_area(beam_area):
"""
Convert between the ``beam`` unit, which is commonly used to express the area
of a radio telescope resolution element, and an area on the sky.
This equivalency also supports direct conversion between ``Jy/beam`` and
``Jy/steradian`` units, since that is a common operation.
Parameters
----------
beam_area : unit-like
The area of the beam in angular area units (e.g., steradians)
Must have angular area equivalent units.
"""
return Equivalency([(astrophys.beam, Unit(beam_area)),
(astrophys.beam**-1, Unit(beam_area)**-1),
(astrophys.Jy/astrophys.beam, astrophys.Jy/Unit(beam_area))],
"beam_angular_area", {'beam_area': beam_area})
def thermodynamic_temperature(frequency, T_cmb=None):
r"""Defines the conversion between Jy/sr and "thermodynamic temperature",
:math:`T_{CMB}`, in Kelvins. The thermodynamic temperature is a unit very
    commonly used in cosmology. See eqn 8 in [1]_
:math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2 f(\nu) \right)`
with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`
where :math:`x = h \nu / k T`
Parameters
----------
frequency : `~astropy.units.Quantity`
The observed `spectral` equivalent `~astropy.units.Unit` (e.g.,
frequency or wavelength). Must have spectral units.
T_cmb : `~astropy.units.Quantity` ['temperature'] or None
The CMB temperature at z=0. If `None`, the default cosmology will be
used to get this temperature. Must have units of temperature.
Notes
-----
    For broad band receivers, this conversion does not hold,
    as it depends strongly on the frequency.
References
----------
.. [1] Planck 2013 results. IX. HFI spectral response
https://arxiv.org/abs/1303.5070
Examples
--------
Planck HFI 143 GHz::
>>> from astropy import units as u
>>> from astropy.cosmology import Planck15
>>> freq = 143 * u.GHz
>>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
>>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv) # doctest: +FLOAT_CMP
<Quantity 0.37993172 MJy / sr>
"""
nu = frequency.to(si.GHz, spectral())
if T_cmb is None:
from astropy.cosmology import default_cosmology
T_cmb = default_cosmology.get().Tcmb0
def f(nu, T_cmb=T_cmb):
x = _si.h * nu / _si.k_B / T_cmb
return x**2 * np.exp(x) / np.expm1(x)**2
def convert_Jy_to_K(x_jybm):
factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(astrophys.Jy)
return x_jybm / factor
def convert_K_to_Jy(x_K):
factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(si.K)
return x_K / factor
return Equivalency([(astrophys.Jy/si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],
"thermodynamic_temperature", {'frequency': frequency, "T_cmb": T_cmb})
def temperature():
"""Convert between Kelvin, Celsius, Rankine and Fahrenheit here because
Unit and CompositeUnit cannot do addition or subtraction properly.
"""
from .imperial import deg_F, deg_R
return Equivalency([
(si.K, si.deg_C, lambda x: x - 273.15, lambda x: x + 273.15),
(si.deg_C, deg_F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),
(si.K, deg_F, lambda x: (x - 273.15) * 1.8 + 32.0,
lambda x: ((x - 32.0) / 1.8) + 273.15),
(deg_R, deg_F, lambda x: x - 459.67, lambda x: x + 459.67),
(deg_R, si.deg_C, lambda x: (x - 491.67) * (5/9), lambda x: x * 1.8 + 491.67),
(deg_R, si.K, lambda x: x * (5/9), lambda x: x * 1.8)], "temperature")
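# Illustrative use (a sketch)::
#
#     >>> from astropy import units as u
#     >>> (0.0 * u.deg_C).to(u.K, u.temperature())
#     <Quantity 273.15 K>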
def temperature_energy():
"""Convert between Kelvin and keV(eV) to an equivalent amount."""
return Equivalency([
(si.K, si.eV, lambda x: x / (_si.e.value / _si.k_B.value),
lambda x: x * (_si.e.value / _si.k_B.value))], "temperature_energy")
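# Illustrative use (a sketch; 1 keV corresponds to roughly 1.16e7 K)::
#
#     >>> from astropy import units as u
#     >>> (1.0 * u.keV).to(u.K, u.temperature_energy())  # ~1.16e7 K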
def assert_is_spectral_unit(value):
try:
value.to(si.Hz, spectral())
    except (AttributeError, UnitsError) as ex:
        raise UnitsError("The 'rest' value must be a spectral equivalent "
                         "(frequency, wavelength, or energy).") from ex
def pixel_scale(pixscale):
"""
Convert between pixel distances (in units of ``pix``) and other units,
given a particular ``pixscale``.
Parameters
----------
pixscale : `~astropy.units.Quantity`
The pixel scale either in units of <unit>/pixel or pixel/<unit>.
"""
decomposed = pixscale.unit.decompose()
dimensions = dict(zip(decomposed.bases, decomposed.powers))
pix_power = dimensions.get(misc.pix, 0)
if pix_power == -1:
physical_unit = Unit(pixscale * misc.pix)
elif pix_power == 1:
physical_unit = Unit(misc.pix / pixscale)
else:
raise UnitsError(
"The pixel scale unit must have"
" pixel dimensionality of 1 or -1.")
return Equivalency([(misc.pix, physical_unit)],
"pixel_scale", {'pixscale': pixscale})
def plate_scale(platescale):
"""
Convert between lengths (to be interpreted as lengths in the focal plane)
and angular units with a specified ``platescale``.
Parameters
----------
platescale : `~astropy.units.Quantity`
        The plate scale, either in units of distance/angle or angle/distance.
"""
if platescale.unit.is_equivalent(si.arcsec/si.m):
platescale_val = platescale.to_value(si.radian/si.m)
elif platescale.unit.is_equivalent(si.m/si.arcsec):
platescale_val = (1/platescale).to_value(si.radian/si.m)
else:
raise UnitsError("The pixel scale must be in angle/distance or "
"distance/angle")
return Equivalency([(si.m, si.radian, lambda d: d*platescale_val,
lambda rad: rad/platescale_val)],
"plate_scale", {'platescale': platescale})
# -------------------------------------------------------------------------
def __getattr__(attr):
if attr == "with_H0":
import warnings
from astropy.cosmology.units import with_H0
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
("`with_H0` is deprecated from `astropy.units.equivalencies` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.with_H0` instead."),
AstropyDeprecationWarning)
return with_H0
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
5538c71dc43926cfd96507025b7e399519cdaff9d82bcc31c119b191143119e0 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# Standard library
import re
import numbers
from fractions import Fraction
import operator
import warnings
import numpy as np
# AstroPy
from .core import (Unit, dimensionless_unscaled, get_current_unit_registry,
UnitBase, UnitsError, UnitConversionError, UnitTypeError)
from .structured import StructuredUnit
from .utils import is_effectively_unity
from .format.latex import Latex
from astropy.utils.compat.misc import override__dir__
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.misc import isiterable
from astropy.utils.data_info import ParentDtypeInfo
from astropy import config as _config
from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit,
check_output)
from .quantity_helper.function_helpers import (
SUBCLASS_SAFE_FUNCTIONS, FUNCTION_HELPERS, DISPATCHED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
__all__ = ["Quantity", "SpecificTypeQuantity",
"QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity
"""
latex_array_threshold = _config.ConfigItem(100,
'The maximum size an array Quantity can be before its LaTeX '
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
'negative number means that the value will instead be whatever numpy '
'gets from get_printoptions.')
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
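# Illustrative use of ``Quantity.flat`` (a sketch; iteration yields scalar
# Quantities)::
#
#     >>> import numpy as np
#     >>> from astropy import units as u
#     >>> q = np.arange(4.).reshape(2, 2) * u.m
#     >>> [item.to_value(u.cm) for item in q.flat]
#     [0.0, 100.0, 200.0, 300.0]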
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f'{val.value}'
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('value', 'unit')
_construct_from_dict_args = ['value']
_represent_as_dict_primary_data = 'value'
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop('shape')
dtype = attrs.pop('dtype')
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {key: (data if key == 'value' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
map['copy'] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
As a classmethod, the type is the class, ie ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
Unit specification, can be the physical type (ie str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* if python v3.9+ : `typing.Annotated`
* if :mod:`typing_extensions` is installed : `typing_extensions.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
# LOCAL
from ._typing import HAS_ANNOTATED, Annotated
# process whether [unit] or [unit, shape, ptype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError("unit annotation is not a Unit or PhysicalType") from None
        # Allow the annotation to sort of work for python 3.8- / no
        # typing_extensions: instead of bailing out, return the unit for
        # `quantity_input`.
if not HAS_ANNOTATED:
warnings.warn("Quantity annotations are valid static type annotations only"
" if Python is v3.9+ or `typing_extensions` is installed.")
return unit
# Quantity does not (yet) properly extend the NumPy generics types,
# introduced in numpy v1.22+, instead just including the unit info as
# metadata using Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated.__class_getitem__((cls, unit))
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and
isinstance(value, cls)):
value = value.view(cls)
if dtype is None and value.dtype.kind in 'iu':
dtype = float
return np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
            # the second part adds a possible trailing .+-, which will break
            # the float function below and ensures things like 1.2.3deg
# will not work.
pattern = (r'\s*[+-]?'
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
r'([eE][+-]?\d+)?'
r'[.+-]?')
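            # For example, for '1.2e3 m' the match is '1.2e3' and the
            # remainder 'm' becomes unit_string below; for '1.2.3deg' the
            # trailing '[.+-]?' captures the second '.', so float('1.2.')
            # raises and we end up in the TypeError branch.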
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError('Cannot parse "{}" as a {}. It does not '
'start with a number.'
.format(value, cls.__name__))
unit_string = v.string[v.end():].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (dtype is None and not hasattr(value, 'dtype')
and isinstance(unit, StructuredUnit)):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError("The unit attribute {!r} of the input could "
"not be parsed as an astropy Unit, raising "
"the following exception:\n{}"
.format(value.unit, exc))
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# check that array contains numbers or long int objects
if (value.dtype.kind in 'OSU' and
not (value.dtype.kind == 'O' and
isinstance(value.item(0), numbers.Number))):
raise TypeError("The value must be a valid Python or "
"Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if dtype is None and value.dtype.kind in 'iuO':
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, '_quantity_class', cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# Check whether super().__array_finalize should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, '_unit', None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if 'info' in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() created a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more since all use '
'should go through array_function. '
'Please raise an issue on '
'https://github.com/astropy/astropy')
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity`
Results of the ufunc, with the unit set properly.
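
        Examples
        --------
        A small sketch of the behaviour this enables:

        >>> import numpy as np
        >>> import astropy.units as u
        >>> np.add(1. * u.km, 500. * u.m)
        <Quantity 1.5 km>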
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get('out', None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs['out'] = (out_array,) if function.nout == 1 else out_array
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, 'value', input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in
zip(result, unit, out))
if out is None:
# View the result array as a Quantity with the proper unit.
return result if unit is None else self._new_view(result, unit)
# For given output, just set the unit. We know the unit is not None and
# the output is of the correct Quantity subclass, as it was passed
# through check_output.
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None):
"""
Create a Quantity view of some array-like input, and set the unit
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, '_quantity_class', Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
        # (These are turned back into scalars by `.value`.)
        # Note that for an ndarray input, the np.array call takes only about
        # twice as long as the ``obj.__class__ is np.ndarray`` check, so it
        # is not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
        initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if (isinstance(self._unit, StructuredUnit)
or isinstance(unit, StructuredUnit)):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict='silent')
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
"{} instances require normal units, not {} instances."
.format(type(self).__name__, type(unit)))
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
# Standard path, let unit to do work.
return self.unit.to(unit, self.view(np.ndarray),
equivalencies=equivalencies)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
            (none for `~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See also
--------
to_value : get the numerical value in a given unit.
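
        Examples
        --------
        A simple conversion (sketch):

        >>> import astropy.units as u
        >>> q = 1.0 * u.m
        >>> q.to(u.cm)
        <Quantity 100. cm>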
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
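
        Examples
        --------
        A simple example (sketch):

        >>> import astropy.units as u
        >>> q = 1.5 * u.km
        >>> q.to_value(u.m)
        1500.0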
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
        # Index with an empty tuple to decay array scalars into numpy scalars.
return value if value.shape else value[()]
value = property(to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
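
        Examples
        --------
        A simple example (sketch):

        >>> import astropy.units as u
        >>> (1. * u.km).si
        <Quantity 1000. m>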
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter('si'))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale,
si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter('cgs'))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale,
cgs_unit / cgs_unit.scale)
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
    # not enabled on Quantity itself, but is on some subclasses of
    # Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return []
extra_members = set()
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(
equivalencies):
extra_members.update(equivalent.names)
return extra_members
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member")
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'")
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
# Unit conversion operator (<<).
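    # For example, ``q << u.m`` behaves like ``q.to(u.m)`` but, being created
    # with ``copy=False``, avoids copying the data when no conversion is
    # needed; ``q <<= u.m`` converts the data in place.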
def __lshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
try:
factor = self.unit._to(other)
except Exception:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] *= factor
self._set_unit(other)
return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
""" Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
""" Right Multiplication between `Quantity` objects and other
objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
""" Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
""" Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1. / self.value, other / self.unit)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other),
self.unit ** other)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value is not iterable"
.format(cls=self.__class__.__name__))
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(self.view(np.ndarray)[key], self.unit[key])
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value does not support "
"indexing".format(cls=self.__class__.__name__))
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing will cause a different unit, so by doing this in
# two steps we effectively try with the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and 'info' in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn('The truth value of a Quantity is ambiguous. '
'In the future this will raise a ValueError.',
AstropyDeprecationWarning)
return True
def __len__(self):
if self.isscalar:
raise TypeError("'{cls}' object with a scalar value has no "
"len()".format(cls=self.__class__.__name__))
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError('only integer dimensionless scalar quantities '
'can be converted to a Python index')
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
subfmt : str, optional
Subformat of the result. For the moment,
only used for format="latex". Supported values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f'{self.value}{self._unitstr:s}'
else:
# np.array2string properly formats arrays as well as scalars
return np.array2string(self.value, precision=precision,
floatmode="fixed") + self._unitstr
# else, for the moment we assume format="latex"
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value,
format_spec=format_spec)
def complex_formatter(value):
return '({}{}i)'.format(
Latex.format_exponential_notation(value.real,
format_spec=format_spec),
Latex.format_exponential_notation(value.imag,
format_spec='+' + format_spec))
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(conf.latex_array_threshold
if conf.latex_array_threshold > -1 else pops['threshold']),
formatter={'float_kind': float_formatter,
'complex_kind': complex_formatter},
max_line_width=np.inf,
separator=',~')
latex_value = latex_value.replace('...', r'\dots')
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
latex_unit = (self.unit._repr_latex_()[1:-1] # note this is unicode
if self.unit is not None
else _UNIT_NOT_INITIALISED)
delimiter_left, delimiter_right = formats[format][subfmt]
return rf'{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}'
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
arrstr = np.array2string(self.view(np.ndarray), separator=', ',
prefix=prefixstr)
return f'{prefixstr}{arrstr}{self._unitstr:s}>'
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format='latex', subfmt='inline')
def __format__(self, format_spec):
"""
Format quantities using the new-style python formatting codes
as specifiers for the number.
If the format specifier correctly applies itself to the value,
then it is used to format only the value. If it cannot be
applied to the value, then it is applied to the whole string.
"""
try:
value = format(self.value, format_spec)
full_format_spec = "s"
except ValueError:
value = self.value
full_format_spec = format_spec
return format(f"{value}{self._unitstr:s}",
full_format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
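
        Examples
        --------
        A simple example (sketch):

        >>> import astropy.units as u
        >>> (3. * u.km / u.h).decompose()  # doctest: +FLOAT_CMP
        <Quantity 0.83333333 m / s>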
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, 'scale'):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError("cannot make a list of Quantities. Get "
"list of values with q.value.tolist()")
def _to_own_unit(self, value, check_precision=True):
try:
_value = value.to_value(self.unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if (value is np.ma.masked
or (value is np.ma.masked_print_option
and self.dtype.kind == 'O')):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(self.unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if (not hasattr(value, 'unit') and
can_have_arbitrary_unit(as_quantity.value)):
_value = as_quantity.value
else:
raise
if self.dtype.kind == 'i' and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all(np.logical_or(self_dtype_array == _value,
np.isnan(_value))):
raise TypeError("cannot convert value type to array type "
"without precision loss")
# Setting names to ensure things like equality work (note that
# above will have failed already if units did not match).
if self.dtype.names:
_value.dtype.names = self.dtype.names
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
def tostring(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tostring(...).")
def tobytes(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tobytes(...).")
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("cannot write Quantities to file. Write "
"array with q.value.tofile(...)")
def dump(self, file):
raise NotImplementedError("cannot dump Quantities to file. Write "
"array with q.value.dump()")
def dumps(self):
raise NotImplementedError("cannot dump Quantities to string. Write "
"array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode='raise'):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode='raise'):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode='raise'):
raise NotImplementedError("cannot choose based on quantity. Choose "
"using array with q.value.choose(...)")
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind='quicksort', order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(np.array(self),
self._to_own_unit(v, check_precision=False),
*args, **kwargs) # avoid numpy 1.6 problem
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result: `~astropy.units.Quantity`, `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
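
        Examples
        --------
        A sketch, assuming ``np.concatenate`` is among the supported
        functions:

        >>> import numpy as np
        >>> import astropy.units as u
        >>> np.concatenate([[1., 2.] * u.m, [3000.] * u.mm])
        <Quantity [1., 2., 3.] m>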
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn("function '{}' is not known to astropy's Quantity. "
"Will run it anyway, hoping it will treat ndarray "
"subclasses correctly. Please raise an issue at "
"https://github.com/astropy/astropy/issues. "
.format(function.__name__), AstropyWarning)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Quantity)
for t in types):
raise TypeError("the Quantity implementation cannot handle {} "
"with the given arguments."
.format(function)) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
else arg) for arg in args)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs['out'] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, keepdims=keepdims,
unit=self.unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof,
keepdims=keepdims)
def mean(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.mean, axis, dtype, out=out,
keepdims=keepdims)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)")
def any(self, axis=None, out=None):
raise TypeError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'"
.format(type(self).__name__, self._equivalent_unit) +
(", but no unit was given." if unit is None else
f", so cannot set it to '{unit}'."))
super()._set_unit(unit)
def isclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See also
--------
allclose
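
    Examples
    --------
    A simple example (sketch):

    >>> import astropy.units as u
    >>> u.isclose([1., 200.] * u.m, [100., 200.] * u.cm)
    array([ True, False])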
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def allclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See also
--------
isclose
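
    Examples
    --------
    A simple example (sketch):

    >>> import astropy.units as u
    >>> u.allclose([1., 2.] * u.m, [100., 200.] * u.cm)
    True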
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
9a61460dce45f518dd4677b45d10e22c226fdb31db18829988e4958e6d6a6a54 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['quantity_input']
import inspect
from numbers import Number
from collections.abc import Sequence
from functools import wraps
import numpy as np
from . import _typing as T
from .core import (Unit, UnitBase, UnitsError,
add_enabled_equivalencies, dimensionless_unscaled)
from .function.core import FunctionUnitBase
from .physical import PhysicalType, get_physical_type
from .quantity import Quantity
from .structured import StructuredUnit
NoneType = type(None)
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
unit = get_physical_type(target)._unit
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise ValueError(f"Invalid unit or physical type {target!r}.") from None
allowed_units.append(unit)
return allowed_units
def _validate_arg_value(param_name, func_name, arg, targets, equivalencies,
strict_dimensionless=False):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
# If dimensionless is an allowed unit and the argument is unit-less,
# allow numbers or numpy arrays with numeric dtypes
if (dimensionless_unscaled in allowed_units and not strict_dimensionless
and not hasattr(arg, "unit")):
if isinstance(arg, Number):
return
elif (isinstance(arg, np.ndarray)
and np.issubdtype(arg.dtype, np.number)):
return
for allowed_unit in allowed_units:
try:
is_equivalent = arg.unit.is_equivalent(allowed_unit,
equivalencies=equivalencies)
if is_equivalent:
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = ("a 'unit' attribute without an 'is_equivalent' method")
else:
error_msg = "no 'unit' attribute"
raise TypeError(f"Argument '{param_name}' to function '{func_name}'"
f" has {error_msg}. You should pass in an astropy "
"Quantity instead.")
else:
error_msg = (f"Argument '{param_name}' to function '{func_name}' must "
"be in units convertible to")
if len(targets) > 1:
targ_names = ", ".join([f"'{str(targ)}'" for targ in targets])
raise UnitsError(f"{error_msg} one of: {targ_names}.")
else:
raise UnitsError(f"{error_msg} '{str(targets[0])}'.")
def _parse_annotation(target):
if target in (None, NoneType, inspect._empty):
return target
# check if unit-like
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
ptype = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
if isinstance(target, str):
raise ValueError(f"invalid unit or physical type {target!r}.") from None
else:
return ptype
else:
return unit
# could be a type hint
origin = T.get_origin(target)
if origin is T.Union:
return [_parse_annotation(t) for t in T.get_args(target)]
elif origin is not T.Annotated: # can't be Quantity[]
return False
# parse type hint
cls, *annotations = T.get_args(target)
if not issubclass(cls, Quantity) or not annotations:
return False
# get unit from type hint
unit, *rest = annotations
if not isinstance(unit, (UnitBase, PhysicalType)):
return False
return unit
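# A minimal sketch of what _parse_annotation returns for a few inputs
# (illustrative, not from the original source):
#
#     >>> import astropy.units as u
#     >>> _parse_annotation(u.arcsec)              # unit-like -> Unit
#     Unit("arcsec")
#     >>> _parse_annotation(u.Quantity[u.arcsec])  # annotated Quantity -> Unit
#     Unit("arcsec")
#     >>> _parse_annotation(int)                   # unrelated type hint -> False
#     False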
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the
decorator, or by using function annotation syntax. Arguments to the
decorator take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator or
in the annotation. If the argument has no unit attribute, i.e. it is not
a Quantity object, a `TypeError` will be raised unless the argument is
an annotation. This is to allow non-Quantity annotations to pass
through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*args or \**kwargs).
The original function is accessible via the ``__wrapped__`` attribute.
See :func:`functools.wraps` for details.
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Or using a unit-aware Quantity annotation.
.. code-block:: python
@u.quantity_input
def myfunction(myangle: u.Quantity[u.arcsec]):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, strict_dimensionless=False, **kwargs):
self.equivalencies = kwargs.pop('equivalencies', [])
self.decorator_kwargs = kwargs
self.strict_dimensionless = strict_dimensionless
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL):
continue
# Catch the (never triggered) case where bind relied on a default value.
if (param.name not in bound_args.arguments
and param.default is not param.empty):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# parses to unit if it's an annotation (or list thereof)
targets = _parse_annotation(targets)
# If targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target units/physical types
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if (isinstance(targets, str)
or not isinstance(targets, Sequence)):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets or NoneType in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [t for t in valid_targets
if isinstance(t, (str, UnitBase, PhysicalType))]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(param.name, wrapped_function.__name__,
arg, valid_targets, self.equivalencies,
self.strict_dimensionless)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
# Return
ra = wrapped_signature.return_annotation
valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn)
if ra not in valid_empty:
target = (ra if T.get_origin(ra) not in (T.Annotated, T.Union)
else _parse_annotation(ra))
if isinstance(target, str) or not isinstance(target, Sequence):
target = [target]
valid_targets = [t for t in target
if isinstance(t, (str, UnitBase, PhysicalType))]
_validate_arg_value("return", wrapped_function.__name__,
return_, valid_targets, self.equivalencies,
self.strict_dimensionless)
if len(valid_targets) > 0:
return_ <<= valid_targets[0]
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
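# A minimal runtime sketch of the decorator defined above, including the
# return-annotation coercion handled by ``return_ <<= valid_targets[0]``
# (values are illustrative, not from the original source):
#
#     >>> import astropy.units as u
#     >>> @u.quantity_input
#     ... def angle_squared(a: u.arcsec) -> u.deg**2:
#     ...     return a ** 2
#     >>> angle_squared(3600 * u.arcsec)  # doctest: +FLOAT_CMP
#     <Quantity 1. deg2>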
|
4842380e631836dca31c470023e22df4513f2eaa975e1608e79fa7d5cb0891ed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The `astropy.nddata` subpackage provides the `~astropy.nddata.NDData`
class and related tools to manage n-dimensional array-based data (e.g.
CCD images, IFU Data, grid-based simulation data, ...). This is more than
just `numpy.ndarray` objects, because it provides metadata that cannot
be easily provided by a single array.
"""
from .nddata import *
from .nddata_base import *
from .nddata_withmixins import *
from .nduncertainty import *
from .flag_collection import *
from .decorators import *
from .mixins.ndarithmetic import *
from .mixins.ndslicing import *
from .mixins.ndio import *
from .blocks import *
from .compat import *
from .utils import *
from .ccddata import *
from .bitmask import *
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.nddata`.
"""
warn_unsupported_correlated = _config.ConfigItem(
True,
'Whether to issue a warning if `~astropy.nddata.NDData` arithmetic '
'is performed with uncertainties and the uncertainties do not '
'support the propagation of correlated uncertainties.'
)
warn_setting_unit_directly = _config.ConfigItem(
True,
'Whether to issue a warning when the `~astropy.nddata.NDData` unit '
'attribute is changed from a non-``None`` value to another value '
'that data values/uncertainties are not scaled with the unit change.'
)
conf = Conf()
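# A minimal sketch of using these configuration items (illustrative, not
# from the original source):
#
#     >>> from astropy.nddata import conf
#     >>> conf.warn_unsupported_correlated
#     True
#     >>> with conf.set_temp('warn_unsupported_correlated', False):
#     ...     pass  # arithmetic with unsupported uncertainties won't warn here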
|
ac8cd0f1c0ff0084933fc0a15b99db05ce918df64d6318ba9d53698032d3dcdc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDDataBase class.
from abc import ABCMeta, abstractmethod
__all__ = ['NDDataBase']
class NDDataBase(metaclass=ABCMeta):
"""Base metaclass that defines the interface for N-dimensional datasets
with associated meta information used in ``astropy``.
All properties and ``__init__`` have to be overridden in subclasses. See
`NDData` for a subclass that defines this interface on `numpy.ndarray`-like
``data``.
See also: https://docs.astropy.org/en/stable/nddata/
"""
@abstractmethod
def __init__(self):
pass
@property
@abstractmethod
def data(self):
"""The stored dataset.
"""
pass
@property
@abstractmethod
def mask(self):
"""Mask for the dataset.
Masks should follow the ``numpy`` convention that **valid** data points
are marked by ``False`` and **invalid** ones with ``True``.
"""
return None
@property
@abstractmethod
def unit(self):
"""Unit for the dataset.
"""
return None
@property
@abstractmethod
def wcs(self):
"""World coordinate system (WCS) for the dataset.
"""
return None
@property
@abstractmethod
def meta(self):
"""Additional meta information about the dataset.
Should be `dict`-like.
"""
return None
@property
@abstractmethod
def uncertainty(self):
"""Uncertainty in the dataset.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, such as ``"std"`` for standard deviation or
``"var"`` for variance.
"""
return None
|
1c02db8bbce4cea4d85471b4eb1670b878c58d89eda7657a41d8a965666547a7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements a class based on NDData with all Mixins.
"""
from .nddata import NDData
from .mixins.ndslicing import NDSlicingMixin
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
__all__ = ['NDDataRef']
class NDDataRef(NDArithmeticMixin, NDIOMixin, NDSlicingMixin, NDData):
"""Implements `NDData` with all Mixins.
This class implements a `NDData`-like container that supports reading and
writing as implemented in the ``astropy.io.registry`` and also slicing
(indexing) and simple arithmetic (add, subtract, divide and multiply).
Notes
-----
A key distinction from `NDDataArray` is that this class does not attempt
to provide anything that was not defined in any of the parent classes.
See also
--------
NDData
NDArithmeticMixin
NDSlicingMixin
NDIOMixin
Examples
--------
The mixins allow operations that are not possible with `NDData` or
`NDDataBase`, e.g. simple arithmetic::
>>> from astropy.nddata import NDDataRef, StdDevUncertainty
>>> import numpy as np
>>> data = np.ones((3,3), dtype=float)
>>> ndd1 = NDDataRef(data, uncertainty=StdDevUncertainty(data))
>>> ndd2 = NDDataRef(data, uncertainty=StdDevUncertainty(data))
>>> ndd3 = ndd1.add(ndd2)
>>> ndd3.data # doctest: +FLOAT_CMP
array([[2., 2., 2.],
[2., 2., 2.],
[2., 2., 2.]])
>>> ndd3.uncertainty.array # doctest: +FLOAT_CMP
array([[1.41421356, 1.41421356, 1.41421356],
[1.41421356, 1.41421356, 1.41421356],
[1.41421356, 1.41421356, 1.41421356]])
See `NDArithmeticMixin` for a complete list of all supported arithmetic
operations.
But also slicing (indexing) is possible::
>>> ndd4 = ndd3[1,:]
>>> ndd4.data # doctest: +FLOAT_CMP
array([2., 2., 2.])
>>> ndd4.uncertainty.array # doctest: +FLOAT_CMP
array([1.41421356, 1.41421356, 1.41421356])
See `NDSlicingMixin` for a description of how slicing works (i.e., which
attributes are sliced).
"""
pass
|
ba335620567022c3e090cfde84c671c4d0b7b2a86a9f15b06b6fb83ccee5f96f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
import numpy as np
from .decorators import support_nddata
__all__ = ['reshape_as_blocks', 'block_reduce', 'block_replicate']
def _process_block_inputs(data, block_size):
data = np.asanyarray(data)
block_size = np.atleast_1d(block_size)
if np.any(block_size <= 0):
raise ValueError('block_size elements must be strictly positive')
if data.ndim > 1 and len(block_size) == 1:
block_size = np.repeat(block_size, data.ndim)
if len(block_size) != data.ndim:
raise ValueError('block_size must be a scalar or have the same '
'length as the number of data dimensions')
block_size_int = block_size.astype(int)
if np.any(block_size_int != block_size): # e.g., 2.0 is OK, 2.1 is not
raise ValueError('block_size elements must be integers')
return data, block_size_int
def reshape_as_blocks(data, block_size):
"""
Reshape a data array into blocks.
This is useful to efficiently apply functions on block subsets of
the data instead of using loops. The reshaped array is a view of
the input data array.
.. versionadded:: 4.1
Parameters
----------
data : ndarray
The input data array.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis. Each dimension
of ``block_size`` must divide evenly into the corresponding
dimension of ``data``.
Returns
-------
output : ndarray
The reshaped array as a view of the input ``data`` array.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import reshape_as_blocks
>>> data = np.arange(16).reshape(4, 4)
>>> data
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> reshape_as_blocks(data, (2, 2))
array([[[[ 0, 1],
[ 4, 5]],
[[ 2, 3],
[ 6, 7]]],
[[[ 8, 9],
[12, 13]],
[[10, 11],
[14, 15]]]])
"""
data, block_size = _process_block_inputs(data, block_size)
if np.any(np.mod(data.shape, block_size) != 0):
raise ValueError('Each dimension of block_size must divide evenly '
'into the corresponding dimension of data')
nblocks = np.array(data.shape) // block_size
new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)
nblocks_idx = tuple(range(0, len(new_shape), 2)) # even indices
block_idx = tuple(range(1, len(new_shape), 2)) # odd indices
return data.reshape(new_shape).transpose(nblocks_idx + block_idx)
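# A minimal sketch of the intended use: apply a reduction over each block
# via the reshaped view (illustrative, not from the original source):
#
#     >>> import numpy as np
#     >>> data = np.arange(16).reshape(4, 4)
#     >>> reshape_as_blocks(data, (2, 2)).sum(axis=(2, 3))
#     array([[10, 18],
#            [42, 50]])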
@support_nddata
def block_reduce(data, block_size, func=np.sum):
"""
Downsample a data array by applying a function to local blocks.
If ``data`` is not perfectly divisible by ``block_size`` along a
given axis then the data will be trimmed (from the end) along that
axis.
Parameters
----------
data : array-like
The data to be resampled.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis.
func : callable, optional
The method to use to downsample the data. Must be a callable
that takes in a `~numpy.ndarray` along with an ``axis`` keyword,
which defines the axis or axes along which the function is
applied. The ``axis`` keyword must accept multiple axes as a
tuple. The default is `~numpy.sum`, which provides block
summation (and conserves the data sum).
Returns
-------
output : array-like
The resampled data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import block_reduce
>>> data = np.arange(16).reshape(4, 4)
>>> block_reduce(data, 2) # doctest: +FLOAT_CMP
array([[10, 18],
[42, 50]])
>>> block_reduce(data, 2, func=np.mean) # doctest: +FLOAT_CMP
array([[ 2.5, 4.5],
[ 10.5, 12.5]])
"""
data, block_size = _process_block_inputs(data, block_size)
nblocks = np.array(data.shape) // block_size
size_init = nblocks * block_size # evenly-divisible size
# trim data if necessary
for axis in range(data.ndim):
if data.shape[axis] != size_init[axis]:
data = data.swapaxes(0, axis)
data = data[:size_init[axis]]
data = data.swapaxes(0, axis)
reshaped = reshape_as_blocks(data, block_size)
axis = tuple(range(data.ndim, reshaped.ndim))
return func(reshaped, axis=axis)
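# A minimal sketch of the trimming behavior described above: a 4x4 array
# reduced with block_size=3 is first trimmed (from the end) to 3x3
# (illustrative, not from the original source):
#
#     >>> import numpy as np
#     >>> data = np.arange(16).reshape(4, 4)
#     >>> block_reduce(data, 3)
#     array([[45]])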
@support_nddata
def block_replicate(data, block_size, conserve_sum=True):
"""
Upsample a data array by block replication.
Parameters
----------
data : array-like
The data to be block replicated.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis.
conserve_sum : bool, optional
If `True` (the default) then the sum of the output
block-replicated data will equal the sum of the input ``data``.
Returns
-------
output : array-like
The block-replicated data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import block_replicate
>>> data = np.array([[0., 1.], [2., 3.]])
>>> block_replicate(data, 2) # doctest: +FLOAT_CMP
array([[0. , 0. , 0.25, 0.25],
[0. , 0. , 0.25, 0.25],
[0.5 , 0.5 , 0.75, 0.75],
[0.5 , 0.5 , 0.75, 0.75]])
>>> block_replicate(data, 2, conserve_sum=False) # doctest: +FLOAT_CMP
array([[0., 0., 1., 1.],
[0., 0., 1., 1.],
[2., 2., 3., 3.],
[2., 2., 3., 3.]])
"""
data, block_size = _process_block_inputs(data, block_size)
for i in range(data.ndim):
data = np.repeat(data, block_size[i], axis=i)
if conserve_sum:
data = data / float(np.prod(block_size))
return data
|
530e7aa46b684a9311378124bd8530b93460f350a15acb12dcfdd74ffd8c7b1c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.utils import lazyproperty
from astropy.wcs.utils import skycoord_to_pixel, proj_plane_pixel_scales
from astropy.wcs import Sip
__all__ = ['extract_array', 'add_array', 'subpixel_indices',
'overlap_slices', 'NoOverlapError', 'PartialOverlapError',
'Cutout2D']
class NoOverlapError(ValueError):
'''Raised when determining the overlap of non-overlapping arrays.'''
pass
class PartialOverlapError(ValueError):
'''Raised when arrays only partially overlap.'''
pass
def overlap_slices(large_array_shape, small_array_shape, position,
mode='partial'):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
"""
if mode not in ['partial', 'trim', 'strict']:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape, )
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape, )
if np.isscalar(position):
position = (position, )
if any(~np.isfinite(position)):
raise ValueError('Input position contains invalid values (NaNs or '
'infs).')
if len(small_array_shape) != len(large_array_shape):
raise ValueError('"large_array_shape" and "small_array_shape" must '
'have the same number of dimensions.')
if len(small_array_shape) != len(position):
raise ValueError('"position" must have the same number of dimensions '
'as "small_array_shape".')
# define the min/max pixel indices
indices_min = [int(np.ceil(pos - (small_shape / 2.)))
for (pos, small_shape) in zip(position, small_array_shape)]
indices_max = [int(np.ceil(pos + (small_shape / 2.)))
for (pos, small_shape) in zip(position, small_array_shape)]
for e_max in indices_max:
if e_max < 0:
raise NoOverlapError('Arrays do not overlap.')
for e_min, large_shape in zip(indices_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError('Arrays do not overlap.')
if mode == 'strict':
for e_min in indices_min:
if e_min < 0:
raise PartialOverlapError('Arrays overlap only partially.')
for e_max, large_shape in zip(indices_max, large_array_shape):
if e_max > large_shape:
raise PartialOverlapError('Arrays overlap only partially.')
# Set up slices
slices_large = tuple(slice(max(0, indices_min),
min(large_shape, indices_max))
for (indices_min, indices_max, large_shape) in
zip(indices_min, indices_max, large_array_shape))
if mode == 'trim':
slices_small = tuple(slice(0, slc.stop - slc.start)
for slc in slices_large)
else:
slices_small = tuple(slice(max(0, -indices_min),
min(large_shape - indices_min,
indices_max - indices_min))
for (indices_min, indices_max, large_shape) in
zip(indices_min, indices_max, large_array_shape))
return slices_large, slices_small
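# A minimal usage sketch (values computed by hand from the rules above,
# not from the original source): extract a length-3 window centered at
# index 2 of a length-5 array.
#
#     >>> import numpy as np
#     >>> large = np.arange(5)
#     >>> slc_large, slc_small = overlap_slices(large.shape, (3,), (2,))
#     >>> slc_large, slc_small
#     ((slice(1, 4, None),), (slice(0, 3, None),))
#     >>> large[slc_large]
#     array([1, 2, 3])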
def extract_array(array_large, shape, position, mode='partial',
fill_value=np.nan, return_position=False):
"""
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : ndarray
The array from which to extract the small array.
shape : int or tuple thereof
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` will be changed to have the same ``dtype`` as the
``array_large`` array, with one exception. If ``array_large``
has integer type and ``fill_value`` is ``np.nan``, then a
`ValueError` will be raised.
return_position : bool, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : ndarray
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]])
"""
if np.isscalar(shape):
shape = (shape, )
if np.isscalar(position):
position = (position, )
if mode not in ['partial', 'trim', 'strict']:
raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")
large_slices, small_slices = overlap_slices(array_large.shape,
shape, position, mode=mode)
extracted_array = array_large[large_slices]
if return_position:
new_position = [i - s.start for i, s in zip(position, large_slices)]
# Extracting on the edges is presumably a rare case, so treat it specially here
if (extracted_array.shape != shape) and (mode == 'partial'):
extracted_array = np.zeros(shape, dtype=array_large.dtype)
try:
extracted_array[:] = fill_value
except ValueError as exc:
exc.args += ('fill_value is inconsistent with the data type of '
'the input array (e.g., fill_value cannot be set to '
'np.nan if the input array has integer type). Please '
'change either the input array dtype or the '
'fill_value.',)
raise exc
extracted_array[small_slices] = array_large[large_slices]
if return_position:
new_position = [i + s.start for i, s in zip(new_position,
small_slices)]
if return_position:
return extracted_array, tuple(new_position)
else:
return extracted_array
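# A minimal sketch of 'partial' mode with an explicit fill value
# (illustrative, not from the original source):
#
#     >>> import numpy as np
#     >>> extract_array(np.arange(9).reshape(3, 3), (3, 3), (0, 0),
#     ...               mode='partial', fill_value=-1)
#     array([[-1, -1, -1],
#            [-1,  0,  1],
#            [-1,  3,  4]])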
def add_array(array_large, array_small, position):
"""
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : ndarray
Large array.
array_small : ndarray
Small array to add. Can be equal to ``array_large`` in size in a given
dimension, but not larger.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : ndarray
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
"""
# Check if large array is not smaller
if all(large_shape >= small_shape for (large_shape, small_shape)
in zip(array_large.shape, array_small.shape)):
large_slices, small_slices = overlap_slices(array_large.shape,
array_small.shape,
position)
array_large[large_slices] += array_small[small_slices]
return array_large
else:
raise ValueError("Can't add array. Small array too large.")
def subpixel_indices(position, subsampling):
"""
Convert decimal points to indices, given a subsampling factor.
This discards the integer part of the position and uses only the decimal
place, and converts this to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : ndarray or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : ndarray
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
If instead we use a subsampling of 2, we see that for the first two values
(1.2 and 3.4) the subpixel index is 1, while for 5.5 it is 0. This is
because pixels are centered on the integer values, so 1.2 and 3.4 lie in
the right half of the pixels centered at 1 and 3, while 5.5 lies in the
left half of the pixel centered at 6.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.])
"""
# Get decimal points
fractions = np.modf(np.asanyarray(position) + 0.5)[0]
return np.floor(fractions * subsampling)
class Cutout2D:
"""
Create a cutout object from a 2D array.
The returned object will contain a 2D cutout array. If
``copy=False`` (default), the cutout array is a view into the
original ``data`` array, otherwise the cutout array will contain a
copy of the original data.
If a `~astropy.wcs.WCS` object is input, then the returned object
will also contain a copy of the original WCS, but updated for the
cutout array.
For example usage, see :ref:`astropy:cutout_images`.
.. warning::
The cutout WCS object does not currently handle cases where the
input WCS object contains distortion lookup tables described in
the `FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
Parameters
----------
data : ndarray
The 2D data array from which to extract the cutout array.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to
the ``data`` array. The position can be specified either as
a ``(x, y)`` tuple of pixel coordinates or a
`~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
required input.
size : int, array-like, or `~astropy.units.Quantity`
The size of the cutout array along each axis. If ``size``
is a scalar number or a scalar `~astropy.units.Quantity`,
then a square cutout of ``size`` will be created. If
``size`` has two elements, they should be in ``(ny, nx)``
order. Scalar numbers in ``size`` are assumed to be in
units of pixels. ``size`` can also be a
`~astropy.units.Quantity` object or contain
`~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or
angular units. For all cases, ``size`` will be converted to
an integer number of pixels, rounding to the nearest
integer. See the ``mode`` keyword for additional details on
the final cutout size.
.. note::
If ``size`` is in angular units, the cutout size is
converted to pixels using the pixel scales along each
axis of the image at the ``CRPIX`` location. Projection
and other non-linear distortions are not taken into
account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input ``data`` array. If
``wcs`` is not `None`, then the returned cutout object will
contain a copy of the updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
cutout array and the input ``data`` array is sufficient.
For the ``'strict'`` mode, the cutout array has to be fully
contained within the ``data`` array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In
all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'``
mode, positions in the cutout array that do not overlap with
the ``data`` array will be filled with ``fill_value``. In
``'trim'`` mode only the overlapping elements are returned,
thus the resulting cutout array may be smaller than the
requested ``shape``.
fill_value : float or int, optional
If ``mode='partial'``, the value to fill pixels in the
cutout array that do not overlap with the input ``data``.
``fill_value`` must have the same ``dtype`` as the input
``data`` array.
copy : bool, optional
If `False` (default), then the cutout data will be a view
into the original ``data`` array. If `True`, then the
cutout data will hold a copy of the original ``data`` array.
Attributes
----------
data : 2D `~numpy.ndarray`
The 2D cutout array.
shape : (2,) tuple
The ``(ny, nx)`` shape of the cutout array.
shape_input : (2,) tuple
The ``(ny, nx)`` shape of the input (original) array.
input_position_cutout : (2,) tuple
The (unrounded) ``(x, y)`` position with respect to the cutout
array.
input_position_original : (2,) tuple
The original (unrounded) ``(x, y)`` input position (with respect
to the original array).
slices_original : (2,) tuple of slice object
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the original array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
slices_cutout : (2,) tuple of slice object
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the cutout array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
xmin_original, ymin_original, xmax_original, ymax_original : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values. These
values are the same as those in `bbox_original`.
xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values. These values are
the same as those in `bbox_cutout`.
wcs : `~astropy.wcs.WCS` or None
A WCS object associated with the cutout array if a ``wcs``
was input.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import Cutout2D
>>> from astropy import units as u
>>> data = np.arange(20.).reshape(5, 4)
>>> cutout1 = Cutout2D(data, (2, 2), (3, 3))
>>> print(cutout1.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> print(cutout1.center_original)
(2.0, 2.0)
>>> print(cutout1.center_cutout)
(1.0, 1.0)
>>> print(cutout1.origin_original)
(1, 1)
>>> cutout2 = Cutout2D(data, (2, 2), 3)
>>> print(cutout2.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> size = u.Quantity([3, 3], u.pixel)
>>> cutout3 = Cutout2D(data, (0, 0), size)
>>> print(cutout3.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3))
>>> print(cutout4.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial')
>>> print(cutout5.data) # doctest: +FLOAT_CMP
[[nan nan nan]
[nan 0. 1.]
[nan 4. 5.]]
"""
def __init__(self, data, position, size, wcs=None, mode='trim',
fill_value=np.nan, copy=False):
if wcs is None:
wcs = getattr(data, 'wcs', None)
if isinstance(position, SkyCoord):
if wcs is None:
raise ValueError('wcs must be input if position is a '
'SkyCoord')
position = skycoord_to_pixel(position, wcs, mode='all') # (x, y)
if np.isscalar(size):
size = np.repeat(size, 2)
# special handling for a scalar Quantity
if isinstance(size, u.Quantity):
size = np.atleast_1d(size)
if len(size) == 1:
size = np.repeat(size, 2)
if len(size) > 2:
raise ValueError('size must have at most two elements')
shape = np.zeros(2).astype(int)
pixel_scales = None
# ``size`` can have a mixture of int and Quantity (and even units),
# so evaluate each axis separately
for axis, side in enumerate(size):
if not isinstance(side, u.Quantity):
shape[axis] = int(np.round(size[axis])) # pixels
else:
if side.unit == u.pixel:
shape[axis] = int(np.round(side.value))
elif side.unit.physical_type == 'angle':
if wcs is None:
raise ValueError('wcs must be input if any element '
'of size has angular units')
if pixel_scales is None:
pixel_scales = u.Quantity(
proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis])
shape[axis] = int(np.round(
(side / pixel_scales[axis]).decompose()))
else:
raise ValueError('shape can contain Quantities with only '
'pixel or angular units')
data = np.asanyarray(data)
# reverse position because extract_array and overlap_slices
# use (y, x), but keep the input position
pos_yx = position[::-1]
cutout_data, input_position_cutout = extract_array(
data, tuple(shape), pos_yx, mode=mode, fill_value=fill_value,
return_position=True)
if copy:
cutout_data = np.copy(cutout_data)
self.data = cutout_data
self.input_position_cutout = input_position_cutout[::-1] # (x, y)
slices_original, slices_cutout = overlap_slices(
data.shape, shape, pos_yx, mode=mode)
self.slices_original = slices_original
self.slices_cutout = slices_cutout
self.shape = self.data.shape
self.input_position_original = position
self.shape_input = shape
((self.ymin_original, self.ymax_original),
(self.xmin_original, self.xmax_original)) = self.bbox_original
((self.ymin_cutout, self.ymax_cutout),
(self.xmin_cutout, self.xmax_cutout)) = self.bbox_cutout
# the true origin pixel of the cutout array, including any
# filled cutout values
self._origin_original_true = (
self.origin_original[0] - self.slices_cutout[1].start,
self.origin_original[1] - self.slices_cutout[0].start)
if wcs is not None:
self.wcs = deepcopy(wcs)
self.wcs.wcs.crpix -= self._origin_original_true
self.wcs.array_shape = self.data.shape
if wcs.sip is not None:
self.wcs.sip = Sip(wcs.sip.a, wcs.sip.b,
wcs.sip.ap, wcs.sip.bp,
wcs.sip.crpix - self._origin_original_true)
else:
self.wcs = None
def to_original_position(self, cutout_position):
"""
Convert an ``(x, y)`` position in the cutout array to the original
``(x, y)`` position in the original large array.
Parameters
----------
cutout_position : tuple
The ``(x, y)`` pixel position in the cutout array.
Returns
-------
original_position : tuple
The corresponding ``(x, y)`` pixel position in the original
large array.
"""
return tuple(cutout_position[i] + self.origin_original[i]
for i in [0, 1])
def to_cutout_position(self, original_position):
"""
Convert an ``(x, y)`` position in the original large array to
the ``(x, y)`` position in the cutout array.
Parameters
----------
original_position : tuple
The ``(x, y)`` pixel position in the original large array.
Returns
-------
cutout_position : tuple
The corresponding ``(x, y)`` pixel position in the cutout
array.
"""
return tuple(original_position[i] - self.origin_original[i]
for i in [0, 1])
def plot_on_original(self, ax=None, fill=False, **kwargs):
"""
Plot the cutout region on a matplotlib Axes instance.
Parameters
----------
ax : `matplotlib.axes.Axes` instance, optional
If `None`, then the current `matplotlib.axes.Axes` instance
is used.
fill : bool, optional
Set whether to fill the cutout patch. The default is
`False`.
kwargs : optional
Any keyword arguments accepted by `matplotlib.patches.Patch`.
Returns
-------
ax : `matplotlib.axes.Axes` instance
The matplotlib Axes instance constructed in the method if
``ax=None``. Otherwise the output ``ax`` is the same as the
input ``ax``.
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
kwargs['fill'] = fill
if ax is None:
ax = plt.gca()
height, width = self.shape
hw, hh = width / 2., height / 2.
pos_xy = self.position_original - np.array([hw, hh])
patch = mpatches.Rectangle(pos_xy, width, height, 0., **kwargs)
ax.add_patch(patch)
return ax
@staticmethod
def _calc_center(slices):
"""
Calculate the center position. The center position will be
fractional for even-sized arrays. For ``mode='partial'``, the
central position is calculated for the valid (non-filled) cutout
values.
"""
return tuple(0.5 * (slices[i].start + slices[i].stop - 1)
for i in [1, 0])
@staticmethod
def _calc_bbox(slices):
"""
Calculate a minimal bounding box in the form ``((ymin, ymax),
(xmin, xmax))``. Note these are pixel locations, not slice
indices. For ``mode='partial'``, the bounding box indices are
for the valid (non-filled) cutout values.
"""
# (stop - 1) to return the max pixel location, not the slice index
return ((slices[0].start, slices[0].stop - 1),
(slices[1].start, slices[1].stop - 1))
@lazyproperty
def origin_original(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the original array. For ``mode='partial'``, the
origin pixel is calculated for the valid (non-filled) cutout
values.
"""
return (self.slices_original[1].start, self.slices_original[0].start)
@lazyproperty
def origin_cutout(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the cutout array. For ``mode='partial'``, the origin
pixel is calculated for the valid (non-filled) cutout values.
"""
return (self.slices_cutout[1].start, self.slices_cutout[0].start)
@staticmethod
def _round(a):
"""
Round the input to the nearest integer.
If two integers are equally close, the value is rounded up.
Note that this is different from `np.round`, which rounds halfway
cases to the nearest even number.
"""
return int(np.floor(a + 0.5))
@lazyproperty
def position_original(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the original array.
"""
return (self._round(self.input_position_original[0]),
self._round(self.input_position_original[1]))
@lazyproperty
def position_cutout(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the cutout array.
"""
return (self._round(self.input_position_cutout[0]),
self._round(self.input_position_cutout[1]))
@lazyproperty
def center_original(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the original array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_original)
@lazyproperty
def center_cutout(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the cutout array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_cutout)
@lazyproperty
def bbox_original(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_original)
@lazyproperty
def bbox_cutout(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_cutout)
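# A minimal sketch of the position-conversion helpers above, reusing the
# ``cutout1`` example from the class docstring (illustrative, not from
# the original source):
#
#     >>> import numpy as np
#     >>> data = np.arange(20.).reshape(5, 4)
#     >>> c = Cutout2D(data, (2, 2), (3, 3))
#     >>> c.to_cutout_position((2, 2))
#     (1, 1)
#     >>> c.to_original_position((1, 1))
#     (2, 2)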
|
2a3550cc52868c9b3e9599bb19101dd009351ef68f544fe09eb19ce6ec3c1c49 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains a class equivalent to pre-1.0 NDData.
import numpy as np
from astropy.units import UnitsError, UnitConversionError, Unit
from astropy import log
from .nddata import NDData
from .nduncertainty import NDUncertainty
from .mixins.ndslicing import NDSlicingMixin
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
from .flag_collection import FlagCollection
__all__ = ['NDDataArray']
class NDDataArray(NDArithmeticMixin, NDSlicingMixin, NDIOMixin, NDData):
"""
An ``NDData`` object with arithmetic. This class is functionally equivalent
to ``NDData`` in astropy versions prior to 1.0.
The key distinction from raw numpy arrays is the presence of
additional metadata such as uncertainties, a mask, units, flags,
and/or a coordinate system.
See also: https://docs.astropy.org/en/stable/nddata/
Parameters
----------
data : ndarray or `NDData`
The actual data contained in this `NDData` object. Note that this
will always be copied by *reference*, so you should make a copy of
``data`` before passing it in if that's the desired behavior.
uncertainty : `~astropy.nddata.NDUncertainty`, optional
Uncertainties on the data.
mask : array-like, optional
Mask for the data, given as a boolean Numpy array or any object that
can be converted to a boolean Numpy array with a shape
matching that of the data. The values must be ``False`` where
the data is *valid* and ``True`` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will cause the mask from the masked array to be
ignored.
flags : array-like or `~astropy.nddata.FlagCollection`, optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type (or an object which can be converted
to a Numpy array) with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
wcs : None, optional
WCS-object containing the world coordinate system for the data.
.. warning::
This is not yet defined because the discussion of how best to
represent this class's WCS system generically is still under
consideration. For now just leave it as None
meta : `dict`-like object, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object. e.g., creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.UnitBase` instance or str, optional
The units of the data.
Raises
------
ValueError :
If the `uncertainty` or `mask` inputs cannot be broadcast (e.g., match
shape) onto ``data``.
"""
def __init__(self, data, *args, flags=None, **kwargs):
# Initialize with the parent...
super().__init__(data, *args, **kwargs)
# ...then reset uncertainty to force it to go through the
# setter logic below. In base NDData all that is done is to
# set self._uncertainty to whatever uncertainty is passed in.
self.uncertainty = self._uncertainty
# Same thing for mask.
self.mask = self._mask
# Initialize flags here because they are no longer handled in NDData
# or NDDataBase.
if isinstance(data, NDDataArray):
if flags is None:
flags = data.flags
else:
log.info("Overwriting NDDataArrays's current "
"flags with specified flags")
self.flags = flags
# Implement uncertainty as NDUncertainty to support propagation of
# uncertainties in arithmetic operations
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
class_name = self.__class__.__name__
if not self.unit and value._unit:
# Raise an error if uncertainty has unit and data does not
raise ValueError("Cannot assign an uncertainty with unit "
"to {} without "
"a unit".format(class_name))
self._uncertainty = value
self._uncertainty.parent_nddata = self
else:
raise TypeError("Uncertainty must be an instance of "
"a NDUncertainty object")
else:
self._uncertainty = value
# Override unit so that we can add a setter.
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
from . import conf
try:
if self._unit is not None and conf.warn_setting_unit_directly:
log.info('Setting the unit directly changes the unit without '
'updating the data or uncertainty. Use the '
'.convert_unit_to() method to change the unit and '
'scale values appropriately.')
except AttributeError:
# raised if self._unit has not been set yet, in which case the
# warning is irrelevant
pass
if value is None:
self._unit = None
else:
self._unit = Unit(value)
# Implement mask in a way that converts nicely to a numpy masked array
@property
def mask(self):
if self._mask is np.ma.nomask:
return None
else:
return self._mask
@mask.setter
def mask(self, value):
# Check that value is not either type of null mask.
if (value is not None) and (value is not np.ma.nomask):
mask = np.array(value, dtype=np.bool_, copy=False)
if mask.shape != self.data.shape:
raise ValueError("dimensions of mask do not match data")
else:
self._mask = mask
else:
# internal representation should be one numpy understands
self._mask = np.ma.nomask
@property
def shape(self):
"""
shape tuple of this object's data.
"""
return self.data.shape
@property
def size(self):
"""
integer size of this object's data.
"""
return self.data.size
@property
def dtype(self):
"""
`numpy.dtype` of this object's data.
"""
return self.data.dtype
@property
def ndim(self):
"""
integer dimensions of this object's data
"""
return self.data.ndim
@property
def flags(self):
return self._flags
@flags.setter
def flags(self, value):
if value is not None:
if isinstance(value, FlagCollection):
if value.shape != self.shape:
raise ValueError("dimensions of FlagCollection does not match data")
else:
self._flags = value
else:
flags = np.array(value, copy=False)
if flags.shape != self.shape:
raise ValueError("dimensions of flags do not match data")
else:
self._flags = flags
else:
self._flags = value
def __array__(self):
"""
This allows code that requests a Numpy array to use an NDData
object as a Numpy array.
"""
if self.mask is not None:
return np.ma.masked_array(self.data, self.mask)
else:
return np.array(self.data)
def __array_prepare__(self, array, context=None):
"""
This ensures that a masked array is returned if self is masked.
"""
if self.mask is not None:
return np.ma.masked_array(array, self.mask)
else:
return array
def convert_unit_to(self, unit, equivalencies=[]):
"""
Returns a new `NDData` object whose values have been converted
to a new unit.
Parameters
----------
unit : `astropy.units.UnitBase` instance or str
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Returns
-------
result : `~astropy.nddata.NDData`
The resulting dataset
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
"""
if self.unit is None:
raise ValueError("No unit specified on source data")
data = self.unit.to(unit, self.data, equivalencies=equivalencies)
if self.uncertainty is not None:
uncertainty_values = self.unit.to(unit, self.uncertainty.array,
equivalencies=equivalencies)
# should work for any uncertainty class
uncertainty = self.uncertainty.__class__(uncertainty_values)
else:
uncertainty = None
if self.mask is not None:
new_mask = self.mask.copy()
else:
new_mask = None
# Call __class__ in case we are dealing with an inherited type
result = self.__class__(data, uncertainty=uncertainty,
mask=new_mask,
wcs=self.wcs,
meta=self.meta, unit=unit)
return result
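# A minimal usage sketch of convert_unit_to (illustrative, not from the
# original source):
#
#     >>> import numpy as np
#     >>> ndd = NDDataArray(np.ones(3), unit='m')
#     >>> ndd.convert_unit_to('cm').data  # doctest: +FLOAT_CMP
#     array([100., 100., 100.])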
|
907cde2f5c369e53db847bc491a8851c83631d4ed7b772638c9ffa6706736821 | """
A module that provides functions for manipulating bit masks and data quality
(DQ) arrays.
"""
import warnings
import numbers
from collections import OrderedDict
import numpy as np
__all__ = ['bitfield_to_boolean_mask', 'interpret_bit_flags',
'BitFlagNameMap', 'extend_bit_flag_map', 'InvalidBitFlag']
_ENABLE_BITFLAG_CACHING = True
_MAX_UINT_TYPE = np.maximum_sctype(np.uint)
_SUPPORTED_FLAGS = int(np.bitwise_not(
0, dtype=_MAX_UINT_TYPE, casting='unsafe'
))
def _is_bit_flag(n):
"""
Verifies if the input number is a bit flag (i.e., an integer number that is
an integer power of 2).
Parameters
----------
n : int
A positive integer number. Non-positive integers are considered not to
be "flags".
Returns
-------
bool
``True`` if input ``n`` is a bit flag and ``False`` if it is not.
"""
if n < 1:
return False
return bin(n).count('1') == 1
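# A minimal sketch (illustrative, not from the original source): only
# positive integer powers of two qualify as bit flags.
#
#     >>> _is_bit_flag(8)
#     True
#     >>> _is_bit_flag(12)
#     False
#     >>> _is_bit_flag(0)
#     False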
def _is_int(n):
return (
(isinstance(n, numbers.Integral) and not isinstance(n, bool)) or
(isinstance(n, np.generic) and np.issubdtype(n, np.integer))
)
class InvalidBitFlag(ValueError):
""" Indicates that a value is not an integer that is a power of 2. """
pass
class BitFlag(int):
""" Bit flags: integer values that are powers of 2. """
def __new__(cls, val, doc=None):
if isinstance(val, tuple):
if doc is not None:
raise ValueError("Flag's doc string cannot be provided twice.")
val, doc = val
if not (_is_int(val) and _is_bit_flag(val)):
raise InvalidBitFlag(
"Value '{}' is not a valid bit flag: bit flag value must be "
"an integral power of two.".format(val)
)
s = int.__new__(cls, val)
if doc is not None:
s.__doc__ = doc
return s
class BitFlagNameMeta(type):
def __new__(mcls, name, bases, members):
for k, v in members.items():
if not k.startswith('_'):
v = BitFlag(v)
attr = [k for k in members.keys() if not k.startswith('_')]
attrl = list(map(str.lower, attr))
if _ENABLE_BITFLAG_CACHING:
cache = OrderedDict()
for b in bases:
for k, v in b.__dict__.items():
if k.startswith('_'):
continue
kl = k.lower()
if kl in attrl:
idx = attrl.index(kl)
raise AttributeError("Bit flag '{:s}' was already defined."
.format(attr[idx]))
if _ENABLE_BITFLAG_CACHING:
cache[kl] = v
members = {k: v if k.startswith('_') else BitFlag(v)
for k, v in members.items()}
if _ENABLE_BITFLAG_CACHING:
cache.update({k.lower(): v for k, v in members.items()
if not k.startswith('_')})
members = {'_locked': True, '__version__': '', **members,
'_cache': cache}
else:
members = {'_locked': True, '__version__': '', **members}
return super().__new__(mcls, name, bases, members)
def __setattr__(cls, name, val):
if name == '_locked':
return super().__setattr__(name, True)
else:
if name == '__version__':
if cls._locked:
raise AttributeError("Version cannot be modified.")
return super().__setattr__(name, val)
err_msg = f"Bit flags are read-only. Unable to reassign attribute {name}"
if cls._locked:
raise AttributeError(err_msg)
namel = name.lower()
if _ENABLE_BITFLAG_CACHING:
if not namel.startswith('_') and namel in cls._cache:
raise AttributeError(err_msg)
else:
for b in cls.__bases__:
if not namel.startswith('_') and namel in list(map(str.lower, b.__dict__)):
raise AttributeError(err_msg)
if namel in list(map(str.lower, cls.__dict__)):
raise AttributeError(err_msg)
val = BitFlag(val)
if _ENABLE_BITFLAG_CACHING and not namel.startswith('_'):
cls._cache[namel] = val
return super().__setattr__(name, val)
def __getattr__(cls, name):
if _ENABLE_BITFLAG_CACHING:
flagnames = cls._cache
else:
flagnames = {k.lower(): v for k, v in cls.__dict__.items()}
flagnames.update({k.lower(): v for b in cls.__bases__
for k, v in b.__dict__.items()})
try:
return flagnames[name.lower()]
except KeyError:
raise AttributeError(f"Flag '{name}' not defined")
def __getitem__(cls, key):
return cls.__getattr__(key)
def __add__(cls, items):
if not isinstance(items, dict):
if not isinstance(items[0], (tuple, list)):
items = [items]
items = dict(items)
return extend_bit_flag_map(
cls.__name__ + '_' + '_'.join([k for k in items]),
cls,
**items
)
def __iadd__(cls, other):
raise NotImplementedError(
"Unary '+' is not supported. Use binary operator instead."
)
def __delattr__(cls, name):
raise AttributeError("{:s}: cannot delete {:s} member."
.format(cls.__name__, cls.mro()[-2].__name__))
def __delitem__(cls, name):
raise AttributeError("{:s}: cannot delete {:s} member."
.format(cls.__name__, cls.mro()[-2].__name__))
def __repr__(cls):
return f"<{cls.mro()[-2].__name__:s} '{cls.__name__:s}'>"
class BitFlagNameMap(metaclass=BitFlagNameMeta):
"""
A base class for bit flag name maps used to describe data quality (DQ)
    flags of images by providing a mapping from a mnemonic flag name to a flag
value.
Mapping for a specific instrument should subclass this class.
Subclasses should define flags as class attributes with integer values
that are powers of 2. Each bit flag may also contain a string
comment following the flag value.
Examples
--------
>>> from astropy.nddata.bitmask import BitFlagNameMap
>>> class ST_DQ(BitFlagNameMap):
... __version__ = '1.0.0' # optional
... CR = 1, 'Cosmic Ray'
... CLOUDY = 4 # no docstring comment
... RAINY = 8, 'Dome closed'
...
>>> class ST_CAM1_DQ(ST_DQ):
... HOT = 16
... DEAD = 32
"""
pass
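# Name lookup through ``BitFlagNameMeta`` is case-insensitive (see
# ``__getattr__``/``__getitem__`` above), so for the ``ST_DQ`` example in the
# docstring ``ST_DQ.CR``, ``ST_DQ.cr`` and ``ST_DQ['cr']`` all evaluate to 1.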
def extend_bit_flag_map(cls_name, base_cls=BitFlagNameMap, **kwargs):
"""
A convenience function for creating bit flags maps by subclassing an
existing map and adding additional flags supplied as keyword arguments.
Parameters
----------
cls_name : str
Class name of the bit flag map to be created.
base_cls : BitFlagNameMap, optional
Base class for the new bit flag map.
**kwargs : int
Each supplied keyword argument will be used to define bit flag
names in the new map. In addition to bit flag names, ``__version__`` is
allowed to indicate the version of the newly created map.
Examples
--------
>>> from astropy.nddata.bitmask import extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8)
>>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32)
>>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys
16
>>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes
16
"""
new_cls = BitFlagNameMeta.__new__(
BitFlagNameMeta,
cls_name,
(base_cls, ),
{'_locked': False}
)
for k, v in kwargs.items():
try:
setattr(new_cls, k, v)
except AttributeError as e:
if new_cls[k] != int(v):
raise e
new_cls._locked = True
return new_cls
def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None):
"""
Converts input bit flags to a single integer value (bit mask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma-, ``'|'``-, or ``'+'``-separated list of flags),
the returned bit mask is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
        ``flip_bits`` to `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
``'+'``-separated list of integer bit flags or mnemonic flag names,
or a Python list of integer bit flags. If ``bit_flags`` is a `str`
and if it is prepended with '~', then the output bit mask will have
its bits flipped (compared to simple sum of input flags).
For input ``bit_flags`` that is already a bit mask or a Python list
of bit flags, bit-flipping can be controlled through ``flip_bits``
parameter.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
.. note::
Only one flag separator is supported at a time. ``bit_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
flag_name_map : BitFlagNameMap
        A `BitFlagNameMap` object that provides a mapping from mnemonic
        bit flag names to integer bit values in order to translate mnemonic
        flags to numeric values when ``bit_flags`` is a comma- or
        '+'-separated list of mnemonic bit flag names.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
... flag_name_map=ST_DQ))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return (~int(bit_flags) if flip_bits else int(bit_flags))
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, str):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ['', 'NONE', 'INDEF']:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find('~')
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count('(')
nrpar = bit_flags.count(')')
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parentheses in bit flag list.")
lpar_pos = bit_flags.find('(')
rpar_pos = bit_flags.rfind(')')
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError("Incorrect syntax (incorrect use of "
"parenthesis) in bit flag list.")
bit_flags = bit_flags[1:-1].strip()
if sum(k in bit_flags for k in '+,|') > 1:
raise ValueError(
"Only one type of bit flag separator may be used in one "
"expression. Allowed separators are: '+', '|', or ','."
)
if ',' in bit_flags:
bit_flags = bit_flags.split(',')
elif '+' in bit_flags:
bit_flags = bit_flags.split('+')
elif '|' in bit_flags:
bit_flags = bit_flags.split('|')
else:
if bit_flags == '':
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
if flag_name_map is not None:
try:
int(bit_flags[0])
except ValueError:
bit_flags = [flag_name_map[f] for f in bit_flags]
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, '__iter__'):
if not all([_is_int(flag) for flag in bit_flags]):
if (flag_name_map is not None and all([isinstance(flag, str)
for flag in bit_flags])):
bit_flags = [flag_name_map[f] for f in bit_flags]
else:
raise TypeError("Every bit flag in a list must be either an "
"integer flag value or a 'str' flag name.")
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not _is_bit_flag(v) and not allow_non_flags:
raise ValueError("Input list contains invalid (not powers of two) "
"bit flag: {:d}".format(v))
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask
def bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None,
good_mask_value=False, dtype=np.bool_,
flag_name_map=None):
"""
    bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, \
good_mask_value=False, dtype=numpy.bool_, flag_name_map=None)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (default = 0)
        An integer bit mask, a Python list of bit flags, or a comma-,
        ``'+'``-, or ``'|'``-separated string list of integer bit flags or
        mnemonic flag names that indicate which bits in the input
        ``bitfield`` should be *ignored* (i.e., zeroed), or `None`.
.. note::
            When ``ignore_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
| Setting ``ignore_flags`` to `None` effectively will make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as a "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 effectively will assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
``'+'``(or ``'|'``)-separated list of integer bit flags that should
be added (bitwise OR) together to create an integer bit mask.
For example, both ``'4,8'``, ``'4|8'``, and ``'4+8'`` are equivalent
and indicate that bit flags 4 and 8 in the input ``bitfield``
array should be ignored when generating boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
Only one flag separator is supported at a time. ``ignore_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
        or 1 (if ``dtype`` is of numerical type) and values corresponding
        to "bad" flags will be ``numpy.False_`` (or 0). When
``good_mask_value`` is zero or ``numpy.False_`` then the values
in the output boolean mask array corresponding to "good" bit fields
in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
        ``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
        corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
flag_name_map : BitFlagNameMap
        A `BitFlagNameMap` object that provides a mapping from mnemonic
        bit flag names to integer bit values in order to translate mnemonic
        flags to numeric values when ``ignore_flags`` is a comma- or
        '+'-separated list of mnemonic bit flag names.
Returns
-------
mask : ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
        ``dtype``) according to the values of the input ``bitfield`` elements,
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> flag_map = bitmask.extend_bit_flag_map(
... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32
... )
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(ignore_flags, flip_bits=flip_bits,
flag_name_map=flag_name_map)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & _SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(ignore_mask, dtype=bitfield.dtype.type,
casting='unsafe')
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting='unsafe')
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
|
a652c11af8c77636655f20b94eb3eee2f33077e19981a9ca2202fb6ad2b3efac | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import itertools
import numpy as np
from .compat import NDDataArray
from .nduncertainty import (
StdDevUncertainty, NDUncertainty, VarianceUncertainty, InverseVariance)
from astropy.io import fits, registry
from astropy import units as u
from astropy import log
from astropy.wcs import WCS
from astropy.utils.decorators import sharedmethod
__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']
_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarily disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand,
operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
return sharedmethod(inner)
return decorator
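# Usage pattern (as applied to the CCDData arithmetic methods further below)::
#
#     add = _arithmetic(np.add)(NDDataArray.add)
#
# i.e. the NDArithmeticMixin method is re-wrapped so that the intermediate
# result may temporarily lack a unit.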
def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
if uncertainty_type is StdDevUncertainty:
return unit == parent_unit
elif uncertainty_type is VarianceUncertainty:
return unit == (parent_unit ** 2)
elif uncertainty_type is InverseVariance:
return unit == (1 / (parent_unit ** 2))
raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
----------
data : `~astropy.nddata.CCDData`-like or array-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, \
`~astropy.nddata.VarianceUncertainty`, \
`~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
None, optional
        Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
        is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
            If the unit is ``None``, or not otherwise specified, a
            ``ValueError`` will be raised.
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
        ``Classmethod`` to create a CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Attributes
----------
known_invalid_fits_unit_strings
A dictionary that maps commonly-used fits unit name strings that are
technically invalid to the correct valid unit type (or unit string).
This is primarily for variant names like "ELECTRONS/S" which are not
formally valid, but are unambiguous and frequently enough encountered
that it is convenient to map them to the correct unit.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if 'meta' not in kwd:
kwd['meta'] = kwd.pop('header', None)
if 'header' in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
if self._wcs is not None:
llwcs = self._wcs.low_level_wcs
if not isinstance(llwcs, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = llwcs
# Check if a unit is set. This can be temporarily disabled by the
# _CCDDataUnit contextmanager.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
if value is not None and not isinstance(value, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, '_parent_nddata', None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as "
"data.")
self._uncertainty = StdDevUncertainty(value)
log.info("array provided for uncertainty; assuming it is a "
"StdDevUncertainty.")
else:
raise TypeError("uncertainty must be an instance of a "
"NDUncertainty object or a numpy array.")
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, wcs_relax=True, key_uncertainty_type='UTYPE'):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
            ``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a astropy uncertainty type.
            - If ``self.uncertainty`` is set but has another unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
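
        Examples
        --------
        A minimal sketch (array values and unit chosen only for
        illustration)::

            >>> import numpy as np
            >>> ccd = CCDData(np.ones((2, 2)), unit='adu')
            >>> hdulist = ccd.to_hdu()
            >>> hdulist[0].header['BUNIT']
            'adu'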
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header['bunit'] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, 'shape'):
raise ValueError('only a numpy.ndarray mask can be saved.')
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
            # We need to save some kind of information about which uncertainty
            # class was used so that loading the HDUList can infer the
            # uncertainty type. The class name is stored in the header under
            # ``key_uncertainty_type``; only the known uncertainty classes
            # are allowed.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError('only uncertainties of type {} can be saved.'
.format(_known_uncertainties))
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
            # Only the uncertainty array itself is saved, which is problematic
            # if the uncertainty has a unit differing from (the appropriate
            # power of) the data unit, so abort for non-equivalent units.
            # This is important for astropy > 1.2.
if (hasattr(self.uncertainty, 'unit') and
self.uncertainty.unit is not None):
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit):
raise ValueError(
'saving uncertainties with a unit that is not '
'equivalent to the unit from the data unit is not '
'supported.')
hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty,
name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError('adding the flags to a HDU is not '
'supported at this time.')
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
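
        For example (a sketch): a key ``'dark_current_subtracted'`` paired
        with a value longer than 72 characters would be stored under the
        shortened key ``'dark_cur'``, with a ``HIERARCH`` card recording the
        full keyword name.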
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta[f'HIERARCH {key.upper()}'] = (
short_name, f"Shortened name for {key}")
self.meta[short_name] = value
else:
self.meta[key] = value
# A dictionary mapping "known" invalid fits unit
known_invalid_fits_unit_strings = {'ELECTRONS/S': u.electron/u.s,
'ELECTRONS': u.electron,
'electrons': u.electron}
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = [
'JD-OBS',
'MJD-OBS',
'DATE-OBS'
]
_PCs = set(['PC1_1', 'PC1_2', 'PC2_1', 'PC2_2'])
_CDs = set(['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2'])
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
log.info('An exception happened while extracting WCS information from '
'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
# Check that this does not result in an inconsistent header WCS if the WCS
# is converted back to a header.
if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
# The PCi_j representation is used by the astropy.wcs object,
# so CDi_j keywords were not removed from new_hdr. Remove them now.
for cd in _CDs:
new_hdr.remove(cd, ignore_missing=True)
# The other case -- CD in the header produced by astropy.wcs -- should
# never happen based on [1], which computes the matrix in PC form.
# [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
#
# The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
# check for the possibility that both PC and CD are present in the result
# so if the implementation of to_header changes in wcslib in the future
# then the tests should catch it, and then this code will need to be
# updated.
# We need to check for any SIP coefficients that got left behind if the
# header has SIP.
if wcs.sip is not None:
keyword = '{}_{}_{}'
polynomials = ['A', 'B', 'AP', 'BP']
for poly in polynomials:
order = wcs.sip.__getattribute__(f'{poly.lower()}_order')
for i, j in itertools.product(range(order), repeat=2):
new_hdr.remove(keyword.format(poly, i, j),
ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',
hdu_mask='MASK', hdu_flags=None,
key_uncertainty_type='UTYPE', **kwd):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, str, tuple of (str, int), optional
Index or other identifier of the Header Data Unit of the FITS
        file from which CCDData should be initialized. If zero and
        there is no data in the primary HDU, it will search for the first
        extension HDU with data; header keywords from the primary HDU are
        merged into that extension's header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
        Units of the image data. If this argument is provided and there is
        also a unit for the image in the FITS header (the ``BUNIT`` keyword,
        if present), this argument takes precedence.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
    FITS files that contain scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
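    Examples
    --------
    A typical call (a sketch; the file name is hypothetical)::

        >>> ccd = fits_ccddata_reader('image.fits', unit='adu')  # doctest: +SKIP

    Through the I/O registry (see the bottom of this module) this is also what
    ``CCDData.read('image.fits', unit='adu')`` dispatches to.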
"""
unsupport_open_keywords = {
'do_not_scale_image_data': 'Image data must be scaled.',
'scale_back': 'Scale information is not preserved.'
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = f'unsupported keyword: {key}.'
raise TypeError(' '.join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, 'None')
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError('loading flags is currently not '
'supported.')
# search for the first instance with data if
# the primary header is empty.
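        # Note: ``hdus.info(hdu)`` below relies on ``hdu == 0`` being falsy,
        # so that ``HDUList.info`` returns its summary as a list instead of
        # printing it.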
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (hdus.info(hdu)[i][3] == 'ImageHDU' and
hdus.fileinfo(i)['datSpan'] > 0):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info(f"first HDU with data is extension {hdu}.")
break
if 'bunit' in hdr:
fits_unit_string = hdr['bunit']
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == 'adu':
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
kifus = CCDData.known_invalid_fits_unit_strings
if fits_unit_string in kifus:
fits_unit_string = kifus[fits_unit_string]
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
'The Header value for the key BUNIT ({}) cannot be '
'interpreted as valid unit. To successfully read the '
'file as CCDData you can pass in a valid `unit` '
'argument explicitly or change the header of the FITS '
'file before reading it.'
.format(fits_unit_string))
else:
log.info("using the unit {} passed to the FITS reader instead "
"of the unit {} in the FITS file."
.format(unit, fits_unit_string))
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
mask=mask, uncertainty=uncertainty, wcs=wcs)
return ccd_data
def fits_ccddata_writer(
ccd_data, filename, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, key_uncertainty_type='UTYPE', **kwd):
"""
Write CCDData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type, hdu_flags=hdu_flags)
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer('fits', CCDData, fits_ccddata_writer)
registry.register_identifier('fits', CCDData, fits.connect.is_fits)
|
18876075cd3805976c17b2d3df95dc03fdc9190bb87669f7d4d3106d28f55ea4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDData class.
import numpy as np
from copy import deepcopy
from .nddata_base import NDDataBase
from .nduncertainty import NDUncertainty, UnknownUncertainty
from astropy import log
from astropy.units import Unit, Quantity
from astropy.utils.metadata import MetaData
from astropy.wcs.wcsapi import (BaseLowLevelWCS, BaseHighLevelWCS,
SlicedLowLevelWCS, HighLevelWCSWrapper)
__all__ = ['NDData']
_meta_doc = """`dict`-like : Additional meta information about the dataset."""
class NDData(NDDataBase):
"""
A container for `numpy.ndarray`-based datasets, using the
`~astropy.nddata.NDDataBase` interface.
The key distinction from raw `numpy.ndarray` is the presence of
additional metadata such as uncertainty, mask, unit, a coordinate system
and/or a dictionary containing further meta information. This class *only*
provides a container for *storing* such datasets. For further functionality
take a look at the ``See also`` section.
See also: https://docs.astropy.org/en/stable/nddata/
Parameters
----------
data : `numpy.ndarray`-like or `NDData`-like
The dataset.
uncertainty : any type, optional
Uncertainty in the dataset.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, for example ``"std"`` for standard deviation or
``"var"`` for variance. A metaclass defining such an interface is
`NDUncertainty` - but isn't mandatory. If the uncertainty has no such
attribute the uncertainty is stored as `UnknownUncertainty`.
Defaults to ``None``.
mask : any type, optional
Mask for the dataset. Masks should follow the ``numpy`` convention that
**valid** data points are marked by ``False`` and **invalid** ones with
``True``.
Defaults to ``None``.
wcs : any type, optional
World coordinate system (WCS) for the dataset.
Default is ``None``.
meta : `dict`-like object, optional
Additional meta information about the dataset. If no meta is provided
an empty `collections.OrderedDict` is created.
Default is ``None``.
unit : unit-like, optional
Unit for the dataset. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the arguments as copy. ``True`` copies
every attribute before saving it while ``False`` tries to save every
parameter as reference.
Note however that it is not always possible to save the input as
reference.
Default is ``False``.
.. versionadded:: 1.2
Raises
------
TypeError
In case ``data`` or ``meta`` don't meet the restrictions.
Notes
-----
Each attribute can be accessed through the homonymous instance attribute:
``data`` in a `NDData` object can be accessed through the `data`
attribute::
>>> from astropy.nddata import NDData
>>> nd = NDData([1,2,3])
>>> nd.data
array([1, 2, 3])
Given a conflicting implicit and an explicit parameter during
initialization, for example the ``data`` is a `~astropy.units.Quantity` and
the unit parameter is not ``None``, then the implicit parameter is replaced
(without conversion) by the explicit one and a warning is issued::
>>> import numpy as np
>>> import astropy.units as u
>>> q = np.array([1,2,3,4]) * u.m
>>> nd2 = NDData(q, unit=u.cm)
INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata]
>>> nd2.data # doctest: +FLOAT_CMP
array([1., 2., 3., 4.])
>>> nd2.unit
Unit("cm")
See also
--------
NDDataRef
NDDataArray
"""
# Instead of a custom property use the MetaData descriptor also used for
# Tables. It will check if the meta is dict-like or raise an exception.
meta = MetaData(doc=_meta_doc, copy=False)
def __init__(self, data, uncertainty=None, mask=None, wcs=None,
meta=None, unit=None, copy=False):
        # Rather pointless since NDDataBase does not implement any setting,
        # but an earlier version of NDDataBase did call the uncertainty
        # setter. If anyone wants to alter this behavior again, the call
        # to the superclass NDDataBase should go in here.
super().__init__()
# Check if data is any type from which to collect some implicitly
# passed parameters.
if isinstance(data, NDData): # don't use self.__class__ (issue #4137)
# Of course we need to check the data because subclasses with other
# init-logic might be passed in here. We could skip these
# tests if we compared for self.__class__ but that has other
# drawbacks.
# Comparing if there is an explicit and an implicit unit parameter.
# If that is the case use the explicit one and issue a warning
# that there might be a conflict. In case there is no explicit
# unit just overwrite the unit parameter with the NDData.unit
# and proceed as if that one was given as parameter. Same for the
# other parameters.
if (unit is not None and data.unit is not None and
unit != data.unit):
log.info("overwriting NDData's current "
"unit with specified unit.")
elif data.unit is not None:
unit = data.unit
if uncertainty is not None and data.uncertainty is not None:
log.info("overwriting NDData's current "
"uncertainty with specified uncertainty.")
elif data.uncertainty is not None:
uncertainty = data.uncertainty
if mask is not None and data.mask is not None:
log.info("overwriting NDData's current "
"mask with specified mask.")
elif data.mask is not None:
mask = data.mask
if wcs is not None and data.wcs is not None:
log.info("overwriting NDData's current "
"wcs with specified wcs.")
elif data.wcs is not None:
wcs = data.wcs
if meta is not None and data.meta is not None:
log.info("overwriting NDData's current "
"meta with specified meta.")
elif data.meta is not None:
meta = data.meta
data = data.data
else:
if hasattr(data, 'mask') and hasattr(data, 'data'):
# Separating data and mask
if mask is not None:
log.info("overwriting Masked Objects's current "
"mask with specified mask.")
else:
mask = data.mask
# Just save the data for further processing, we could be given
# a masked Quantity or something else entirely. Better to check
# it first.
data = data.data
if isinstance(data, Quantity):
if unit is not None and unit != data.unit:
log.info("overwriting Quantity's current "
"unit with specified unit.")
else:
unit = data.unit
data = data.value
# Quick check on the parameters if they match the requirements.
if (not hasattr(data, 'shape') or not hasattr(data, '__getitem__') or
not hasattr(data, '__array__')):
# Data doesn't look like a numpy array, try converting it to
# one.
data = np.array(data, subok=True, copy=False)
# Another quick check to see if what we got looks like an array
# rather than an object (since numpy will convert a
# non-numerical/non-string inputs to an array of objects).
if data.dtype == 'O':
raise TypeError("could not convert data to numpy array.")
if unit is not None:
unit = Unit(unit)
if copy:
# Data might have been copied before but no way of validating
# without another variable.
data = deepcopy(data)
mask = deepcopy(mask)
wcs = deepcopy(wcs)
meta = deepcopy(meta)
uncertainty = deepcopy(uncertainty)
# Actually - copying the unit is unnecessary but better safe
# than sorry :-)
unit = deepcopy(unit)
# Store the attributes
self._data = data
self.mask = mask
self._wcs = None
if wcs is not None:
# Validate the wcs
self.wcs = wcs
self.meta = meta # TODO: Make this call the setter sometime
self._unit = unit
# Call the setter for uncertainty to further check the uncertainty
self.uncertainty = uncertainty
def __str__(self):
data = str(self.data)
unit = f" {self.unit}" if self.unit is not None else ''
return data + unit
def __repr__(self):
prefix = self.__class__.__name__ + '('
data = np.array2string(self.data, separator=', ', prefix=prefix)
unit = f", unit='{self.unit}'" if self.unit is not None else ''
return ''.join((prefix, data, unit, ')'))
@property
def data(self):
"""
`~numpy.ndarray`-like : The stored dataset.
"""
return self._data
@property
def mask(self):
"""
any type : Mask for the dataset, if any.
Masks should follow the ``numpy`` convention that valid data points are
marked by ``False`` and invalid ones with ``True``.
"""
return self._mask
@mask.setter
def mask(self, value):
self._mask = value
@property
def unit(self):
"""
`~astropy.units.Unit` : Unit for the dataset, if any.
"""
return self._unit
@property
def wcs(self):
"""
any type : A world coordinate system (WCS) for the dataset, if any.
"""
return self._wcs
@wcs.setter
def wcs(self, wcs):
if self._wcs is not None and wcs is not None:
raise ValueError("You can only set the wcs attribute with a WCS if no WCS is present.")
if wcs is None or isinstance(wcs, BaseHighLevelWCS):
self._wcs = wcs
elif isinstance(wcs, BaseLowLevelWCS):
self._wcs = HighLevelWCSWrapper(wcs)
else:
raise TypeError("The wcs argument must implement either the high or"
" low level WCS API.")
@property
def uncertainty(self):
"""
any type : Uncertainty in the dataset, if any.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, such as ``'std'`` for standard deviation or
``'var'`` for variance. A metaclass defining such an interface is
`~astropy.nddata.NDUncertainty` but isn't mandatory.
"""
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
            # There is one requirement on the uncertainty: that
# it has an attribute 'uncertainty_type'.
# If it does not match this requirement convert it to an unknown
# uncertainty.
if not hasattr(value, 'uncertainty_type'):
log.info('uncertainty should have attribute uncertainty_type.')
value = UnknownUncertainty(value, copy=False)
# If it is a subclass of NDUncertainty we must set the
# parent_nddata attribute. (#4152)
if isinstance(value, NDUncertainty):
# In case the uncertainty already has a parent create a new
# instance because we need to assume that we don't want to
# steal the uncertainty from another NDData object
if value._parent_nddata is not None:
value = value.__class__(value, copy=False)
# Then link it to this NDData instance (internally this needs
# to be saved as weakref but that's done by NDUncertainty
# setter).
value.parent_nddata = self
self._uncertainty = value
|
ff59883f22824c331420b9264d4b127e61a4f98e8868562aef3a7eedf1c0455b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
import numpy as np
from astropy.utils.misc import isiterable
__all__ = ['FlagCollection']
class FlagCollection(OrderedDict):
"""
The purpose of this class is to provide a dictionary for
containing arrays of flags for the `NDData` class. Flags should be
stored in Numpy arrays that have the same dimensions as the parent
data, so the `FlagCollection` class adds shape checking to an
ordered dictionary class.
The `FlagCollection` should be initialized like an
`~collections.OrderedDict`, but with the addition of a ``shape=``
keyword argument used to pass the NDData shape.
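
    Examples
    --------
    A minimal illustration of the shape checking described above:

    >>> import numpy as np
    >>> flags = FlagCollection(shape=(2, 3))
    >>> flags['saturated'] = np.zeros((2, 3), dtype=bool)
    >>> flags['saturated'].shape
    (2, 3)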
"""
def __init__(self, *args, **kwargs):
if 'shape' in kwargs:
self.shape = kwargs.pop('shape')
if not isiterable(self.shape):
raise ValueError("FlagCollection shape should be "
"an iterable object")
else:
raise Exception("FlagCollection should be initialized with "
"the shape of the data")
OrderedDict.__init__(self, *args, **kwargs)
def __setitem__(self, item, value, **kwargs):
if isinstance(value, np.ndarray):
if value.shape == self.shape:
OrderedDict.__setitem__(self, item, value, **kwargs)
else:
raise ValueError("flags array shape {} does not match data "
"shape {}".format(value.shape, self.shape))
else:
raise TypeError("flags should be given as a Numpy array")
|
07114ad1ca82fdc4b4525fe065d85d344a7b6b7e245d8e74545cb1e08cb5df54 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing utilities. Not part of the public API!"""
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
def assert_wcs_seem_equal(wcs1, wcs2):
"""Just checks a few attributes to make sure wcs instances seem to be
equal.
"""
if wcs1 is None and wcs2 is None:
return
assert wcs1 is not None
assert wcs2 is not None
if isinstance(wcs1, BaseHighLevelWCS):
wcs1 = wcs1.low_level_wcs
if isinstance(wcs2, BaseHighLevelWCS):
wcs2 = wcs2.low_level_wcs
assert isinstance(wcs1, WCS)
assert isinstance(wcs2, WCS)
if wcs1 is wcs2:
return
assert wcs1.wcs.compare(wcs2.wcs)
def _create_wcs_simple(naxis, ctype, crpix, crval, cdelt):
wcs = WCS(naxis=naxis)
wcs.wcs.crpix = crpix
wcs.wcs.crval = crval
wcs.wcs.cdelt = cdelt
wcs.wcs.ctype = ctype
return wcs
def create_two_equal_wcs(naxis):
return [
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis),
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis)
]
def create_two_unequal_wcs(naxis):
return [
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis),
_create_wcs_simple(
naxis=naxis, ctype=["m"]*naxis, crpix=[20]*naxis,
crval=[20]*naxis, cdelt=[2]*naxis),
]
|
7e668bae4f20258a9698f2b39e14f5cec0a4e6266290b947aeb0c0867defa288 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
from inspect import signature
from itertools import islice
import warnings
from functools import wraps
from astropy.utils.exceptions import AstropyUserWarning
from .nddata import NDData
__all__ = ['support_nddata']
# All supported properties are optional except "data" which is mandatory!
SUPPORTED_PROPERTIES = ['data', 'uncertainty', 'mask', 'meta', 'unit', 'wcs',
'flags']
def support_nddata(_func=None, accepts=NDData,
repack=False, returns=None, keeps=None,
**attribute_argument_mapping):
"""Decorator to wrap functions that could accept an NDData instance with
its properties passed as function arguments.
Parameters
----------
_func : callable, None, optional
The function to decorate or ``None`` if used as factory. The first
positional argument should be ``data`` and take a numpy array. It is
possible to overwrite the name, see ``attribute_argument_mapping``
argument.
Default is ``None``.
accepts : class, optional
The class or subclass of ``NDData`` that should be unpacked before
calling the function.
Default is ``NDData``
repack : bool, optional
Should be ``True`` if the return should be converted to the input
class again after the wrapped function call.
Default is ``False``.
.. note::
Must be ``True`` if either one of ``returns`` or ``keeps``
is specified.
returns : iterable, None, optional
        An iterable containing strings that name, in order, the attributes
        to which the returned values should be assigned on the repacked
        class. For example, if a function returns data and mask, this
should be ``['data', 'mask']``. If ``None`` assume the function only
returns one argument: ``'data'``.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
    keeps : iterable, None, optional
An iterable containing strings that indicate which values should be
copied from the original input to the returned class. If ``None``
assume that no attributes are copied.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
attribute_argument_mapping :
Keyword parameters that optionally indicate which function argument
should be interpreted as which attribute on the input. By default
it assumes the function takes a ``data`` argument as first argument,
but if the first argument is called ``input`` one should pass
``support_nddata(..., data='input')`` to the function.
Returns
-------
decorator_factory or decorated_function : callable
If ``_func=None`` this returns a decorator, otherwise it returns the
decorated ``_func``.
Notes
-----
    If a property of the ``NDData`` is set but has no corresponding function
    argument, a warning is shown.
    If a property of the ``NDData`` is set and an explicit argument is also
    given, the explicitly given argument is used and a warning is shown.
The supported properties are:
- ``mask``
- ``unit``
- ``wcs``
- ``meta``
- ``uncertainty``
- ``flags``
Examples
--------
This function takes a Numpy array for the data, and some WCS information
with the ``wcs`` keyword argument::
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
However, you might have an NDData instance that has the ``wcs`` property
set and you would like to be able to call the function with
``downsample(my_nddata)`` and have the WCS information, if present,
automatically be passed to the ``wcs`` keyword argument.
This decorator can be used to make this possible::
@support_nddata
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
This function can now either be called as before, specifying the data and
WCS separately, or an NDData instance can be passed to the ``data``
argument.
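
    For example (a minimal doctest sketch, added for illustration)::

        >>> import numpy as np
        >>> from astropy.nddata import NDData, support_nddata
        >>> @support_nddata
        ... def downsample(data, wcs=None):
        ...     return data[::2]
        >>> downsample(NDData(np.arange(4)))
        array([0, 2])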
"""
if (returns is not None or keeps is not None) and not repack:
raise ValueError('returns or keeps should only be set if repack=True.')
elif returns is None and repack:
raise ValueError('returns should be set if repack=True.')
else:
# Use empty lists for returns and keeps so we don't need to check
# if any of those is None later on.
if returns is None:
returns = []
if keeps is None:
keeps = []
# Short version to avoid the long variable name later.
attr_arg_map = attribute_argument_mapping
if any(keep in returns for keep in keeps):
raise ValueError("cannot specify the same attribute in `returns` and "
"`keeps`.")
all_returns = returns + keeps
def support_nddata_decorator(func):
# Find out args and kwargs
func_args, func_kwargs = [], []
sig = signature(func).parameters
for param_name, param in sig.items():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("func may not have *args or **kwargs.")
try:
if param.default == param.empty:
func_args.append(param_name)
else:
func_kwargs.append(param_name)
# The comparison to param.empty may fail if the default is a
# numpy array or something similar. So if the comparison fails then
# it's quite obvious that there was a default and it should be
# appended to the "func_kwargs".
except ValueError as exc:
if ('The truth value of an array with more than one element '
'is ambiguous.') in str(exc):
func_kwargs.append(param_name)
else:
raise
# First argument should be data
if not func_args or func_args[0] != attr_arg_map.get('data', 'data'):
raise ValueError("Can only wrap functions whose first positional "
"argument is `{}`"
"".format(attr_arg_map.get('data', 'data')))
@wraps(func)
def wrapper(data, *args, **kwargs):
bound_args = signature(func).bind(data, *args, **kwargs)
unpack = isinstance(data, accepts)
input_data = data
ignored = []
if not unpack and isinstance(data, NDData):
raise TypeError("Only NDData sub-classes that inherit from {}"
" can be used by this function"
"".format(accepts.__name__))
# If data is an NDData instance, we can try and find properties
# that can be passed as kwargs.
if unpack:
# We loop over a list of pre-defined properties
for prop in islice(SUPPORTED_PROPERTIES, 1, None):
# We only need to do something if the property exists on
# the NDData object
try:
value = getattr(data, prop)
except AttributeError:
continue
# Skip if the property exists but is None or empty.
if prop == 'meta' and not value:
continue
elif value is None:
continue
# Warn if the property is set but not used by the function.
propmatch = attr_arg_map.get(prop, prop)
if propmatch not in func_kwargs:
ignored.append(prop)
continue
# Check if the property was explicitly given and issue a
# Warning if it is.
if propmatch in bound_args.arguments:
# If it's in the func_args it's trivial but if it was
# in the func_kwargs we need to compare it to the
# default.
# Comparison to the default is done by comparing their
# identity, this works because defaults in function
# signatures are only created once and always reference
# the same item.
                        # FIXME: Python interns some values, for example the
                        # small integers from -5 to 256 (and maybe some other
                        # types as well). In that case the default is
# indistinguishable from an explicitly passed kwarg
# and it won't notice that and use the attribute of the
# NDData.
if (propmatch in func_args or
(propmatch in func_kwargs and
(bound_args.arguments[propmatch] is not
sig[propmatch].default))):
warnings.warn(
"Property {} has been passed explicitly and "
"as an NDData property{}, using explicitly "
"specified value"
"".format(propmatch, '' if prop == propmatch
else ' ' + prop),
AstropyUserWarning)
continue
# Otherwise use the property as input for the function.
kwargs[propmatch] = value
# Finally, replace data by the data attribute
data = data.data
if ignored:
warnings.warn("The following attributes were set on the "
"data object, but will be ignored by the "
"function: " + ", ".join(ignored),
AstropyUserWarning)
result = func(data, *args, **kwargs)
if unpack and repack:
                # If there are multiple expected return values, make sure
                # the result is a tuple of the same length (we deliberately
                # avoid unpacking numpy arrays or comparing their lengths
                # here).
                if len(returns) > 1:
                    if (not isinstance(result, tuple) or
                            len(returns) != len(result)):
                        raise ValueError("Function did not return the "
                                         "expected number of arguments.")
                    # Convert to a list so the attributes listed in "keeps"
                    # can be appended below (tuples are immutable).
                    result = list(result)
elif len(returns) == 1:
result = [result]
if keeps is not None:
for keep in keeps:
result.append(deepcopy(getattr(input_data, keep)))
resultdata = result[all_returns.index('data')]
resultkwargs = {ret: res
for ret, res in zip(all_returns, result)
if ret != 'data'}
return input_data.__class__(resultdata, **resultkwargs)
else:
return result
return wrapper
# If _func is set, this means that the decorator was used without
# parameters so we have to return the result of the
# support_nddata_decorator decorator rather than the decorator itself
if _func is not None:
return support_nddata_decorator(_func)
else:
return support_nddata_decorator
|
755afc3ac9cc3be9170ce9c36829012d03dbfc1bf5015cd6d8fdb903c867607c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import weakref
from astropy import log
from astropy.units import Unit, Quantity, UnitConversionError
__all__ = ['MissingDataAssociationException',
'IncompatibleUncertaintiesException', 'NDUncertainty',
'StdDevUncertainty', 'UnknownUncertainty',
'VarianceUncertainty', 'InverseVariance']
class IncompatibleUncertaintiesException(Exception):
"""This exception should be used to indicate cases in which uncertainties
with two different classes can not be propagated.
"""
class MissingDataAssociationException(Exception):
"""This exception should be used to indicate that an uncertainty instance
has not been associated with a parent `~astropy.nddata.NDData` object.
"""
class NDUncertainty(metaclass=ABCMeta):
"""This is the metaclass for uncertainty classes used with `NDData`.
Parameters
----------
array : any type, optional
The array or value (the parameter name is due to historical reasons) of
the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or
`NDUncertainty` subclasses are recommended.
If the `array` is `list`-like or `numpy.ndarray`-like it will be cast
to a plain `numpy.ndarray`.
Default is ``None``.
unit : unit-like, optional
Unit for the uncertainty ``array``. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the `array` as a copy. ``True`` copies it
before saving, while ``False`` tries to save every parameter as
reference. Note however that it is not always possible to save the
input as reference.
Default is ``True``.
Raises
------
IncompatibleUncertaintiesException
        If another `NDUncertainty`-like class is given as ``array`` and its
        ``uncertainty_type`` differs.
"""
def __init__(self, array=None, copy=True, unit=None):
if isinstance(array, NDUncertainty):
# Given an NDUncertainty class or subclass check that the type
# is the same.
if array.uncertainty_type != self.uncertainty_type:
raise IncompatibleUncertaintiesException
# Check if two units are given and take the explicit one then.
if (unit is not None and unit != array._unit):
# TODO : Clarify it (see NDData.init for same problem)?
log.info("overwriting Uncertainty's current "
"unit with specified unit.")
elif array._unit is not None:
unit = array.unit
array = array.array
elif isinstance(array, Quantity):
# Check if two units are given and take the explicit one then.
if (unit is not None and array.unit is not None and
unit != array.unit):
log.info("overwriting Quantity's current "
"unit with specified unit.")
elif array.unit is not None:
unit = array.unit
array = array.value
if unit is None:
self._unit = None
else:
self._unit = Unit(unit)
if copy:
array = deepcopy(array)
unit = deepcopy(unit)
self.array = array
self.parent_nddata = None # no associated NDData - until it is set!
@property
@abstractmethod
def uncertainty_type(self):
"""`str` : Short description of the type of uncertainty.
Defined as abstract property so subclasses *have* to override this.
"""
return None
@property
def supports_correlated(self):
"""`bool` : Supports uncertainty propagation with correlated \
uncertainties?
.. versionadded:: 1.2
"""
return False
@property
def array(self):
"""`numpy.ndarray` : the uncertainty's value.
"""
return self._array
@array.setter
def array(self, value):
if isinstance(value, (list, np.ndarray)):
value = np.array(value, subok=False, copy=False)
self._array = value
@property
def unit(self):
"""`~astropy.units.Unit` : The unit of the uncertainty, if any.
"""
return self._unit
@unit.setter
def unit(self, value):
"""
The unit should be set to a value consistent with the parent NDData
unit and the uncertainty type.
"""
if value is not None:
# Check the hidden attribute below, not the property. The property
# raises an exception if there is no parent_nddata.
if self._parent_nddata is not None:
parent_unit = self.parent_nddata.unit
try:
# Check for consistency with the unit of the parent_nddata
self._data_unit_to_uncertainty_unit(parent_unit).to(value)
except UnitConversionError:
raise UnitConversionError("Unit {} is incompatible "
"with unit {} of parent "
"nddata".format(value,
parent_unit))
self._unit = Unit(value)
else:
self._unit = value
@property
def quantity(self):
"""
This uncertainty as an `~astropy.units.Quantity` object.
"""
return Quantity(self.array, self.unit, copy=False, dtype=self.array.dtype)
@property
def parent_nddata(self):
"""`NDData` : reference to `NDData` instance with this uncertainty.
In case the reference is not set uncertainty propagation will not be
possible since propagation might need the uncertain data besides the
uncertainty.
"""
no_parent_message = "uncertainty is not associated with an NDData object"
parent_lost_message = (
"the associated NDData object was deleted and cannot be accessed "
"anymore. You can prevent the NDData object from being deleted by "
"assigning it to a variable. If this happened after unpickling "
"make sure you pickle the parent not the uncertainty directly."
)
try:
parent = self._parent_nddata
except AttributeError:
raise MissingDataAssociationException(no_parent_message)
else:
if parent is None:
raise MissingDataAssociationException(no_parent_message)
else:
# The NDData is saved as weak reference so we must call it
# to get the object the reference points to. However because
# we have a weak reference here it's possible that the parent
# was deleted because its reference count dropped to zero.
if isinstance(self._parent_nddata, weakref.ref):
resolved_parent = self._parent_nddata()
if resolved_parent is None:
log.info(parent_lost_message)
return resolved_parent
else:
log.info("parent_nddata should be a weakref to an NDData "
"object.")
return self._parent_nddata
@parent_nddata.setter
def parent_nddata(self, value):
if value is not None and not isinstance(value, weakref.ref):
# Save a weak reference on the uncertainty that points to this
# instance of NDData. Direct references should NOT be used:
# https://github.com/astropy/astropy/pull/4799#discussion_r61236832
value = weakref.ref(value)
# Set _parent_nddata here and access below with the property because value
# is a weakref
self._parent_nddata = value
        # Set the uncertainty's unit to that of the parent if it was not
        # already set, unless initializing with an empty parent (value=None).
if value is not None:
parent_unit = self.parent_nddata.unit
if self.unit is None:
if parent_unit is None:
self.unit = None
else:
# Set the uncertainty's unit to the appropriate value
self.unit = self._data_unit_to_uncertainty_unit(parent_unit)
else:
# Check that units of uncertainty are compatible with those of
# the parent. If they are, no need to change units of the
# uncertainty or the data. If they are not, let the user know.
unit_from_data = self._data_unit_to_uncertainty_unit(parent_unit)
try:
unit_from_data.to(self.unit)
except UnitConversionError:
raise UnitConversionError("Unit {} of uncertainty "
"incompatible with unit {} of "
"data".format(self.unit,
parent_unit))
@abstractmethod
def _data_unit_to_uncertainty_unit(self, value):
"""
Subclasses must override this property. It should take in a data unit
and return the correct unit for the uncertainty given the uncertainty
type.
"""
return None
def __repr__(self):
prefix = self.__class__.__name__ + '('
try:
body = np.array2string(self.array, separator=', ', prefix=prefix)
except AttributeError:
# In case it wasn't possible to use array2string
body = str(self.array)
return ''.join([prefix, body, ')'])
def __getstate__(self):
# Because of the weak reference the class wouldn't be picklable.
try:
return self._array, self._unit, self.parent_nddata
except MissingDataAssociationException:
# In case there's no parent
return self._array, self._unit, None
def __setstate__(self, state):
if len(state) != 3:
raise TypeError('The state should contain 3 items.')
self._array = state[0]
self._unit = state[1]
parent = state[2]
if parent is not None:
parent = weakref.ref(parent)
self._parent_nddata = parent
def __getitem__(self, item):
"""Normal slicing on the array, keep the unit and return a reference.
"""
return self.__class__(self.array[item], unit=self.unit, copy=False)
def propagate(self, operation, other_nddata, result_data, correlation):
"""Calculate the resulting uncertainty given an operation on the data.
.. versionadded:: 1.2
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide` (or `numpy.divide`).
other_nddata : `NDData` instance
The second operand in the arithmetic operation.
result_data : `~astropy.units.Quantity` or ndarray
The result of the arithmetic operations on the data.
correlation : `numpy.ndarray` or number
The correlation (rho) is defined between the uncertainties in
sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means
uncorrelated operands.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
Another instance of the same `NDUncertainty` subclass containing
the uncertainty of the result.
Raises
------
ValueError
If the ``operation`` is not supported or if correlation is not zero
but the subclass does not support correlated uncertainties.
Notes
-----
First this method checks if a correlation is given and the subclass
implements propagation with correlated uncertainties.
Then the second uncertainty is converted (or an Exception is raised)
to the same class in order to do the propagation.
Then the appropriate propagation method is invoked and the result is
returned.
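
        Examples
        --------
        A minimal sketch (an added example; the values are illustrative)::

            >>> import numpy as np
            >>> from astropy.nddata import NDData, StdDevUncertainty
            >>> a = NDData([1., 2.], uncertainty=StdDevUncertainty([0.1, 0.1]))
            >>> b = NDData([3., 4.], uncertainty=StdDevUncertainty([0.2, 0.2]))
            >>> a.uncertainty.propagate(np.add, b, a.data + b.data, 0.0)  # doctest: +FLOAT_CMP
            StdDevUncertainty([0.2236068, 0.2236068])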
"""
# Check if the subclass supports correlation
if not self.supports_correlated:
if isinstance(correlation, np.ndarray) or correlation != 0:
raise ValueError("{} does not support uncertainty propagation"
" with correlation."
"".format(self.__class__.__name__))
# Get the other uncertainty (and convert it to a matching one)
other_uncert = self._convert_uncertainty(other_nddata.uncertainty)
if operation.__name__ == 'add':
result = self._propagate_add(other_uncert, result_data,
correlation)
elif operation.__name__ == 'subtract':
result = self._propagate_subtract(other_uncert, result_data,
correlation)
elif operation.__name__ == 'multiply':
result = self._propagate_multiply(other_uncert, result_data,
correlation)
elif operation.__name__ in ['true_divide', 'divide']:
result = self._propagate_divide(other_uncert, result_data,
correlation)
else:
raise ValueError('unsupported operation')
return self.__class__(result, copy=False)
def _convert_uncertainty(self, other_uncert):
"""Checks if the uncertainties are compatible for propagation.
Checks if the other uncertainty is `NDUncertainty`-like and if so
verify that the uncertainty_type is equal. If the latter is not the
case try returning ``self.__class__(other_uncert)``.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The other uncertainty.
Returns
-------
other_uncert : `NDUncertainty` subclass
but converted to a compatible `NDUncertainty` subclass if
possible and necessary.
Raises
------
IncompatibleUncertaintiesException:
If the other uncertainty cannot be converted to a compatible
`NDUncertainty` subclass.
"""
if isinstance(other_uncert, NDUncertainty):
if self.uncertainty_type == other_uncert.uncertainty_type:
return other_uncert
else:
return self.__class__(other_uncert)
else:
raise IncompatibleUncertaintiesException
@abstractmethod
def _propagate_add(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
class UnknownUncertainty(NDUncertainty):
"""This class implements any unknown uncertainty type.
The main purpose of having an unknown uncertainty class is to prevent
uncertainty propagation.
Parameters
----------
args, kwargs :
see `NDUncertainty`
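
    Examples
    --------
    A minimal sketch (an added example)::

        >>> from astropy.nddata import UnknownUncertainty
        >>> UnknownUncertainty([1, 2, 3]).uncertainty_type
        'unknown'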
"""
@property
def supports_correlated(self):
"""`False` : Uncertainty propagation is *not* possible for this class.
"""
return False
@property
def uncertainty_type(self):
"""``"unknown"`` : `UnknownUncertainty` implements any unknown \
uncertainty type.
"""
return 'unknown'
def _data_unit_to_uncertainty_unit(self, value):
"""
No way to convert if uncertainty is unknown.
"""
return None
def _convert_uncertainty(self, other_uncert):
"""Raise an Exception because unknown uncertainty types cannot
implement propagation.
"""
msg = "Uncertainties of unknown type cannot be propagated."
raise IncompatibleUncertaintiesException(msg)
def _propagate_add(self, other_uncert, result_data, correlation):
"""Not possible for unknown uncertainty types.
"""
return None
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
class _VariancePropagationMixin:
"""
Propagation of uncertainties for variances, also used to perform error
propagation for variance-like uncertainties (standard deviation and inverse
variance).
"""
def _propagate_add_sub(self, other_uncert, result_data, correlation,
subtract=False,
to_variance=lambda x: x, from_variance=lambda x: x):
"""
Error propagation for addition or subtraction of variance or
        variance-like uncertainties. Uncertainties are calculated using the
        formulae for variance, but this method can be used for any
        uncertainty convertible to a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
if subtract:
correlation_sign = -1
else:
correlation_sign = 1
try:
result_unit_sq = result_data.unit ** 2
except AttributeError:
result_unit_sq = None
if other_uncert.array is not None:
# Formula: sigma**2 = dB
if (other_uncert.unit is not None and
result_unit_sq != to_variance(other_uncert.unit)):
# If the other uncertainty has a unit and this unit differs
# from the unit of the result convert it to the results unit
other = to_variance(other_uncert.array <<
other_uncert.unit).to(result_unit_sq).value
else:
other = to_variance(other_uncert.array)
else:
other = 0
if self.array is not None:
# Formula: sigma**2 = dA
if self.unit is not None and to_variance(self.unit) != self.parent_nddata.unit**2:
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = to_variance(self.array << self.unit).to(result_unit_sq).value
else:
this = to_variance(self.array)
else:
this = 0
        # Formula: sigma**2 = dA + dB +/- 2 * cor * sqrt(dA * dB)
        # (the sign depends on whether this is an addition or a subtraction)
# Determine the result depending on the correlation
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = 2 * correlation * np.sqrt(this * other)
result = this + other + correlation_sign * corr
else:
result = this + other
return from_variance(result)
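    # Worked example for the formula above (added comment; values
    # illustrative): with variances d_a=0.01 and d_b=0.04 and correlation 0,
    # addition gives sigma**2 = 0.05; with correlation 1 it gives
    # 0.05 + 2*sqrt(0.01*0.04) = 0.09, i.e. (sqrt(0.01) + sqrt(0.04))**2.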
def _propagate_multiply_divide(self, other_uncert, result_data,
correlation,
divide=False,
to_variance=lambda x: x,
from_variance=lambda x: x):
"""
Error propagation for multiplication or division of variance or
        variance-like uncertainties. Uncertainties are calculated using the
        formulae for variance, but this method can be used for any
        uncertainty convertible to a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
divide : bool, optional
If ``True``, propagate for division, otherwise propagate for
multiplication.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
        # For multiplication and division we don't need the result as a
        # Quantity; plain values suffice.
if isinstance(result_data, Quantity):
result_data = result_data.value
if divide:
correlation_sign = -1
else:
correlation_sign = 1
if other_uncert.array is not None:
# We want the result to have a unit consistent with the parent, so
# we only need to convert the unit of the other uncertainty if it
# is different from its data's unit.
if (other_uncert.unit and
to_variance(1 * other_uncert.unit) !=
((1 * other_uncert.parent_nddata.unit)**2).unit):
d_b = to_variance(other_uncert.array << other_uncert.unit).to(
(1 * other_uncert.parent_nddata.unit)**2).value
else:
d_b = to_variance(other_uncert.array)
# Formula: sigma**2 = |A|**2 * d_b
right = np.abs(self.parent_nddata.data**2 * d_b)
        else:
            # No uncertainty on the other operand: treat its variance as
            # zero so the correlation term below stays well defined.
            d_b = 0
            right = 0
if self.array is not None:
# Just the reversed case
if (self.unit and
to_variance(1 * self.unit) !=
((1 * self.parent_nddata.unit)**2).unit):
d_a = to_variance(self.array << self.unit).to(
(1 * self.parent_nddata.unit)**2).value
else:
d_a = to_variance(self.array)
# Formula: sigma**2 = |B|**2 * d_a
left = np.abs(other_uncert.parent_nddata.data**2 * d_a)
        else:
            # No uncertainty on this operand: treat its variance as zero.
            d_a = 0
            left = 0
# Multiplication
#
# The fundamental formula is:
# sigma**2 = |AB|**2*(d_a/A**2+d_b/B**2+2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# This formula is not very handy since it generates NaNs for every
# zero in A and B. So we rewrite it:
#
# Multiplication Formula:
        #   sigma**2 = d_a*B**2 + d_b*A**2 + 2 * cor * A * B * sqrt(d_a*d_b)
        #   sigma**2 = left + right + 2 * cor * A * B * sqrt(d_a*d_b)
#
# Division
#
# The fundamental formula for division is:
# sigma**2 = |A/B|**2*(d_a/A**2+d_b/B**2-2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# As with multiplication, it is convenient to rewrite this to avoid
# nans where A is zero.
#
# Division formula (rewritten):
        #   sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
        #              - 2 * cor * A * sqrt(d_a*d_b) / B**3
        #            = (multiplication formula) / B**4
        #              (with a sign change in the correlation term)
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = (2 * correlation * np.sqrt(d_a * d_b) *
self.parent_nddata.data *
other_uncert.parent_nddata.data)
else:
corr = 0
if divide:
return from_variance((left + right + correlation_sign * corr) /
other_uncert.parent_nddata.data**4)
else:
return from_variance(left + right + correlation_sign * corr)
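    # Numeric check of the rewritten multiplication formula above (added
    # comment; values illustrative): for A=2, B=3, d_a=0.01, d_b=0.04 and
    # correlation 0, sigma**2 = d_a*B**2 + d_b*A**2 = 0.09 + 0.16 = 0.25,
    # which matches |AB|**2 * (d_a/A**2 + d_b/B**2) = 36 * 25/3600 = 0.25
    # from the fundamental formula.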
class StdDevUncertainty(_VariancePropagationMixin, NDUncertainty):
"""Standard deviation uncertainty assuming first order gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `StdDevUncertainty`. The class can handle uncertainties whose unit
    differs from (but is convertible to) the parent `NDData` unit. The
    resulting uncertainty will have the same unit as the resulting data.
    Propagation with correlated uncertainties is supported, but the
    correlation must be given as input; this class cannot determine the
    correlation itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
`StdDevUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, StdDevUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.1, 0.1, 0.1])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.2])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 2
>>> ndd.uncertainty
StdDevUncertainty(2)
.. note::
The unit will not be displayed.
"""
@property
def supports_correlated(self):
"""`True` : `StdDevUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
@property
def uncertainty_type(self):
"""``"std"`` : `StdDevUncertainty` implements standard deviation.
"""
return 'std'
def _convert_uncertainty(self, other_uncert):
if isinstance(other_uncert, StdDevUncertainty):
return other_uncert
else:
raise IncompatibleUncertaintiesException
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=False,
to_variance=np.square,
from_variance=np.sqrt)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=True,
to_variance=np.square,
from_variance=np.sqrt)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=False,
to_variance=np.square,
from_variance=np.sqrt)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=True,
to_variance=np.square,
from_variance=np.sqrt)
def _data_unit_to_uncertainty_unit(self, value):
return value
class VarianceUncertainty(_VariancePropagationMixin, NDUncertainty):
"""
Variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `VarianceUncertainty`. The class can handle uncertainties whose unit
    differs from (but is convertible to) the parent `NDData` unit. The unit
    of the resulting uncertainty will be the square of the unit of the
    resulting data. Propagation with correlated uncertainties is supported,
    but the correlation must be given as input; this class cannot determine
    the correlation itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`VarianceUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, VarianceUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=VarianceUncertainty([0.01, 0.01, 0.01]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.01, 0.01, 0.01])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = VarianceUncertainty([0.04], unit='m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.04])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 4
>>> ndd.uncertainty
VarianceUncertainty(4)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"var"`` : `VarianceUncertainty` implements variance.
"""
return 'var'
@property
def supports_correlated(self):
"""`True` : `VarianceUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=False)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=True)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=False)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=True)
def _data_unit_to_uncertainty_unit(self, value):
return value ** 2
def _inverse(x):
"""Just a simple inverse for use in the InverseVariance"""
return 1 / x
class InverseVariance(_VariancePropagationMixin, NDUncertainty):
"""
Inverse variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
    of `InverseVariance`. The class can handle uncertainties whose unit
    differs from (but is convertible to) the parent `NDData` unit. The unit
    of the resulting uncertainty will be the inverse square of the unit of
    the resulting data. Propagation with correlated uncertainties is
    supported, but the correlation must be given as input; this class
    cannot determine the correlation itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`InverseVariance` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, InverseVariance
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=InverseVariance([100, 100, 100]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([100, 100, 100])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = InverseVariance([25], unit='1/m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([25])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 0.25
>>> ndd.uncertainty
InverseVariance(0.25)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"ivar"`` : `InverseVariance` implements inverse variance.
"""
return 'ivar'
@property
def supports_correlated(self):
"""`True` : `InverseVariance` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=False,
to_variance=_inverse,
from_variance=_inverse)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(other_uncert, result_data,
correlation, subtract=True,
to_variance=_inverse,
from_variance=_inverse)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=False,
to_variance=_inverse,
from_variance=_inverse)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(other_uncert,
result_data, correlation,
divide=True,
to_variance=_inverse,
from_variance=_inverse)
def _data_unit_to_uncertainty_unit(self, value):
return 1 / value ** 2
|
c6905a0e821ad74b5c19dfa4118df045377ec4aa77fb30132794e045e895edbe | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import warnings
from astropy.cosmology import units as cu
from astropy.io import registry as io_registry
from astropy.units import add_enabled_units
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["CosmologyRead", "CosmologyWrite",
"CosmologyFromFormat", "CosmologyToFormat"]
__doctest_skip__ = __all__
# ==============================================================================
# Read / Write
readwrite_registry = io_registry.UnifiedIORegistry()
class CosmologyRead(io_registry.UnifiedReadWrite):
"""Read and parse data to a `~astropy.cosmology.Cosmology`.
This function provides the Cosmology interface to the Astropy unified I/O
layer. This allows easily reading a file in supported data formats using
syntax such as::
>>> from astropy.cosmology import Cosmology
>>> cosmo1 = Cosmology.read('<file name>')
When the ``read`` method is called from a subclass the subclass will
provide a keyword argument ``cosmology=<class>`` to the registered read
method. The method uses this cosmology class, regardless of the class
indicated in the file, and sets parameters' default values from the class'
signature.
Get help on the available readers using the ``help()`` method::
>>> Cosmology.read.help() # Get help reading and list supported formats
>>> Cosmology.read.help(format='<format>') # Get detailed help on a format
>>> Cosmology.read.list_formats() # Print list of available formats
See also: https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args
Positional arguments passed through to data reader. If supplied the
first argument is typically the input filename.
format : str (optional, keyword-only)
File format specifier.
**kwargs
Keyword arguments passed through to data reader.
Returns
-------
out : `~astropy.cosmology.Cosmology` subclass instance
`~astropy.cosmology.Cosmology` corresponding to file contents.
Notes
-----
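    A minimal sketch (doctests in this module are skipped; the
    ``ascii.ecsv`` format name below is an assumption about the registered
    readers)::

        >>> from astropy.cosmology import Cosmology
        >>> cosmo = Cosmology.read('<file name>.ecsv', format='ascii.ecsv')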
"""
def __init__(self, instance, cosmo_cls):
super().__init__(instance, cosmo_cls, "read", registry=readwrite_registry)
def __call__(self, *args, **kwargs):
from astropy.cosmology.core import Cosmology
# so subclasses can override, also pass the class as a kwarg.
# allows for `FlatLambdaCDM.read` and
# `Cosmology.read(..., cosmology=FlatLambdaCDM)`
if self._cls is not Cosmology:
kwargs.setdefault("cosmology", self._cls) # set, if not present
# check that it is the correct cosmology, can be wrong if user
# passes in e.g. `w0wzCDM.read(..., cosmology=FlatLambdaCDM)`
valid = (self._cls, self._cls.__qualname__)
if kwargs["cosmology"] not in valid:
raise ValueError(
"keyword argument `cosmology` must be either the class "
f"{valid[0]} or its qualified name '{valid[1]}'")
with add_enabled_units(cu):
cosmo = self.registry.read(self._cls, *args, **kwargs)
return cosmo
class CosmologyWrite(io_registry.UnifiedReadWrite):
"""Write this Cosmology object out in the specified format.
This function provides the Cosmology interface to the astropy unified I/O
layer. This allows easily writing a file in supported data formats
using syntax such as::
>>> from astropy.cosmology import Planck18
>>> Planck18.write('<file name>')
Get help on the available writers for ``Cosmology`` using the ``help()``
method::
>>> Cosmology.write.help() # Get help writing and list supported formats
>>> Cosmology.write.help(format='<format>') # Get detailed help on format
>>> Cosmology.write.list_formats() # Print list of available formats
Parameters
----------
*args
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
format : str (optional, keyword-only)
File format specifier.
**kwargs
Keyword arguments passed through to data writer.
Notes
-----
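    A minimal sketch (doctests in this module are skipped; the
    ``ascii.ecsv`` format name below is an assumption)::

        >>> from astropy.cosmology import Planck18
        >>> Planck18.write('<file name>.ecsv', format='ascii.ecsv')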
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, "write", registry=readwrite_registry)
def __call__(self, *args, **kwargs):
self.registry.write(self._instance, *args, **kwargs)
# ==============================================================================
# Format Interchange
# for transforming instances, e.g. Cosmology <-> dict
convert_registry = io_registry.UnifiedIORegistry()
class CosmologyFromFormat(io_registry.UnifiedReadWrite):
"""Transform object to a `~astropy.cosmology.Cosmology`.
This function provides the Cosmology interface to the Astropy unified I/O
layer. This allows easily parsing supported data formats using
syntax such as::
>>> from astropy.cosmology import Cosmology
>>> cosmo1 = Cosmology.from_format(cosmo_mapping, format='mapping')
When the ``from_format`` method is called from a subclass the subclass will
provide a keyword argument ``cosmology=<class>`` to the registered parser.
The method uses this cosmology class, regardless of the class indicated in
the data, and sets parameters' default values from the class' signature.
Get help on the available readers using the ``help()`` method::
>>> Cosmology.from_format.help() # Get help and list supported formats
>>> Cosmology.from_format.help('<format>') # Get detailed help on a format
>>> Cosmology.from_format.list_formats() # Print list of available formats
See also: https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
obj : object
The object to parse according to 'format'
*args
Positional arguments passed through to data parser.
    format : str or None (optional, keyword-only)
        Object format specifier. For `None` (default) `CosmologyFromFormat`
        tries to identify the correct format.
**kwargs
Keyword arguments passed through to data parser.
Parsers should accept the following keyword arguments:
- cosmology : the class (or string name thereof) to use / check when
constructing the cosmology instance.
Returns
-------
out : `~astropy.cosmology.Cosmology` subclass instance
`~astropy.cosmology.Cosmology` corresponding to ``obj`` contents.
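
    Examples
    --------
    A round-trip sketch via the ``mapping`` format (doctests in this module
    are skipped)::

        >>> from astropy.cosmology import Cosmology, Planck18
        >>> mapping = Planck18.to_format("mapping")
        >>> cosmo = Cosmology.from_format(mapping, format="mapping")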
"""
def __init__(self, instance, cosmo_cls):
super().__init__(instance, cosmo_cls, "read", registry=convert_registry)
def __call__(self, obj, *args, format=None, **kwargs):
from astropy.cosmology.core import Cosmology
# so subclasses can override, also pass the class as a kwarg.
        # allows for `FlatLambdaCDM.from_format` and
        # `Cosmology.from_format(..., cosmology=FlatLambdaCDM)`
if self._cls is not Cosmology:
kwargs.setdefault("cosmology", self._cls) # set, if not present
            # check that it is the correct cosmology, can be wrong if user
            # passes in e.g. `w0wzCDM.from_format(..., cosmology=FlatLambdaCDM)`
valid = (self._cls, self._cls.__qualname__)
if kwargs["cosmology"] not in valid:
raise ValueError(
"keyword argument `cosmology` must be either the class "
f"{valid[0]} or its qualified name '{valid[1]}'")
with add_enabled_units(cu):
cosmo = self.registry.read(self._cls, obj, *args, format=format, **kwargs)
return cosmo
class CosmologyToFormat(io_registry.UnifiedReadWrite):
"""Transform this Cosmology to another format.
This function provides the Cosmology interface to the astropy unified I/O
layer. This allows easily transforming to supported data formats
using syntax such as::
>>> from astropy.cosmology import Planck18
>>> Planck18.to_format("mapping")
{'cosmology': astropy.cosmology.core.FlatLambdaCDM,
'name': 'Planck18',
'H0': <Quantity 67.66 km / (Mpc s)>,
'Om0': 0.30966,
...
Get help on the available representations for ``Cosmology`` using the
``help()`` method::
>>> Cosmology.to_format.help() # Get help and list supported formats
>>> Cosmology.to_format.help('<format>') # Get detailed help on format
>>> Cosmology.to_format.list_formats() # Print list of available formats
Parameters
----------
format : str
Format specifier.
*args
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
**kwargs
Keyword arguments passed through to data writer.
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, "write", registry=convert_registry)
def __call__(self, format, *args, **kwargs):
return self.registry.write(self._instance, None, *args, format=format,
**kwargs)
|
74bcdc4e801627cb6d98839998bf856bb6c6b1a556cdab7315493169e132d998 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" astropy.cosmology contains classes and functions for cosmological
distance measures and other cosmology-related calculations.
See the `Astropy documentation
<https://docs.astropy.org/en/latest/cosmology/index.html>`_ for more
detailed usage examples and references.
"""
from . import core, flrw, funcs, parameter, units, utils
from . import io # needed before 'realizations' # isort: split
from . import realizations
from .core import *
from .flrw import *
from .funcs import *
from .parameter import *
from .realizations import available, default_cosmology
from .utils import *
__all__ = (core.__all__ + flrw.__all__ # cosmology classes
+ realizations.__all__ # instances thereof
+ ["units"]
+ funcs.__all__ + parameter.__all__ + utils.__all__) # utils
def __getattr__(name):
"""Get realizations using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations`
"""
if name not in realizations.available:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
return getattr(realizations, name)
def __dir__():
"""Directory, including lazily-imported objects."""
return __all__
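# Example of the lazy access implemented above (an illustrative sketch):
# ``from astropy import cosmology; cosmology.Planck18`` triggers
# ``__getattr__('Planck18')``, which forwards the lookup to the
# `astropy.cosmology.realizations` module on first use.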
|
a563c44ea373606c2aa93c54655c2c69444c59dea5b4678d57d02e363f4b431e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from abc import abstractmethod
from math import acos, cos, exp, floor, inf, log, pi, sin, sqrt
from numbers import Number
import numpy as np
import astropy.constants as const
import astropy.units as u
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from . import scalar_inv_efuncs
from . import units as cu
from .core import Cosmology, FlatCosmologyMixin, Parameter
from .parameter import _validate_non_negative, _validate_with_unit
from .utils import aszarr, vectorize_redshift_method
# isort: split
if HAS_SCIPY:
from scipy.integrate import quad
from scipy.special import ellipkinc, hyp2f1
else:
def quad(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.integrate'")
def ellipkinc(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.special'")
def hyp2f1(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.special'")
__all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM",
"w0waCDM", "Flatw0waCDM", "wpwaCDM", "w0wzCDM", "FlatFLRWMixin"]
__doctest_requires__ = {'*': ['scipy']}
# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
critdens_const = (3 / (8 * pi * const.G)).cgs.value
# angle conversions
radian_in_arcsec = (1 * u.rad).to(u.arcsec)
radian_in_arcmin = (1 * u.rad).to(u.arcmin)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
a_B_c2 = (4 * const.sigma_sb / const.c ** 3).cgs.value
# Boltzmann constant in eV / K
kB_evK = const.k_B.to(u.eV / u.K)
class FLRW(Cosmology):
"""
A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you cannot instantiate examples of this
class, but must work with one of its subclasses, such as
:class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include massive
neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Notes
-----
Class instances are immutable -- you cannot change the parameters' values.
That is, all of the above attributes (except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
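
    Examples
    --------
    A minimal sketch using a concrete subclass (an added example)::

        >>> from astropy.cosmology import FlatLambdaCDM
        >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
        >>> cosmo.hubble_distance  # doctest: +FLOAT_CMP
        <Quantity 4282.7494 Mpc>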
"""
H0 = Parameter(doc="Hubble constant as an `~astropy.units.Quantity` at z=0.",
unit="km/(s Mpc)", fvalidate="scalar")
Om0 = Parameter(doc="Omega matter; matter density/critical density at z=0.",
fvalidate="non-negative")
Ode0 = Parameter(doc="Omega dark energy; dark energy density/critical density at z=0.",
fvalidate="float")
Tcmb0 = Parameter(doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.",
unit="Kelvin", fvalidate="scalar")
Neff = Parameter(doc="Number of effective neutrino species.", fvalidate="non-negative")
m_nu = Parameter(doc="Mass of neutrino species.",
unit="eV", equivalencies=u.mass_energy())
Ob0 = Parameter(doc="Omega baryon; baryonic matter density/critical density at z=0.")
def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(name=name, meta=meta)
# Assign (and validate) Parameters
self.H0 = H0
self.Om0 = Om0
self.Ode0 = Ode0
self.Tcmb0 = Tcmb0
self.Neff = Neff
self.m_nu = m_nu # (reset later, this is just for unit validation)
self.Ob0 = Ob0 # (must be after Om0)
# Derived quantities:
# Dark matter density; matter - baryons, if latter is not None.
self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0)
# 100 km/s/Mpc * h = H0 (so h is dimensionless)
self._h = self._H0.value / 100.0
# Hubble distance
self._hubble_distance = (const.c / self._H0).to(u.Mpc)
# H0 in s^-1
H0_s = self._H0.value * H0units_to_invs
# Hubble time
self._hubble_time = (sec_to_Gyr / H0_s) << u.Gyr
# Critical density at z=0 (grams per cubic cm)
cd0value = critdens_const * H0_s ** 2
self._critical_density0 = cd0value << u.g / u.cm ** 3
# Compute photon density from Tcmb
self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 / self._critical_density0.value
# Compute Neutrino temperature:
        # The constant in front is (4/11)^(1/3) -- see any cosmology book for an
# explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21).
self._Tnu0 = 0.7137658555036082 * self._Tcmb0
# Compute neutrino parameters:
if self._m_nu is None:
self._nneutrinos = 0
self._neff_per_nu = None
self._massivenu = False
self._massivenu_mass = None
self._nmassivenu = self._nmasslessnu = None
else:
self._nneutrinos = floor(self._Neff)
# We are going to share Neff between the neutrinos equally. In
# detail this is not correct, but it is a standard assumption
# because properly calculating it is a) complicated b) depends on
# the details of the massive neutrinos (e.g., their weak
# interactions, which could be unusual if one is considering
# sterile neutrinos).
self._neff_per_nu = self._Neff / self._nneutrinos
# Now figure out if we have massive neutrinos to deal with, and if
# so, get the right number of masses. It is worth keeping track of
# massless ones separately (since they are easy to deal with, and a
# common use case is to have only one massive neutrino).
massive = np.nonzero(self._m_nu.value > 0)[0]
self._massivenu = massive.size > 0
self._nmassivenu = len(massive)
self._massivenu_mass = self._m_nu[massive].value if self._massivenu else None
self._nmasslessnu = self._nneutrinos - self._nmassivenu
# Compute Neutrino Omega and total relativistic component for massive
# neutrinos. We also store a list version, since that is more efficient
# to do integrals with (perhaps surprisingly! But small python lists
# are more efficient than small NumPy arrays).
if self._massivenu: # (`_massivenu` set in `m_nu`)
nu_y = self._massivenu_mass / (kB_evK * self._Tnu0)
self._nu_y = nu_y.value
self._nu_y_list = self._nu_y.tolist()
self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
else:
# This case is particularly simple, so do it directly The 0.2271...
# is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy
# density) times 7/8 for FD vs. BE statistics.
self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
self._nu_y = self._nu_y_list = None
# Compute curvature density
self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
# Subclasses should override this reference if they provide
# more efficient scalar versions of inv_efunc.
self._inv_efunc_scalar = self.inv_efunc
self._inv_efunc_scalar_args = ()
# ---------------------------------------------------------------
# Parameter details
@Ob0.validator
def Ob0(self, param, value):
"""Validate baryon density to None or positive float > matter density."""
if value is None:
return value
value = _validate_non_negative(self, param, value)
if value > self.Om0:
raise ValueError("baryonic density can not be larger than total matter density.")
return value
@m_nu.validator
def m_nu(self, param, value):
"""Validate neutrino masses to right value, units, and shape.
        There are no neutrinos if floor(Neff) or Tcmb0 is 0.
The number of neutrinos must match floor(Neff).
Neutrino masses cannot be negative.
"""
# Check if there are any neutrinos
if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0:
return None # None, regardless of input
# Validate / set units
value = _validate_with_unit(self, param, value)
# Check values and data shapes
if value.shape not in ((), (nneutrinos,)):
raise ValueError("unexpected number of neutrino masses β "
f"expected {nneutrinos}, got {len(value)}.")
elif np.any(value.value < 0):
raise ValueError("invalid (negative) neutrino mass encountered.")
# scalar -> array
if value.isscalar:
value = np.full_like(value, value, shape=nneutrinos)
return value
# ---------------------------------------------------------------
# properties
@property
def is_flat(self):
"""Return bool; `True` if the cosmology is flat."""
return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0))
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0
@property
def Odm0(self):
"""Omega dark matter; dark matter density/critical density at z=0."""
return self._Odm0
@property
def Ok0(self):
"""Omega curvature; the effective curvature density/critical density at z=0."""
return self._Ok0
@property
def Tnu0(self):
"""Temperature of the neutrino background as `~astropy.units.Quantity` at z=0."""
return self._Tnu0
@property
def has_massive_nu(self):
"""Does this cosmology have at least one massive neutrino species?"""
if self._Tnu0.value == 0:
return False
return self._massivenu
@property
def h(self):
"""Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
return self._h
@property
def hubble_time(self):
"""Hubble time as `~astropy.units.Quantity`."""
return self._hubble_time
@property
def hubble_distance(self):
"""Hubble distance as `~astropy.units.Quantity`."""
return self._hubble_distance
@property
def critical_density0(self):
"""Critical density as `~astropy.units.Quantity` at z=0."""
return self._critical_density0
@property
def Ogamma0(self):
"""Omega gamma; the density/critical density of photons at z=0."""
return self._Ogamma0
@property
def Onu0(self):
"""Omega nu; the density/critical density of neutrinos at z=0."""
return self._Onu0
# ---------------------------------------------------------------
@abstractmethod
def w(self, z):
r"""The dark energy equation of state.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
`float` if scalar input.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1.
This must be overridden by subclasses.
"""
raise NotImplementedError("w(z) is not implemented")
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density relative to the critical density at each redshift.
Returns float if input scalar.
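
        Examples
        --------
        A sketch (an added example)::

            >>> from astropy.cosmology import FlatLambdaCDM
            >>> FlatLambdaCDM(H0=70, Om0=0.3).Otot(0)  # doctest: +FLOAT_CMP
            1.0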
"""
return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)
def Om(self, z):
"""
Return the density parameter for non-relativistic matter
at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Om : ndarray or float
The density of non-relativistic matter relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest; see `Onu`.
"""
z = aszarr(z)
return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ob(self, z):
"""Return the density parameter for baryonic matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ob : ndarray or float
The density of baryonic matter relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
"""
if self._Ob0 is None:
raise ValueError("Baryon density not set for this cosmology")
z = aszarr(z)
return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Odm(self, z):
"""Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Odm : ndarray or float
The density of non-relativistic dark matter relative to the
critical density at each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest.
"""
if self._Odm0 is None:
raise ValueError("Baryonic density not set for this cosmology, "
"unclear meaning of dark matter density")
z = aszarr(z)
return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
"""
Return the equivalent density parameter for curvature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ok : ndarray or float
The equivalent density parameter for curvature at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ok0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
"""Return the density parameter for dark energy at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ode : ndarray or float
The density of dark energy relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ode0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
"""Return the density parameter for photons at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ogamma : ndarray or float
The energy density of photons relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
r"""Return the density parameter for neutrinos at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Onu : ndarray or float
The energy density of neutrinos relative to the critical density at
each redshift. Note that this includes their kinetic energy (if
they have mass), so it is not equal to the commonly used
:math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include
kinetic energy.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Onu0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
"""Return the CMB temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tcmb : `~astropy.units.Quantity` ['temperature']
The temperature of the CMB in K.
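Examples
--------
A short sketch; the CMB temperature used here (2.725 K) is an
illustrative value:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
>>> T_at_z1 = cosmo.Tcmb(1.0)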
"""
return self._Tcmb0 * (aszarr(z) + 1.0)
def Tnu(self, z):
"""Return the neutrino temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tnu : `~astropy.units.Quantity` ['temperature']
The temperature of the cosmic neutrino background in K.
"""
return self._Tnu0 * (aszarr(z) + 1.0)
def nu_relative_density(self, z):
r"""Neutrino density function relative to the energy density in photons.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
f : ndarray or float
The neutrino density scaling factor relative to the density in
photons at each redshift.
Returns `float` if the input is scalar.
Notes
-----
The density in neutrinos is given by
.. math::
\rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \,
f\left(m_{\nu} a / T_{\nu 0} \right) \,
\rho_{\gamma} \left( a \right)
where
.. math::
f \left(y\right) = \frac{120}{7 \pi^4}
\int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated for each
one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This
method returns :math:`0.2271 f` using an analytical fitting formula
given in Komatsu et al. 2011, ApJS 192, 18.
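Examples
--------
An illustrative sketch with a single assumed neutrino mass (0.06 eV is
a demonstration value, not a recommendation):
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, m_nu=0.06*u.eV)
>>> f0 = cosmo.nu_relative_density(0)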
"""
# Note that there is also a scalar-z-only cython implementation of
# this in scalar_inv_efuncs.pyx, so if you find a problem in this
# you need to update there too.
# See Komatsu et al. 2011, eq 26 and the surrounding discussion
# for an explanation of what we are doing here.
# However, this is modified to handle multiple neutrino masses
# by computing the above for each mass, then summing
prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book
# The massive and massless contribution must be handled separately
# But check for common cases first
z = aszarr(z)
if not self._massivenu:
return prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
# These are purely fitting constants -- see the Komatsu paper
p = 1.83
invp = 0.54644808743 # 1.0 / p
k = 0.3173
curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1))
rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu
return prefac * self._neff_per_nu * rel_mass
def _w_integrand(self, ln1pz):
"""Internal convenience function for w(z) integral (eq. 5 of [1]_).
Parameters
----------
ln1pz : `~numbers.Number` or scalar ndarray
Assumes scalar input, since this should only be called inside an
integral.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
return 1.0 + self.w(exp(ln1pz) - 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and is given by
.. math::
I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} }
\left[ 1 + w\left( a^{\prime} \right) \right] \right)
The actual integral used is rewritten from [1]_ to be in terms of z.
It will generally be helpful for subclasses to overload this method if
the integral can be done analytically for the particular dark
energy equation of state that they implement.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
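Examples
--------
Subclasses typically override this with an analytic form; a sketch of
the interface using `wCDM` with illustrative parameters:
>>> from astropy.cosmology import wCDM
>>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
>>> I_half = cosmo.de_density_scale(0.5)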
"""
# This allows for an arbitrary w(z) following eq (5) of
# Linder 2003, PRL 90, 91301. The code here evaluates
# the integral numerically. However, most popular
# forms of w(z) are designed to make this integral analytic,
# so it is probably a good idea for subclasses to overload this
# method if an analytic form is available.
z = aszarr(z)
if not isinstance(z, (Number, np.generic)): # array/Quantity
ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0]
for redshift in z])
return np.exp(3 * ival)
else: # scalar
ival = quad(self._w_integrand, 0, log(z + 1.0))[0]
return exp(3 * ival)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
Notes
-----
It is not necessary to override this method, but if de_density_scale
takes a particularly simple form, it may be advantageous to.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * self.de_density_scale(z))
def inv_efunc(self, z):
"""Inverse of ``efunc``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the inverse Hubble constant.
Returns `float` if the input is scalar.
"""
# Avoid the function overhead by repeating code
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * self.de_density_scale(z))**(-0.5)
def _lookback_time_integrand_scalar(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : float
Input redshift.
Returns
-------
I : float
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0)
def lookback_time_integrand(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : float or array
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return self.inv_efunc(z) / (z + 1.0)
def _abs_distance_integrand_scalar(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : float
Input redshift.
Returns
-------
X : float
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
args = self._inv_efunc_scalar_args
return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args)
def abs_distance_integrand(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float or array
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return (z + 1.0) ** 2 * self.inv_efunc(z)
def H(self, z):
"""Hubble parameter (km/s/Mpc) at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
H : `~astropy.units.Quantity` ['frequency']
Hubble parameter at each input redshift.
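Examples
--------
For example, with an illustrative flat cosmology:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> H_half = cosmo.H(0.5)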
"""
return self._H0 * self.efunc(z)
def scale_factor(self, z):
"""Scale factor at redshift ``z``.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
a : ndarray or float
Scale factor at each input redshift.
Returns `float` if the input is scalar.
"""
return 1.0 / (aszarr(z) + 1.0)
def lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
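Examples
--------
A minimal sketch, assuming illustrative parameter values:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> t_lb = cosmo.lookback_time(0.5)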
"""
return self._lookback_time(z)
def _lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * self._integral_lookback_time(z)
@vectorize_redshift_method
def _integral_lookback_time(self, z, /):
"""Lookback time to redshift ``z``. Value in units of Hubble time.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
Lookback time to each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._lookback_time_integrand_scalar, 0, z)[0]
def lookback_distance(self, z):
"""
The lookback distance is the light travel time distance to a given
redshift. It is simply c * lookback_time. It may be used to calculate
the proper distance between two redshifts, e.g. for the mean free path
to ionizing radiation.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Lookback distance in Mpc
"""
return (self.lookback_time(z) * const.c).to(u.Mpc)
def age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
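Examples
--------
For instance, the present-day age of an illustrative flat cosmology:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> t0 = cosmo.age(0)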
"""
return self._age(z)
def _age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
return self._hubble_time * self._integral_age(z)
@vectorize_redshift_method
def _integral_age(self, z, /):
"""Age of the universe at redshift ``z``. Value in units of Hubble time.
Calculated using explicit integration.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
The age of the universe at each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return quad(self._lookback_time_integrand_scalar, z, np.inf)[0]
def critical_density(self, z):
"""Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
rho : `~astropy.units.Quantity`
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
"""Comoving line-of-sight distance in Mpc at a given redshift.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc to each input redshift.
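Examples
--------
Mirroring the class-level examples below (illustrative parameters):
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> dc = cosmo.comoving_distance(0.5)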
"""
return self._comoving_distance_z1z2(0, z)
def _comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
@vectorize_redshift_method(nin=2)
def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /):
"""
Comoving line-of-sight distance between objects at redshifts ``z1`` and
``z2``. Value in units of the Hubble distance.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : float or ndarray
Comoving distance between each input redshift, in units of the
Hubble distance.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0]
def _integral_comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``. The comoving distance along the line-of-sight
between two objects remains constant with time for objects in the
Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'] or array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2)
def comoving_transverse_distance(self, z):
r"""Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero (as in the current
concordance Lambda-CDM model).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
return self._comoving_transverse_distance_z1z2(0, z)
def _comoving_transverse_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z2`` as
seen from redshift ``z1`` corresponding to an angular separation of
1 radian. This is the same as the comoving distance if :math:`\Omega_k`
is zero (as in the current concordance Lambda-CDM model).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc between the input redshifts.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
Ok0 = self._Ok0
dc = self._comoving_distance_z1z2(z1, z2)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self._hubble_distance
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value)
def angular_diameter_distance(self, z):
"""Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift ``z`` ([1]_, [2]_, [3]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Angular diameter distance in Mpc at each input redshift.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
.. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
.. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
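Examples
--------
A brief sketch with illustrative parameters:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> d_A = cosmo.angular_diameter_distance(0.5)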
"""
z = aszarr(z)
return self.comoving_transverse_distance(z) / (z + 1.0)
def luminosity_distance(self, z):
"""Luminosity distance in Mpc at redshift ``z``.
This is the distance to use when converting between the bolometric flux
from an object at redshift ``z`` and its bolometric luminosity [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Luminosity distance in Mpc at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a luminosity distance.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
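Examples
--------
For example (illustrative parameters):
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> d_L = cosmo.luminosity_distance(0.5)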
"""
z = aszarr(z)
return (z + 1.0) * self.comoving_transverse_distance(z)
def angular_diameter_distance_z1z2(self, z1, z2):
"""Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing, for example computing the angular
diameter distance between a lensed galaxy and the foreground lens.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. For most practical applications such as
gravitational lensing, ``z2`` should be larger than ``z1``. The
method will work for ``z2 < z1``; however, this will return
negative distances.
Returns
-------
d : `~astropy.units.Quantity`
The angular diameter distance between each input redshift pair.
Returns scalar if input is scalar, array otherwise.
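Examples
--------
A lensing-style sketch with an assumed lens at z=0.5 and source at
z=1.5 (values are illustrative):
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> d_ls = cosmo.angular_diameter_distance_z1z2(0.5, 1.5)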
"""
z1, z2 = aszarr(z1), aszarr(z2)
if np.any(z2 < z1):
warnings.warn(f"Second redshift(s) z2 ({z2}) is less than first "
f"redshift(s) z1 ({z1}).", AstropyUserWarning)
return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0)
@vectorize_redshift_method
def absorption_distance(self, z, /):
"""Absorption distance at redshift ``z``.
This is used to calculate the number of objects with some cross section
of absorption and number density intersecting a sightline per unit
redshift path ([1]_, [2]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : float or ndarray
Absorption distance (dimensionless) at each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
.. [2] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
"""
return quad(self._abs_distance_integrand_scalar, 0, z)[0]
def distmod(self, z):
"""Distance modulus at redshift ``z``.
The distance modulus is defined as the (apparent magnitude - absolute
magnitude) for an object at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
distmod : `~astropy.units.Quantity`
Distance modulus at each input redshift, in magnitudes.
See Also
--------
z_at_value : Find the redshift corresponding to a distance modulus.
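Examples
--------
A short sketch (illustrative parameters):
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> mu = cosmo.distmod(0.5)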
"""
# Remember that the luminosity distance is in Mpc
# Abs is necessary because in certain obscure closed cosmologies
# the distance modulus can be negative -- which is okay because
# it enters as the square.
val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
return u.Quantity(val, u.mag)
def comoving_volume(self, z):
r"""Comoving volume in cubic Mpc at redshift ``z``.
This is the volume of the universe encompassed by redshifts less than
``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius
`comoving_distance`, but it is less intuitive if :math:`\Omega_k` is nonzero.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
V : `~astropy.units.Quantity`
Comoving volume in :math:`Mpc^3` at each input redshift.
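Examples
--------
For instance, the comoving volume enclosed by z=0.5 in an illustrative
flat model:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> V = cosmo.comoving_volume(0.5)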
"""
Ok0 = self._Ok0
if Ok0 == 0:
return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3
dh = self._hubble_distance.value # .value for speed
dm = self.comoving_transverse_distance(z).value
term1 = 4.0 * pi * dh ** 3 / (2.0 * Ok0) * u.Mpc ** 3
term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
term3 = sqrt(abs(Ok0)) * dm / dh
if Ok0 > 0:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3))
else:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3))
def differential_comoving_volume(self, z):
"""Differential comoving volume at redshift z.
Useful for calculating the effective comoving volume.
For example, allows for integration over a comoving volume that has a
sensitivity function that changes with redshift. The total comoving
volume is given by integrating ``differential_comoving_volume`` to
redshift ``z`` and multiplying by a solid angle.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
dV : `~astropy.units.Quantity`
Differential comoving volume per redshift per steradian at each
input redshift.
"""
dm = self.comoving_transverse_distance(z)
return self._hubble_distance * (dm ** 2.0) / (self.efunc(z) << u.steradian)
def kpc_comoving_per_arcmin(self, z):
"""
Separation in transverse comoving kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return self.comoving_transverse_distance(z).to(u.kpc) / radian_in_arcmin
def kpc_proper_per_arcmin(self, z):
"""
Separation in transverse proper kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in proper kpc corresponding to an arcmin at each input
redshift.
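Examples
--------
For example, the proper transverse scale at z=0.5 (illustrative
parameters):
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> scale = cosmo.kpc_proper_per_arcmin(0.5)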
"""
return self.angular_diameter_distance(z).to(u.kpc) / radian_in_arcmin
def arcsec_per_kpc_comoving(self, z):
"""
Angular separation in arcsec corresponding to a comoving kpc at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a comoving kpc at
each input redshift.
"""
return radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc)
def arcsec_per_kpc_proper(self, z):
"""
Angular separation in arcsec corresponding to a proper kpc at redshift
``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a proper kpc at
each input redshift.
"""
return radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc)
class FlatFLRWMixin(FlatCosmologyMixin):
"""
Mixin class for flat FLRW cosmologies. Do NOT instantiate directly.
Must precede the base class in the multiple inheritance so that this
mixin's ``__init__`` runs before the base class's.
Note that all instances of ``FlatFLRWMixin`` are flat, but not all
flat cosmologies are instances of ``FlatFLRWMixin``. For example,
``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
Ode0 = FLRW.Ode0.clone(derived=True) # same as FLRW, but now a derived param.
def __init_subclass__(cls):
super().__init_subclass__()
if "Ode0" in cls._init_signature.parameters:
raise TypeError("subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`")
def __init__(self, *args, **kw):
super().__init__(*args, **kw) # guaranteed not to have `Ode0`
# Do some twiddling after the fact to get flatness
self._Ok0 = 0.0
self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0)
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return 1.0
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density parameter, identically 1 for a flat cosmology.
Returns `float` if the input is scalar.
"""
return 1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False)
def __equiv__(self, other):
"""flat-FLRW equivalence. Use ``.is_equivalent()`` for actual check!
Parameters
----------
other : `~astropy.cosmology.FLRW` subclass instance
The object in which to compare.
Returns
-------
bool or `NotImplemented`
`True` if 'other' is of the same class or its non-flat counterpart
(e.g. ``FlatLambdaCDM`` and ``LambdaCDM``) and has matching parameters
and parameter values. `False` if 'other' is of the same class but
has different parameters. `NotImplemented` otherwise.
"""
# check if case (1): same class & parameters
if isinstance(other, FlatFLRWMixin):
return super().__equiv__(other)
# check cases (3, 4), if other is the non-flat version of this class
# this makes the assumption that any further subclass of a flat cosmo
# keeps the same physics.
comparable_classes = [c for c in self.__class__.mro()[1:]
if (issubclass(c, FLRW) and c is not FLRW)]
if other.__class__ not in comparable_classes:
return NotImplemented
# check if have equivalent parameters
# check that all parameters in 'other' match those in 'self', that
# 'other' has no extra parameters (case (2)) except for 'Ode0', and
# that 'other' is flat
params_eq = (
set(self.__all_parameters__) == set(other.__all_parameters__) # no extra
and all(np.all(getattr(self, k) == getattr(other, k)) # equal
for k in self.__parameters__)
and other.is_flat
)
return params_eq
class LambdaCDM(FLRW):
"""FLRW cosmology with a cosmological constant and curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of the cosmological constant in units of
the critical density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import LambdaCDM
>>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0)
if self._Ok0 == 0:
self._optimize_flat_norad()
else:
self._comoving_distance_z1z2 = self._elliptic_comoving_distance_z1z2
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list)
def _optimize_flat_norad(self):
"""Set optimizations for flat LCDM cosmologies with no radiation."""
# Special-case Om0=0 (de Sitter) and Om0=1 (Einstein-de Sitter).
# The dS case is required because the hypergeometric case
# for Omega_M=0 would lead to an infinity in its argument.
# The EdS case is three times faster than the hypergeometric.
if self._Om0 == 0:
self._comoving_distance_z1z2 = self._dS_comoving_distance_z1z2
self._age = self._dS_age
self._lookback_time = self._dS_lookback_time
elif self._Om0 == 1:
self._comoving_distance_z1z2 = self._EdS_comoving_distance_z1z2
self._age = self._EdS_age
self._lookback_time = self._EdS_lookback_time
else:
self._comoving_distance_z1z2 = self._hypergeometric_comoving_distance_z1z2
self._age = self._flat_age
self._lookback_time = self._flat_lookback_time
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = -1`.
"""
z = aszarr(z)
return -1.0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by :math:`I = 1`.
"""
z = aszarr(z)
return np.ones(z.shape) if hasattr(z, "shape") else 1.0
def _elliptic_comoving_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero.
For :math:`\Omega_{rad} = 0` the comoving distance can be directly
calculated as an elliptic integral [1]_.
Not valid or appropriate for flat cosmologies (Ok0=0).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
References
----------
.. [1] Kantowski, R., Kao, J., & Thomas, R. (2000). Distance-Redshift
in Inhomogeneous FLRW. arXiv e-prints, astro-ph/0002334.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
# The analytic solution is not valid for any of Om0, Ode0, Ok0 == 0.
# Use the explicit integral solution for these cases.
if self._Om0 == 0 or self._Ode0 == 0 or self._Ok0 == 0:
return self._integral_comoving_distance_z1z2(z1, z2)
b = -(27. / 2) * self._Om0**2 * self._Ode0 / self._Ok0**3
kappa = b / abs(b)
if (b < 0) or (2 < b):
def phi_z(Om0, Ok0, kappa, y1, A, z):
return np.arccos(((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 - A) /
((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 + A))
v_k = pow(kappa * (b - 1) + sqrt(b * (b - 2)), 1. / 3)
y1 = (-1 + kappa * (v_k + 1 / v_k)) / 3
A = sqrt(y1 * (3 * y1 + 2))
g = 1 / sqrt(A)
k2 = (2 * A + kappa * (1 + 3 * y1)) / (4 * A)
phi_z1 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z1)
phi_z2 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z2)
# Get lower-right 0<b<2 solution in Om0, Ode0 plane.
# For the upper-left 0<b<2 solution the Big Bang didn't happen.
elif (0 < b) and (b < 2) and self._Om0 > self._Ode0:
def phi_z(Om0, Ok0, y1, y2, z):
return np.arcsin(np.sqrt((y1 - y2) /
((z + 1.0) * Om0 / abs(Ok0) + y1)))
yb = cos(acos(1 - b) / 3)
yc = sqrt(3) * sin(acos(1 - b) / 3)
y1 = (1. / 3) * (-1 + yb + yc)
y2 = (1. / 3) * (-1 - 2 * yb)
y3 = (1. / 3) * (-1 + yb - yc)
g = 2 / sqrt(y1 - y2)
k2 = (y1 - y3) / (y1 - y2)
phi_z1 = phi_z(self._Om0, self._Ok0, y1, y2, z1)
phi_z2 = phi_z(self._Om0, self._Ok0, y1, y2, z2)
else:
return self._integral_comoving_distance_z1z2(z1, z2)
prefactor = self._hubble_distance / sqrt(abs(self._Ok0))
return prefactor * g * (ellipkinc(phi_z1, k2) - ellipkinc(phi_z2, k2))
def _dS_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2`` in a flat, :math:`\Omega_{\Lambda}=1` cosmology
(de Sitter).
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
The de Sitter case has an analytic solution.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
return self._hubble_distance * (z2 - z1)
def _EdS_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2`` in a flat, :math:`\Omega_M=1` cosmology
(Einstein - de Sitter).
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
For :math:`\Omega_M=1`, :math:`\Omega_{rad}=0` the comoving distance
has an analytic solution.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
prefactor = 2 * self._hubble_distance
return prefactor * ((z1 + 1.0)**(-1./2) - (z2 + 1.0)**(-1./2))
def _hypergeometric_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
For :math:`\Omega_{rad} = 0` the comoving distance can be directly
calculated as a hypergeometric function [1]_.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
References
----------
.. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical
expressions and numerical evaluation of the luminosity distance
in a flat cosmology. MNRAS, 468(1), 927-930.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
s = ((1 - self._Om0) / self._Om0) ** (1./3)
# Use np.sqrt here to handle negative s (Om0>1).
prefactor = self._hubble_distance / np.sqrt(s * self._Om0)
return prefactor * (self._T_hypergeometric(s / (z1 + 1.0)) -
self._T_hypergeometric(s / (z2 + 1.0)))
def _T_hypergeometric(self, x):
r"""Compute value using Gauss Hypergeometric function 2F1.
.. math::
   T(x) = 2 \sqrt{x} \, {}_{2}F_{1}\left(\frac{1}{6}, \frac{1}{2};
                                         \frac{7}{6}; -x^3 \right)
Notes
-----
The :func:`scipy.special.hyp2f1` code already implements the
hypergeometric transformation suggested by Baes et al. [1]_ for use in
actual numerical evaluations.
References
----------
.. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical
expressions and numerical evaluation of the luminosity distance
in a flat cosmology. MNRAS, 468(1), 927-930.
"""
return 2 * np.sqrt(x) * hyp2f1(1./6, 1./2, 7./6, -x**3)
def _dS_age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
The age of a de Sitter Universe is infinite.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
t = (inf if isinstance(z, Number) else np.full_like(z, inf, dtype=float))
return self._hubble_time * t
def _EdS_age(self, z):
r"""Age of the universe in Gyr at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be calculated analytically [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
References
----------
.. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for
standard cosmology. PRD, 62(10), 103507.
"""
return (2./3) * self._hubble_time * (aszarr(z) + 1.0) ** (-1.5)
def _flat_age(self, z):
r"""Age of the universe in Gyr at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be calculated analytically [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
References
----------
.. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for
standard cosmology. PRD, 62(10), 103507.
"""
# Use np.sqrt, np.arcsinh instead of math.sqrt, math.asinh
# to handle properly the complex numbers for 1 - Om0 < 0
prefactor = (2./3) * self._hubble_time / np.emath.sqrt(1 - self._Om0)
arg = np.arcsinh(np.emath.sqrt((1 / self._Om0 - 1 + 0j) / (aszarr(z) + 1.0)**3))
return (prefactor * arg).real
def _EdS_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral.
The lookback time is here calculated based on the ``age(0) - age(z)``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._EdS_age(0) - self._EdS_age(z)
def _dS_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated.
.. math::
   a = e^{H t} \quad \text{(with } t = 0 \text{ at } z = 0\text{)} \\
   t = \frac{1}{H}\left(\ln 1 - \ln a\right) = \frac{1}{H} \ln(1 + z)
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * np.log(aszarr(z) + 1.0)
def _flat_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated.
The lookback time is here calculated based on the ``age(0) - age(z)``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._flat_age(0) - self._flat_age(z)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
# We override this because it takes a particularly simple
# form for a cosmological constant
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0)**(-0.5)
class FlatLambdaCDM(FlatFLRWMixin, LambdaCDM):
"""FLRW cosmology with a cosmological constant and no curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=0.0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0)
# Repeat the optimization reassignments here because the init
# of the LambdaCDM above didn't actually create a flat cosmology.
# That was done through the explicit tweak setting self._Ok0.
self._optimize_flat_norad()
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
# We override this because it takes a particularly simple
# form for a cosmological constant
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0)**(-0.5)
class wCDM(FLRW):
"""
FLRW cosmology with a constant dark energy equation of state and curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import wCDM
>>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
w0 = Parameter(doc="Dark energy equation of state.", fvalidate="float")
def __init__(self, H0, Om0, Ode0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04,
m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
self.w0 = w0
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = w_0`.
"""
z = aszarr(z)
return self._w0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
:math:`I = \left(1 + z\right)^{3\left(1 + w_0\right)}`
"""
return (aszarr(z) + 1.0) ** (3.0 * (1. + self._w0))
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * zp1 ** (3. * (1. + self._w0)))
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * zp1 ** (3. * (1. + self._w0)))**(-0.5)
class FlatwCDM(FlatFLRWMixin, wCDM):
"""
FLRW cosmology with a constant dark energy equation of state and no spatial
curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import FlatwCDM
>>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=0.0, w0=w0, Tcmb0=Tcmb0,
Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._w0)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 3 * (Or * zp1 + self._Om0) +
self._Ode0 * zp1 ** (3. * (1 + self._w0)))
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
            Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 3 * (Or * zp1 + self._Om0) +
self._Ode0 * zp1 ** (3. * (1. + self._w0)))**(-0.5)
class w0waCDM(FLRW):
r"""FLRW cosmology with a CPL dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the
CPL form as described in Chevallier & Polarski [1]_ and Linder [2]_:
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import w0waCDM
>>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
wa = Parameter(doc="Negative derivative of dark energy equation of state w.r.t. a.",
fvalidate="float")
def __init__(self, H0, Om0, Ode0, w0=-1.0, wa=0.0, Tcmb0=0.0*u.K, Neff=3.04,
m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
self.w0 = w0
self.wa = wa
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wa)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
            The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is
:math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \frac{z}{1+z}`.
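        Examples
        --------
        A minimal illustration (the parameter values are arbitrary):
        >>> from astropy.cosmology import w0waCDM
        >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
        >>> cosmo.w(0.5)  # doctest: +FLOAT_CMP
        -0.8333333333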
"""
z = aszarr(z)
return self._w0 + self._wa * z / (z + 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)}
\exp \left(-3 w_a \frac{z}{1+z}\right)
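        Examples
        --------
        A minimal consistency check against the formula above (the parameter
        values are arbitrary):
        >>> import numpy as np
        >>> from astropy.cosmology import w0waCDM
        >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
        >>> z = 0.5
        >>> expected = ((1 + z) ** (3 * (1 + cosmo.w0 + cosmo.wa))
        ...             * np.exp(-3 * cosmo.wa * z / (1 + z)))
        >>> bool(np.isclose(cosmo.de_density_scale(z), expected))
        True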
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
return zp1 ** (3 * (1 + self._w0 + self._wa)) * np.exp(-3 * self._wa * z / zp1)
class Flatw0waCDM(FlatFLRWMixin, w0waCDM):
"""FLRW cosmology with a CPL dark energy equation of state and no
curvature.
The equation for the dark energy equation of state uses the CPL form as
described in Chevallier & Polarski [1]_ and Linder [2]_:
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import Flatw0waCDM
>>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
def __init__(self, H0, Om0, w0=-1.0, wa=0.0, Tcmb0=0.0*u.K, Neff=3.04,
m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=0.0, w0=w0, wa=wa, Tcmb0=Tcmb0,
Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._w0, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wa)
class wpwaCDM(FLRW):
r"""
FLRW cosmology with a CPL dark energy equation of state, a pivot redshift,
and curvature.
The equation for the dark energy equation of state uses the CPL form as
described in Chevallier & Polarski [1]_ and Linder [2]_, but modified to
have a pivot redshift as in the findings of the Dark Energy Task Force
    [3]_: :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a (1/(1+z_p) - 1/(1+z))`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
wp : float, optional
Dark energy equation of state at the pivot redshift zp. This is
pressure/density for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0.
zp : float or quantity-like ['redshift'], optional
        Pivot redshift -- the redshift where w(z) = wp.
    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import wpwaCDM
>>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
.. [3] Albrecht, A., Amendola, L., Bernstein, G., Clowe, D., Eisenstein,
D., Guzzo, L., Hirata, C., Huterer, D., Kirshner, R., Kolb, E., &
Nichol, R. (2009). Findings of the Joint Dark Energy Mission Figure
of Merit Science Working Group. arXiv e-prints, arXiv:0901.0721.
"""
wp = Parameter(doc="Dark energy equation of state at the pivot redshift zp.", fvalidate="float")
wa = Parameter(doc="Negative derivative of dark energy equation of state w.r.t. a.",
fvalidate="float")
zp = Parameter(doc="The pivot redshift, where w(z) = wp.", unit=cu.redshift)
def __init__(self, H0, Om0, Ode0, wp=-1.0, wa=0.0, zp=0.0 * cu.redshift,
Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV, Ob0=None, *,
name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
self.wp = wp
self.wa = wa
self.zp = zp
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
apiv = 1.0 / (1.0 + self._zp.value)
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._wp, apiv, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._wp, apiv, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._wp,
apiv, self._wa)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
            The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = w_p + w_a (a_p - a)` where
        :math:`a = 1/(1+z)` and :math:`a_p = 1/(1+z_p)`.
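        Examples
        --------
        At the pivot redshift the equation of state equals ``wp`` (the
        parameter values are arbitrary):
        >>> from astropy.cosmology import wpwaCDM
        >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)
        >>> cosmo.w(0.4)  # doctest: +FLOAT_CMP
        -0.9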
"""
apiv = 1.0 / (1.0 + self._zp.value)
return self._wp + self._wa * (apiv - 1.0 / (aszarr(z) + 1.0))
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
a_p = \frac{1}{1 + z_p}
I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)}
\exp \left(-3 w_a \frac{z}{1+z}\right)
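        Examples
        --------
        At ``z = 0`` the scaling is unity by construction (the parameter
        values are arbitrary):
        >>> from astropy.cosmology import wpwaCDM
        >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)
        >>> float(cosmo.de_density_scale(0.0))
        1.0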
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
apiv = 1. / (1. + self._zp.value)
return zp1 ** (3. * (1. + self._wp + apiv * self._wa)) * \
np.exp(-3. * self._wa * z / zp1)
class w0wzCDM(FLRW):
"""
FLRW cosmology with a variable dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the simple form:
:math:`w(z) = w_0 + w_z z`.
This form is not recommended for z > 1.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0. This is pressure/density for
dark energy in units where c=1.
wz : float, optional
Derivative of the dark energy equation of state with respect to z.
A cosmological constant has w0=-1.0 and wz=0.0.
    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K].
        Default: 0 [K]. Setting this to zero will turn off both photons
        and neutrinos (even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import w0wzCDM
>>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
wz = Parameter(doc="Derivative of the dark energy equation of state w.r.t. z.", fvalidate="float")
def __init__(self, H0, Om0, Ode0, w0=-1.0, wz=0.0, Tcmb0=0.0*u.K, Neff=3.04,
m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
self.w0 = w0
self.wz = wz
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0, self._wz)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0, self._wz)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wz)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is given by :math:`w(z) = w_0 + w_z z`.
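        Examples
        --------
        A minimal illustration (the parameter values are arbitrary):
        >>> from astropy.cosmology import w0wzCDM
        >>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
        >>> cosmo.w(0.5)  # doctest: +FLOAT_CMP
        -0.8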
"""
return self._w0 + self._wz * aszarr(z)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
           I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
               \exp \left(3 w_z z\right)
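        Examples
        --------
        A minimal consistency check against the formula above (the parameter
        values are arbitrary):
        >>> import numpy as np
        >>> from astropy.cosmology import w0wzCDM
        >>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
        >>> z = 0.5
        >>> expected = ((1 + z) ** (3 * (1 + cosmo.w0 - cosmo.wz))
        ...             * np.exp(3 * cosmo.wz * z))
        >>> bool(np.isclose(cosmo.de_density_scale(z), expected))
        True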
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
        # Integrating 3 (1 + w0 + wz z') / (1 + z') from 0 to z gives
        # 3 (1 + w0 - wz) ln(1 + z) + 3 wz z, hence the positive sign in
        # the exponential term.
        return zp1 ** (3. * (1. + self._w0 - self._wz)) * np.exp(3. * self._wz * z)
|
e49231749ad552e062197ebbba862da1c031a01008ec76d5c0a6af792afaf50b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains dictionaries with sets of parameters for a
given cosmology.
Each cosmology has the following parameters defined:
========== =====================================
Oc0 Omega cold dark matter at z=0
Ob0 Omega baryon at z=0
Om0 Omega matter at z=0
flat Is this assumed flat? If not, Ode0 must be specified
Ode0 Omega dark energy at z=0 if flat is False
H0 Hubble parameter at z=0 in km/s/Mpc
n Density perturbation spectral index
Tcmb0 Current temperature of the CMB
Neff Effective number of neutrino species
m_nu Assumed mass of neutrino species, in eV.
sigma8 Density perturbation amplitude
tau Ionisation optical depth
z_reion Redshift of hydrogen reionisation
t0 Age of the universe in Gyr
reference Reference for the parameters
========== =====================================
The list of cosmologies available is given by the tuple
`available`. Current cosmologies available:
Planck 2018 (Planck18) parameters from Planck Collaboration 2020,
A&A, 641, A6 (Paper VI), Table 2 (TT, TE, EE + lowE + lensing + BAO)
Planck 2015 (Planck15) parameters from Planck Collaboration 2016, A&A, 594, A13
(Paper XIII), Table 4 (TT, TE, EE + lowP + lensing + ext)
Planck 2013 (Planck13) parameters from Planck Collaboration 2014, A&A, 571, A16
(Paper XVI), Table 5 (Planck + WP + highL + BAO)
WMAP 9 year (WMAP9) parameters from Hinshaw et al. 2013, ApJS, 208, 19,
doi: 10.1088/0067-0049/208/2/19. Table 4 (WMAP9 + eCMB + BAO + H0)
WMAP 7 year (WMAP7) parameters from Komatsu et al. 2011, ApJS, 192, 18,
doi: 10.1088/0067-0049/192/2/18. Table 1 (WMAP + BAO + H0 ML).
WMAP 5 year (WMAP5) parameters from Komatsu et al. 2009, ApJS, 180, 330,
doi: 10.1088/0067-0049/180/2/330. Table 1 (WMAP + BAO + SN ML).
WMAP 3 year (WMAP3) parameters from Spergel et al. 2007, ApJS, 170, 377,
doi: 10.1086/513700. Table 6. (WMAP + SNGold) Obtained from https://lambda.gsfc.nasa.gov/product/map/dr2/params/lcdm_wmap_sngold.cfm
Tcmb0 and Neff are the standard values as also used for WMAP5, 7, 9.
Pending WMAP team approval and subject to change.
WMAP 1 year (WMAP1) parameters from Spergel et al. 2003, ApJS, 148, 175,
doi: 10.1086/377226. Table 7 (WMAP + CBI + ACBAR + 2dFGRS + Lya)
Tcmb0 and Neff are the standard values as also used for WMAP5, 7, 9.
Pending WMAP team approval and subject to change.
"""
# STDLIB
import sys
from types import MappingProxyType
# LOCAL
from .realizations import available
__all__ = ["available"] + list(available)
def __getattr__(name):
"""Get parameters of cosmology representations with lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
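    Examples
    --------
    A minimal sketch of the lazy attribute access (the import path assumes
    this module is ``astropy.cosmology.parameters``):
    >>> from astropy.cosmology import parameters
    >>> parameters.Planck18["H0"]  # doctest: +FLOAT_CMP
    <Quantity 67.66 km / (Mpc s)>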
"""
from astropy.cosmology import realizations
cosmo = getattr(realizations, name)
m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True)
proxy = MappingProxyType(m)
# Cache in this module so `__getattr__` is only called once per `name`.
setattr(sys.modules[__name__], name, proxy)
return proxy
def __dir__():
"""Directory, including lazily-imported objects."""
return __all__
|
725c8903b83c94a766cea2c7f666157b3547c8eac417ec10d93a757730166298 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Cosmological units and equivalencies.
""" # (newline needed for unit summary)
import astropy.units as u
from astropy.units.utils import generate_unit_summary as _generate_unit_summary
__all__ = ["littleh", "redshift",
# redshift equivalencies
"dimensionless_redshift", "with_redshift",
"redshift_distance", "redshift_hubble", "redshift_temperature",
# other equivalencies
"with_H0"]
__doctest_requires__ = {('with_redshift', 'redshift_distance'): ['scipy']}
_ns = globals()
###############################################################################
# Cosmological Units
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit.
redshift = u.def_unit(['redshift'], prefixes=False, namespace=_ns,
doc="Cosmological redshift.", format={'latex': r''})
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit (see
# https://arxiv.org/pdf/1308.4150.pdf for more)
# Also note that h or h100 or h_100 would be a better name, but they either
# conflict or have numbers in them, which is disallowed
littleh = u.def_unit(['littleh'], namespace=_ns, prefixes=False,
doc='Reduced/"dimensionless" Hubble constant',
format={'latex': r'h_{100}'})
###############################################################################
# Equivalencies
def dimensionless_redshift():
"""Allow redshift to be 1-to-1 equivalent to dimensionless.
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the redshift is raised,
and independent of whether it is part of a more complicated unit.
It is similar to u.dimensionless_angles() in this respect.
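    Examples
    --------
    A minimal illustration (the value is arbitrary):
    >>> import astropy.units as u
    >>> import astropy.cosmology.units as cu
    >>> (3 * cu.redshift).to(u.dimensionless_unscaled, cu.dimensionless_redshift())
    <Quantity 3.>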
"""
return u.Equivalency([(redshift, None)], "dimensionless_redshift")
def redshift_distance(cosmology=None, kind="comoving", **atzkw):
"""Convert quantities between redshift and distance.
    Care should be taken not to misinterpret a relativistic, gravitational,
    etc. redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
    kind : {'comoving', 'lookback', 'luminosity'}, optional
The distance type for the Equivalency.
Note this does NOT include the angular diameter distance as this
distance measure is not monotonic.
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
        Equivalency between redshift and distance.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving")) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
allowed_kinds = ('comoving', 'lookback', 'luminosity')
if kind not in allowed_kinds:
raise ValueError(f"`kind` is not one of {allowed_kinds}")
method = getattr(cosmology, kind + "_distance")
def z_to_distance(z):
"""Redshift to distance."""
return method(z)
def distance_to_z(d):
"""Distance to redshift."""
return z_at_value(method, d << u.Mpc, **atzkw)
return u.Equivalency([(redshift, u.Mpc, z_to_distance, distance_to_z)],
"redshift_distance",
{'cosmology': cosmology, "distance": kind})
def redshift_hubble(cosmology=None, **atzkw):
"""Convert quantities between redshift and Hubble parameter and little-h.
    Care should be taken not to misinterpret a relativistic, gravitational,
    etc. redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and Hubble parameter and little-h unit.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_hubble(z):
"""Redshift to Hubble parameter."""
return cosmology.H(z)
def hubble_to_z(H):
"""Hubble parameter to redshift."""
return z_at_value(cosmology.H, H << (u.km / u.s / u.Mpc), **atzkw)
def z_to_littleh(z):
"""Redshift to :math:`h`-unit Quantity."""
return z_to_hubble(z).to_value(u.km / u.s / u.Mpc) / 100 * littleh
def littleh_to_z(h):
""":math:`h`-unit Quantity to redshift."""
return hubble_to_z(h * 100)
return u.Equivalency([(redshift, u.km / u.s / u.Mpc, z_to_hubble, hubble_to_z),
(redshift, littleh, z_to_littleh, littleh_to_z)],
"redshift_hubble",
{'cosmology': cosmology})
def redshift_temperature(cosmology=None, **atzkw):
"""Convert quantities between redshift and CMB temperature.
    Care should be taken not to misinterpret a relativistic, gravitational,
    etc. redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.K, cu.redshift_temperature(WMAP9))
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_Tcmb(z):
return cosmology.Tcmb(z)
def Tcmb_to_z(T):
return z_at_value(cosmology.Tcmb, T << u.K, **atzkw)
return u.Equivalency([(redshift, u.K, z_to_Tcmb, Tcmb_to_z)],
"redshift_temperature",
{'cosmology': cosmology})
def with_redshift(cosmology=None, *,
distance="comoving", hubble=True, Tcmb=True,
atzkw=None):
"""Convert quantities between measures of cosmological distance.
Note: by default all equivalencies are on and must be explicitly turned off.
    Care should be taken not to misinterpret a relativistic, gravitational,
    etc. redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If `None`, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only)
The type of distance equivalency to create or `None`.
Default is 'comoving'.
hubble : bool (optional, keyword-only)
Whether to create a Hubble parameter <-> redshift equivalency, using
``Cosmology.H``. Default is `True`.
Tcmb : bool (optional, keyword-only)
Whether to create a CMB temperature <-> redshift equivalency, using
``Cosmology.Tcmb``. Default is `True`.
atzkw : dict or None (optional, keyword-only)
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
With equivalencies between redshift and distance / Hubble / temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> equivalency = cu.with_redshift(WMAP9)
>>> z = 1100 * cu.redshift
Redshift to (comoving) distance:
>>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
Redshift to the Hubble parameter:
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
Redshift to CMB temperature:
>>> z.to(u.K, equivalency)
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
atzkw = atzkw if atzkw is not None else {}
equivs = [] # will append as built
# Hubble <-> Redshift
if hubble:
equivs.extend(redshift_hubble(cosmology, **atzkw))
# CMB Temperature <-> Redshift
if Tcmb:
equivs.extend(redshift_temperature(cosmology, **atzkw))
# Distance <-> Redshift, but need to choose which distance
if distance is not None:
equivs.extend(redshift_distance(cosmology, kind=distance, **atzkw))
# -----------
return u.Equivalency(equivs, "with_redshift",
{'cosmology': cosmology,
'distance': distance, 'hubble': hubble, 'Tcmb': Tcmb})
# ===================================================================
def with_H0(H0=None):
"""
Convert between quantities with little-h and the equivalent physical units.
Parameters
----------
H0 : None or `~astropy.units.Quantity` ['frequency']
The value of the Hubble constant to assume. If a
`~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If
`None` (default), use the ``H0`` attribute from
:mod:`~astropy.cosmology.default_cosmology`.
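    Returns
    -------
    `~astropy.units.equivalencies.Equivalency`
        Equivalency between little-h units and the corresponding physical
        units.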
References
----------
For an illuminating discussion on why you may or may not want to use
little-h at all, see https://arxiv.org/pdf/1308.4150.pdf
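    Examples
    --------
    A minimal illustration, assuming :math:`H_0 = 70` km/s/Mpc (so h = 0.7):
    >>> import astropy.units as u
    >>> import astropy.cosmology.units as cu
    >>> distance = 100 * (u.Mpc / cu.littleh)
    >>> distance.to(u.Mpc, cu.with_H0(70 * u.km / u.s / u.Mpc))  # doctest: +FLOAT_CMP
    <Quantity 142.85714286 Mpc>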
"""
if H0 is None:
from .realizations import default_cosmology
H0 = default_cosmology.get().H0
h100_val_unit = u.Unit(100/(H0.to_value(u.km / u.s / u.Mpc)) * littleh)
return u.Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0})
# ===================================================================
# Enable the set of default equivalencies.
# If the cosmology package is imported, this is added to the list astropy-wide.
u.add_enabled_equivalencies(dimensionless_redshift())
# =============================================================================
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
if __doc__ is not None:
__doc__ += _generate_unit_summary(_ns)
|
5a6310e2fe97f6e9a446fe4f52ca0b538260e9689927ed6104d3a303a1406145 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import pathlib
import sys
# LOCAL
from astropy.utils.data import get_pkg_data_path
from astropy.utils.decorators import deprecated
from astropy.utils.state import ScienceState
from .core import Cosmology
_COSMOLOGY_DATA_DIR = pathlib.Path(get_pkg_data_path("cosmology", "data", package="astropy"))
available = tuple(sorted([p.stem for p in _COSMOLOGY_DATA_DIR.glob("*.ecsv")]))
__all__ = ["available", "default_cosmology"] + list(available)
__doctest_requires__ = {"*": ["scipy"]}
def __getattr__(name):
"""Make specific realizations from data files with lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations`
"""
if name not in available:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
cosmo = Cosmology.read(str(_COSMOLOGY_DATA_DIR / name) + ".ecsv", format="ascii.ecsv")
cosmo.__doc__ = (f"{name} instance of {cosmo.__class__.__qualname__} "
f"cosmology\n(from {cosmo.meta['reference']})")
# Cache in this module so `__getattr__` is only called once per `name`.
setattr(sys.modules[__name__], name, cosmo)
return cosmo
def __dir__():
"""Directory, including lazily-imported objects."""
return __all__
#########################################################################
# The science state below contains the current cosmology.
#########################################################################
class default_cosmology(ScienceState):
"""The default cosmology to use.
To change it::
>>> from astropy.cosmology import default_cosmology, WMAP7
>>> with default_cosmology.set(WMAP7):
... # WMAP7 cosmology in effect
... pass
Or, you may use a string::
>>> with default_cosmology.set('WMAP7'):
... # WMAP7 cosmology in effect
... pass
To get the default cosmology:
>>> default_cosmology.get()
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, ...
To get a specific cosmology:
>>> default_cosmology.get("Planck13")
FlatLambdaCDM(name="Planck13", H0=67.77 km / (Mpc s), Om0=0.30712, ...
"""
_default_value = "Planck18"
_value = "Planck18"
@classmethod
def get(cls, key=None):
"""Get the science state value of ``key``.
Parameters
----------
key : str or None
The built-in |Cosmology| realization to retrieve.
If None (default) get the current value.
Returns
-------
`astropy.cosmology.Cosmology` or None
`None` only if ``key`` is "no_default"
Raises
------
TypeError
If ``key`` is not a str, |Cosmology|, or None.
ValueError
If ``key`` is a str, but not for a built-in Cosmology
Examples
--------
To get the default cosmology:
>>> default_cosmology.get()
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, ...
To get a specific cosmology:
>>> default_cosmology.get("Planck13")
FlatLambdaCDM(name="Planck13", H0=67.77 km / (Mpc s), Om0=0.30712, ...
"""
if key is None:
key = cls._value
if isinstance(key, str):
# special-case one string
if key == "no_default":
return None
# all other options should be built-in realizations
try:
value = getattr(sys.modules[__name__], key)
except AttributeError:
raise ValueError(f"Unknown cosmology {key!r}. "
f"Valid cosmologies:\n{available}")
elif isinstance(key, Cosmology):
value = key
else:
raise TypeError("'key' must be must be None, a string, "
f"or Cosmology instance, not {type(key)}.")
# validate value to `Cosmology`, if not already
return cls.validate(value)
@deprecated("5.0", alternative="get")
@classmethod
def get_cosmology_from_string(cls, arg):
"""Return a cosmology instance from a string."""
return cls.get(arg)
@classmethod
def validate(cls, value):
"""Return a Cosmology given a value.
Parameters
----------
value : None, str, or `~astropy.cosmology.Cosmology`
Returns
-------
`~astropy.cosmology.Cosmology` instance
Raises
------
TypeError
If ``value`` is not a string or |Cosmology|.
"""
# None -> default
if value is None:
value = cls._default_value
# Parse to Cosmology. Error if cannot.
if isinstance(value, str):
value = cls.get(value)
elif not isinstance(value, Cosmology):
raise TypeError("default_cosmology must be a string or Cosmology instance, "
f"not {value}.")
return value
|
0cf697f9e13d46cb19ec2fcefcaeb5fa9783a44383a4a3f6f84010f6b5a099dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
from math import inf
from numbers import Number
import numpy as np
from astropy.units import Quantity
from astropy.utils import isiterable
from astropy.utils.decorators import deprecated
from . import units as cu
__all__ = [] # nothing is publicly scoped
__doctest_skip__ = ["inf_like", "vectorize_if_needed"]
def vectorize_redshift_method(func=None, nin=1):
"""Vectorize a method of redshift(s).
Parameters
----------
func : callable or None
method to wrap. If `None` returns a :func:`functools.partial`
with ``nin`` loaded.
nin : int
Number of positional redshift arguments.
Returns
-------
wrapper : callable
:func:`functools.wraps` of ``func`` where the first ``nin``
arguments are converted from |Quantity| to :class:`numpy.ndarray`.
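    Examples
    --------
    A minimal sketch of the scalar/array dispatch (``Dummy`` is a
    hypothetical class; the import path assumes this module is
    ``astropy.cosmology.utils``):
    >>> from astropy.cosmology.utils import vectorize_redshift_method
    >>> class Dummy:
    ...     @vectorize_redshift_method
    ...     def method(self, z):
    ...         return 2 * z
    >>> Dummy().method(1.0)  # scalar input dispatches directly
    2.0
    >>> Dummy().method([1.0, 2.0])  # array-like goes through np.vectorize
    array([2., 4.])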
"""
# allow for pie-syntax & setting nin
if func is None:
return functools.partial(vectorize_redshift_method, nin=nin)
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
"""
:func:`functools.wraps` of ``func`` where the first ``nin``
arguments are converted from |Quantity| to `numpy.ndarray` or scalar.
"""
# process inputs
# TODO! quantity-aware vectorization can simplify this.
zs = [z if not isinstance(z, Quantity) else z.to_value(cu.redshift)
for z in args[:nin]]
# scalar inputs
if all(isinstance(z, (Number, np.generic)) for z in zs):
return func(self, *zs, *args[nin:], **kwargs)
# non-scalar. use vectorized func
return wrapper.__vectorized__(self, *zs, *args[nin:], **kwargs)
wrapper.__vectorized__ = np.vectorize(func) # attach vectorized function
# TODO! use frompyfunc when can solve return type errors
return wrapper
@deprecated(
since="5.0",
message="vectorize_if_needed has been removed because it constructs a new ufunc on each call",
alternative="use a pre-vectorized function instead for a target array 'z'"
)
def vectorize_if_needed(f, *x, **vkw):
"""Helper function to vectorize scalar functions on array inputs.
Parameters
----------
f : callable
'f' must accept positional arguments and no mandatory keyword
arguments.
*x
Arguments into ``f``.
**vkw
Keyword arguments into :class:`numpy.vectorize`.
Examples
--------
>>> func = lambda x: x ** 2
>>> vectorize_if_needed(func, 2)
4
>>> vectorize_if_needed(func, [2, 3])
array([4, 9])
"""
return np.vectorize(f, **vkw)(*x) if any(map(isiterable, x)) else f(*x)
@deprecated(
since="5.0",
message="inf_like has been removed because it duplicates functionality provided by numpy.full_like()",
alternative="Use numpy.full_like(z, numpy.inf) instead for a target array 'z'"
)
def inf_like(x):
"""Return the shape of x with value infinity and dtype='float'.
Preserves 'shape' for both array and scalar inputs.
But always returns a float array, even if x is of integer type.
Parameters
----------
x : scalar or array-like
Must work with functions `numpy.isscalar` and `numpy.full_like` (if `x`
        is not a scalar).
Returns
-------
`math.inf` or ndarray[float] thereof
Returns a scalar `~math.inf` if `x` is a scalar, an array of floats
otherwise.
Examples
--------
>>> inf_like(0.) # float scalar
inf
>>> inf_like(1) # integer scalar should give float output
inf
>>> inf_like([0., 1., 2., 3.]) # float list
array([inf, inf, inf, inf])
>>> inf_like([0, 1, 2, 3]) # integer list should give float output
array([inf, inf, inf, inf])
"""
return inf if np.isscalar(x) else np.full_like(x, inf, dtype=float)
def aszarr(z):
"""
Redshift as a `~numbers.Number` or `~numpy.ndarray` / |Quantity| / |Column|.
Allows for any ndarray ducktype by checking for attribute "shape".
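    A minimal illustration (scalars pass through unchanged; redshift
    quantities are stripped to their values; the import path assumes this
    module is ``astropy.cosmology.utils``):
    >>> from astropy.cosmology.utils import aszarr
    >>> import astropy.cosmology.units as cu
    >>> aszarr(0.5)
    0.5
    >>> aszarr(0.5 * cu.redshift)
    0.5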
"""
if isinstance(z, (Number, np.generic)): # scalars
return z
elif hasattr(z, "shape"): # ducktypes NumPy array
if hasattr(z, "unit"): # Quantity Column
return (z << cu.redshift).value # for speed only use enabled equivs
return z
# not one of the preferred types: Number / array ducktype
return Quantity(z, cu.redshift).value
|
af167934d42107f854842b239855a5ae9899fd37e66c52fbcfb05dca24a053ff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import functools
import inspect
from types import FunctionType, MappingProxyType
import numpy as np
import astropy.units as u
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.utils.decorators import classproperty
from astropy.utils.metadata import MetaData
from .connect import CosmologyFromFormat, CosmologyRead, CosmologyToFormat, CosmologyWrite
from .parameter import Parameter
# Originally authored by Andrew Becker (becker@astro.washington.edu),
# and modified by Neil Crighton (neilcrighton@gmail.com), Roban Kramer
# (robanhk@gmail.com), and Nathaniel Starkman (n.starkman@mail.utoronto.ca).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"]
__doctest_requires__ = {} # needed until __getattr__ removed
# registry of cosmology classes with {key=name : value=class}
_COSMOLOGY_CLASSES = dict()
class CosmologyError(Exception):
pass
class Cosmology(metaclass=abc.ABCMeta):
"""Base-class for all Cosmologies.
Parameters
----------
*args
Arguments into the cosmology; used by subclasses, not this base class.
name : str or None (optional, keyword-only)
The name of the cosmology.
meta : dict or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
**kwargs
Arguments into the cosmology; used by subclasses, not this base class.
Notes
-----
Class instances are static -- you cannot (and should not) change the values
of the parameters. That is, all of the above attributes (except meta) are
read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
meta = MetaData()
# Unified I/O object interchange methods
from_format = UnifiedReadWriteMethod(CosmologyFromFormat)
to_format = UnifiedReadWriteMethod(CosmologyToFormat)
# Unified I/O read and write methods
read = UnifiedReadWriteMethod(CosmologyRead)
write = UnifiedReadWriteMethod(CosmologyWrite)
# Parameters
__parameters__ = ()
__all_parameters__ = ()
# ---------------------------------------------------------------
def __init_subclass__(cls):
super().__init_subclass__()
# -------------------
# Parameters
# Get parameters that are still Parameters, either in this class or above.
parameters = []
derived_parameters = []
for n in cls.__parameters__:
p = getattr(cls, n)
if isinstance(p, Parameter):
                (derived_parameters if p.derived else parameters).append(n)
# Add new parameter definitions
for n, v in cls.__dict__.items():
if n in parameters or n.startswith("_") or not isinstance(v, Parameter):
continue
            (derived_parameters if v.derived else parameters).append(n)
# reorder to match signature
ordered = [parameters.pop(parameters.index(n))
for n in cls._init_signature.parameters.keys()
if n in parameters]
parameters = ordered + parameters # place "unordered" at the end
cls.__parameters__ = tuple(parameters)
cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters)
# -------------------
# register as a Cosmology subclass
_COSMOLOGY_CLASSES[cls.__qualname__] = cls
@classproperty(lazy=True)
def _init_signature(cls):
"""Initialization signature (without 'self')."""
# get signature, dropping "self" by taking arguments [1:]
sig = inspect.signature(cls.__init__)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
return sig
# ---------------------------------------------------------------
def __init__(self, name=None, meta=None):
self._name = str(name) if name is not None else name
self.meta.update(meta or {})
@property
def name(self):
"""The name of the Cosmology instance."""
return self._name
@property
@abc.abstractmethod
def is_flat(self):
"""
Return bool; `True` if the cosmology is flat.
This is abstract and must be defined in subclasses.
"""
raise NotImplementedError("is_flat is not implemented")
def clone(self, *, meta=None, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, so ``clone()``
cannot be used to change between flat and non-flat cosmologies.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
**kwargs
Cosmology parameter (and name) modifications.
If any parameter is changed and a new name is not given, the name
will be set to "[old name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no modifications are requested, then a reference to this object
is returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> newcosmo = Planck13.clone(name="Modified Planck 2013", Om0=0.35)
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
"""
# Quick return check, taking advantage of the Cosmology immutability.
if meta is None and not kwargs:
return self
# There are changed parameter or metadata values.
# The name needs to be changed accordingly, if it wasn't already.
kwargs.setdefault("name", (self.name + " (modified)"
if self.name is not None else None))
# mix new meta into existing, preferring the former.
new_meta = {**self.meta, **(meta or {})}
# Mix kwargs into initial arguments, preferring the former.
new_init = {**self._init_arguments, "meta": new_meta, **kwargs}
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self._init_signature.bind_partial(**new_init)
# Return new instance, respecting args vs kwargs
return self.__class__(*ba.args, **ba.kwargs)
@property
def _init_arguments(self):
# parameters
kw = {n: getattr(self, n) for n in self.__parameters__}
# other info
kw["name"] = self.name
kw["meta"] = self.meta
return kw
# ---------------------------------------------------------------
# comparison methods
def is_equivalent(self, other, *, format=False):
r"""Check equivalence between Cosmologies.
Two cosmologies may be equivalent even if not the same class.
For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``.
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance
The object in which to compare.
        format : bool or None or str (optional, keyword-only)
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be
equivalent to a Cosmology.
`False` (default) will not allow conversion. `True` or `None` will,
and will use the auto-identification to try to infer the correct
format. A `str` is assumed to be the correct format to use when
converting.
Returns
-------
bool
True if cosmologies are equivalent, False otherwise.
Examples
--------
Two cosmologies may be equivalent even if not of the same class.
In this examples the ``LambdaCDM`` has ``Ode0`` set to the same value
calculated in ``FlatLambdaCDM``.
>>> import astropy.units as u
>>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM
>>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo1.is_equivalent(cosmo2)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmo3.is_equivalent(cosmo2)
False
Also, using the keyword argument, the notion of equivalence is extended
to any Python object that can be converted to a |Cosmology|.
>>> from astropy.cosmology import Planck18
>>> tbl = Planck18.to_format("astropy.table")
>>> Planck18.is_equivalent(tbl, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be
checked with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of
these kinds can still be checked for equivalence, but the correct
format string must be used.
>>> tbl = Planck18.to_format("yaml")
>>> Planck18.is_equivalent(tbl, format="yaml")
True
"""
# Allow for different formats to be considered equivalent.
if format is not False:
format = None if format is True else format # str->str, None/True->None
try:
other = Cosmology.from_format(other, format=format)
except Exception: # TODO! should enforce only TypeError
return False
# The options are: 1) same class & parameters; 2) same class, different
# parameters; 3) different classes, equivalent parameters; 4) different
# classes, different parameters. (1) & (3) => True, (2) & (4) => False.
equiv = self.__equiv__(other)
if equiv is NotImplemented and hasattr(other, "__equiv__"):
equiv = other.__equiv__(self) # that failed, try from 'other'
return equiv if equiv is not NotImplemented else False
def __equiv__(self, other):
"""Cosmology equivalence. Use ``.is_equivalent()`` for actual check!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance
The object in which to compare.
Returns
-------
bool or `NotImplemented`
`NotImplemented` if 'other' is from a different class.
`True` if 'other' is of the same class and has matching parameters
and parameter values. `False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__equiv__
# check all parameters in 'other' match those in 'self' and 'other' has
# no extra parameters (latter part should never happen b/c same class)
params_eq = (set(self.__all_parameters__) == set(other.__all_parameters__)
and all(np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__))
return params_eq
def __eq__(self, other):
"""Check equality between Cosmologies.
Checks the Parameters and immutable fields (i.e. not "meta").
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance
The object in which to compare.
Returns
-------
bool
`True` if Parameters and names are the same, `False` otherwise.
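        Examples
        --------
        Equality requires both matching parameters and matching names,
        unlike ``is_equivalent()``, which ignores the name:
        >>> from astropy.cosmology import Planck18
        >>> Planck18 == Planck18
        True
        >>> Planck18 == Planck18.clone(name="renamed")
        False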
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__eq__
# check all parameters in 'other' match those in 'self'
equivalent = self.__equiv__(other)
# non-Parameter checks: name
name_eq = (self.name == other.name)
return equivalent and name_eq
# ---------------------------------------------------------------
def __repr__(self):
ps = {k: getattr(self, k) for k in self.__parameters__} # values
cps = {k: getattr(self.__class__, k) for k in self.__parameters__} # Parameter objects
namelead = f"{self.__class__.__qualname__}("
if self.name is not None:
namelead += f"name=\"{self.name}\", "
# nicely formatted parameters
fmtps = (k + '=' + format(v, cps[k].format_spec if v is not None else '')
for k, v in ps.items())
return namelead + ", ".join(fmtps) + ")"
def __astropy_table__(self, cls, copy, **kwargs):
"""Return a `~astropy.table.Table` of type ``cls``.
Parameters
----------
cls : type
Astropy ``Table`` class or subclass.
copy : bool
Ignored.
**kwargs : dict, optional
Additional keyword arguments. Passed to ``self.to_format()``.
See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs.
Returns
-------
`astropy.table.Table` or subclass instance
Instance of type ``cls``.
"""
return self.to_format("astropy.table", cls=cls, **kwargs)
class FlatCosmologyMixin(metaclass=abc.ABCMeta):
"""
Mixin class for flat cosmologies. Do NOT instantiate directly.
Note that all instances of ``FlatCosmologyMixin`` are flat, but not all
flat cosmologies are instances of ``FlatCosmologyMixin``. As example,
``LambdaCDM`` **may** be flat (for the a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
@property
def is_flat(self):
"""Return `True`, the cosmology is flat."""
return True
# -----------------------------------------------------------------------------
def __getattr__(attr):
from . import flrw
if hasattr(flrw, attr):
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
f"`astropy.cosmology.core.{attr}` has been moved (since v5.0) and "
f"should be imported as ``from astropy.cosmology import {attr}``."
" In future this will raise an exception.",
AstropyDeprecationWarning
)
return getattr(flrw, attr)
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
359ff16a32d960f853bcbe1012d4994b8646a21467e97ff1d3d089707f5331a1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import astropy.units as u
from astropy.utils.decorators import classproperty
__all__ = ["Parameter"]
class Parameter:
r"""Cosmological parameter (descriptor).
Should only be used with a :class:`~astropy.cosmology.Cosmology` subclass.
Parameters
----------
derived : bool (optional, keyword-only)
Whether the Parameter is 'derived', default `False`.
Derived parameters behave similarly to normal parameters, but are not
sorted by the |Cosmology| signature (probably not there) and are not
included in all methods. For reference, see ``Ode0`` in
        ``FlatFLRWMixin``, which removes :math:`\Omega_{de,0}` as an
independent parameter (:math:`\Omega_{de,0} \equiv 1 - \Omega_{tot}`).
unit : unit-like or None (optional, keyword-only)
The `~astropy.units.Unit` for the Parameter. If None (default) no
        unit is assumed.
equivalencies : `~astropy.units.Equivalency` or sequence thereof
Unit equivalencies for this Parameter.
fvalidate : callable[[object, object, Any], Any] or str (optional, keyword-only)
Function to validate the Parameter value from instances of the
cosmology class. If "default", uses default validator to assign units
(with equivalencies), if Parameter has units.
For other valid string options, see ``Parameter._registry_validators``.
'fvalidate' can also be set through a decorator with
:meth:`~astropy.cosmology.Parameter.validator`.
fmt : str (optional, keyword-only)
`format` specification, used when making string representation
of the containing Cosmology.
See https://docs.python.org/3/library/string.html#formatspec
doc : str or None (optional, keyword-only)
Parameter description.
Examples
--------
For worked examples see :class:`~astropy.cosmology.FLRW`.
"""
_registry_validators = {}
def __init__(self, *, derived=False, unit=None, equivalencies=[],
fvalidate="default", fmt="", doc=None):
# attribute name on container cosmology class.
# really set in __set_name__, but if Parameter is not init'ed as a
# descriptor this ensures that the attributes exist.
self._attr_name = self._attr_name_private = None
self._derived = derived
self._fmt = str(fmt) # @property is `format_spec`
self.__doc__ = doc
# units stuff
self._unit = u.Unit(unit) if unit is not None else None
self._equivalencies = equivalencies
# Parse registered `fvalidate`
self._fvalidate_in = fvalidate # Always store input fvalidate.
if callable(fvalidate):
pass
elif fvalidate in self._registry_validators:
fvalidate = self._registry_validators[fvalidate]
elif isinstance(fvalidate, str):
raise ValueError("`fvalidate`, if str, must be in "
f"{self._registry_validators.keys()}")
else:
raise TypeError("`fvalidate` must be a function or "
f"{self._registry_validators.keys()}")
self._fvalidate = fvalidate
def __set_name__(self, cosmo_cls, name):
# attribute name on container cosmology class
self._attr_name = name
self._attr_name_private = "_" + name
@property
def name(self):
"""Parameter name."""
return self._attr_name
@property
def unit(self):
"""Parameter unit."""
return self._unit
@property
def equivalencies(self):
"""Equivalencies used when initializing Parameter."""
return self._equivalencies
@property
def format_spec(self):
"""String format specification."""
return self._fmt
@property
def derived(self):
"""Whether the Parameter is derived; true parameters are not."""
return self._derived
# -------------------------------------------
# descriptor and property-like methods
def __get__(self, cosmology, cosmo_cls=None):
# get from class
if cosmology is None:
return self
return getattr(cosmology, self._attr_name_private)
def __set__(self, cosmology, value):
"""Allows attribute setting once. Raises AttributeError subsequently."""
# raise error if setting 2nd time.
if hasattr(cosmology, self._attr_name_private):
raise AttributeError("can't set attribute")
# validate value, generally setting units if present
value = self.validate(cosmology, value)
setattr(cosmology, self._attr_name_private, value)
# -------------------------------------------
# validate value
@property
def fvalidate(self):
"""Function to validate a potential value of this Parameter.."""
return self._fvalidate
def validator(self, fvalidate):
"""Make new Parameter with custom ``fvalidate``.
Note: ``Parameter.validator`` must be the top-most descriptor decorator.
Parameters
----------
fvalidate : callable[[type, type, Any], Any]
Returns
-------
`~astropy.cosmology.Parameter`
Copy of this Parameter but with custom ``fvalidate``.
"""
return self.clone(fvalidate=fvalidate)
def validate(self, cosmology, value):
"""Run the validator on this Parameter.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` instance
value : Any
The object to validate.
Returns
-------
Any
The output of calling ``fvalidate(cosmology, self, value)``
(yes, that parameter order).
"""
return self.fvalidate(cosmology, self, value)
@classmethod
def register_validator(cls, key, fvalidate=None):
"""Decorator to register a new kind of validator function.
Parameters
----------
key : str
fvalidate : callable[[object, object, Any], Any] or None, optional
Value validation function.
Returns
-------
``validator`` or callable[``validator``]
If ``fvalidate`` is None, returns a function that takes and registers
a validator. This allows ``register_validator`` to be used as a
decorator.
"""
if key in cls._registry_validators:
raise KeyError(f"validator {key!r} already registered with Parameter.")
# fvalidate directly passed
if fvalidate is not None:
cls._registry_validators[key] = fvalidate
return fvalidate
# for use as a decorator
def register(fvalidate):
"""Register validator function.
Parameters
----------
fvalidate : callable[[object, object, Any], Any]
Validation function.
Returns
-------
``validator``
"""
cls._registry_validators[key] = fvalidate
return fvalidate
return register
# -------------------------------------------
def _get_init_arguments(self, processed=False):
"""Initialization arguments.
Parameters
----------
processed : bool
Whether to more closely reproduce the input arguments (`False`,
default) or the processed arguments (`True`). The former is better
for string representations and round-tripping with ``eval(repr())``.
Returns
-------
dict[str, Any]
"""
# The keys are added in this order because `repr` prints them in order.
kw = {"derived": self.derived,
"unit": self.unit,
"equivalencies": self.equivalencies,
# Validator is always turned into a function, but for ``repr`` it's nice
# to know if it was originally a string.
"fvalidate": self.fvalidate if processed else self._fvalidate_in,
"fmt": self.format_spec,
"doc": self.__doc__}
return kw
def clone(self, **kw):
"""Clone this `Parameter`, changing any constructor argument.
Parameters
----------
**kw
Passed to constructor. The current values, e.g. ``fvalidate``, are
used as the default values, so an empty ``**kw`` is an exact copy.
Examples
--------
>>> p = Parameter()
>>> p
Parameter(derived=False, unit=None, equivalencies=[],
fvalidate='default', fmt='', doc=None)
>>> p.clone(unit="km")
Parameter(derived=False, unit=Unit("km"), equivalencies=[],
fvalidate='default', fmt='', doc=None)
"""
# Start with defaults, update from kw.
kwargs = {**self._get_init_arguments(), **kw}
# All initialization failures, like incorrect input are handled by init
cloned = type(self)(**kwargs)
# Transfer over the __set_name__ stuff. If `clone` is used to make a
# new descriptor, __set_name__ will be called again, overwriting this.
cloned._attr_name = self._attr_name
cloned._attr_name_private = self._attr_name_private
return cloned
def __eq__(self, other):
"""Check Parameter equality. Only equal to other Parameter objects.
Returns
-------
bool or NotImplemented
`True` or `False` if ``other`` is also a Parameter, `NotImplemented`
otherwise. This allows equality to also be checked with ``other.__eq__``.
Examples
--------
>>> p1, p2 = Parameter(unit="km"), Parameter(unit="km")
>>> p1 == p2
True
>>> p3 = Parameter(unit="km / s")
>>> p3 == p1
False
>>> p1 != 2
True
"""
if not isinstance(other, Parameter):
return NotImplemented
# Check equality on all `_init_arguments` & `name`.
# Need to compare the processed arguments because the inputs are many-
# to-one, e.g. `fvalidate` can be a string or the equivalent function.
return ((self._get_init_arguments(True) == other._get_init_arguments(True))
and (self.name == other.name))
def __repr__(self):
"""String representation.
``eval(repr())`` should work, depending if contents like ``fvalidate``
can be similarly round-tripped.
"""
return "Parameter({})".format(", ".join(f"{k}={v!r}" for k, v in
self._get_init_arguments().items()))
# ===================================================================
# Built-in validators
@Parameter.register_validator("default")
def _validate_with_unit(cosmology, param, value):
"""
Default Parameter value validator.
Adds/converts units if Parameter has a unit.
"""
if param.unit is not None:
with u.add_enabled_equivalencies(param.equivalencies):
value = u.Quantity(value, param.unit)
return value
@Parameter.register_validator("float")
def _validate_to_float(cosmology, param, value):
"""Parameter value validator with units, and converted to float."""
value = _validate_with_unit(cosmology, param, value)
return float(value)
@Parameter.register_validator("scalar")
def _validate_to_scalar(cosmology, param, value):
""""""
value = _validate_with_unit(cosmology, param, value)
if not value.isscalar:
raise ValueError(f"{param.name} is a non-scalar quantity")
return value
@Parameter.register_validator("non-negative")
def _validate_non_negative(cosmology, param, value):
"""Parameter value validator where value is a positive float."""
value = _validate_to_float(cosmology, param, value)
if value < 0.0:
raise ValueError(f"{param.name} cannot be negative.")
return value
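# --- Editor's illustrative sketch (not part of the astropy source) ----------
# Shows how the registry above can be extended with a new key and then
# consumed via ``Parameter(fvalidate=...)``; the key name is hypothetical.
def _example_register_validator():  # pragma: no cover - illustration only
    @Parameter.register_validator("example-positive")
    def _validate_positive(cosmology, param, value):
        """Like 'non-negative', but additionally rejects zero."""
        value = _validate_to_float(cosmology, param, value)
        if value <= 0.0:
            raise ValueError(f"{param.name} must be positive.")
        return value

    # A Parameter built with the new key validates through the registry.
    return Parameter(fvalidate="example-positive", doc="demo parameter")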
|
6c5f2c742ebd1b3237e201af83be58da5518b507dd607abe6ae185575e72ad98 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Convenience functions for `astropy.cosmology`.
"""
import warnings
import numpy as np
from astropy.units import Quantity
from astropy.utils.exceptions import AstropyUserWarning
from . import units as cu
from .core import CosmologyError
__all__ = ['z_at_value']
__doctest_requires__ = {'*': ['scipy']}
def _z_at_scalar_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500,
method='Brent', bracket=None, verbose=False):
"""
Find the redshift ``z`` at which ``func(z) = fval``.
See :func:`astropy.cosmology.funcs.z_at_value`.
"""
from scipy.optimize import minimize_scalar
opt = {'maxiter': maxfun}
# Assume custom methods support the same options as default; otherwise user
# will see warnings.
if str(method).lower() == 'bounded':
opt['xatol'] = ztol
if bracket is not None:
warnings.warn(f"Option 'bracket' is ignored by method {method}.")
bracket = None
else:
opt['xtol'] = ztol
# fval falling inside the interval of bracketing function values does not
# guarantee it has a unique solution, but for standard cosmological
# quantities it normally should (being monotonic or having a single extremum).
# In these cases keep the solver from returning solutions outside of bracket.
fval_zmin, fval_zmax = func(zmin), func(zmax)
nobracket = False
if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval):
if bracket is None:
nobracket = True
else:
fval_brac = func(np.asanyarray(bracket))
if np.sign(fval - fval_brac[0]) != np.sign(fval_brac[-1] - fval):
nobracket = True
else:
zmin, zmax = bracket[0], bracket[-1]
fval_zmin, fval_zmax = fval_brac[[0, -1]]
if nobracket:
warnings.warn(f"fval is not bracketed by func(zmin)={fval_zmin} and "
f"func(zmax)={fval_zmax}. This means either there is no "
"solution, or that there is more than one solution "
"between zmin and zmax satisfying fval = func(z).",
AstropyUserWarning)
if isinstance(fval_zmin, Quantity):
val = fval.to_value(fval_zmin.unit)
else:
val = fval
# 'Brent' and 'Golden' ignore `bounds`, force solution inside zlim
def f(z):
if z > zmax:
return 1.e300 * (1.0 + z - zmax)
elif z < zmin:
return 1.e300 * (1.0 + zmin - z)
elif isinstance(fval_zmin, Quantity):
return abs(func(z).value - val)
else:
return abs(func(z) - val)
res = minimize_scalar(f, method=method, bounds=(zmin, zmax),
bracket=bracket, options=opt)
# Scipy docs state that `OptimizeResult` always has 'status' and 'message'
# attributes, but only `_minimize_scalar_bounded()` seems to have really
# implemented them.
if not res.success:
warnings.warn(f"Solver returned {res.get('status')}: {res.get('message', 'Unsuccessful')}\n"
f"Precision {res.fun} reached after {res.nfev} function calls.",
AstropyUserWarning)
if verbose:
print(res)
if np.allclose(res.x, zmax):
raise CosmologyError(
f"Best guess z={res.x} is very close to the upper z limit {zmax}."
"\nTry re-running with a different zmax.")
elif np.allclose(res.x, zmin):
raise CosmologyError(
f"Best guess z={res.x} is very close to the lower z limit {zmin}."
"\nTry re-running with a different zmin.")
return res.x
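# --- Editor's illustrative sketch (not part of the astropy source) ----------
# The scalar helper above works with any monotonic callable, not only
# cosmology methods; here f(z) = z**2 is inverted for fval = 4.
def _example_scalar_inversion():  # pragma: no cover - illustration only
    # ~2.0: Brent minimization of |f(z) - fval|, confined to [zmin, zmax]
    return _z_at_scalar_value(lambda z: z ** 2, 4.0, zmin=0.0, zmax=10.0)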
def z_at_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500,
method='Brent', bracket=None, verbose=False):
"""Find the redshift ``z`` at which ``func(z) = fval``.
This finds the redshift at which one of the cosmology functions or
methods (for example Planck13.distmod) is equal to a known value.
.. warning::
Make sure you understand the behavior of the function that you are
trying to invert! Depending on the cosmology, there may not be a
unique solution. For example, in the standard Lambda CDM cosmology,
there are two redshifts which give an angular diameter distance of
1500 Mpc, z ~ 0.7 and z ~ 3.8. To force ``z_at_value`` to find the
solution you are interested in, use the ``zmin`` and ``zmax`` keywords
to limit the search range (see the example below).
Parameters
----------
func : function or method
A function that takes a redshift as input.
fval : `~astropy.units.Quantity`
The (scalar or array) value of ``func(z)`` to recover.
zmin : float or array-like['dimensionless'] or quantity-like, optional
The lower search limit for ``z``. Beware of divergences
in some cosmological functions, such as distance moduli,
at z=0 (default 1e-8).
zmax : float or array-like['dimensionless'] or quantity-like, optional
The upper search limit for ``z`` (default 1000).
ztol : float or array-like['dimensionless'], optional
The relative error in ``z`` acceptable for convergence.
maxfun : int or array-like, optional
The maximum number of function evaluations allowed in the
optimization routine (default 500).
method : str or callable, optional
Type of solver to pass to the minimizer. The built-in options provided
by :func:`~scipy.optimize.minimize_scalar` are 'Brent' (default),
'Golden' and 'Bounded' with names case insensitive - see documentation
there for details. It also accepts a custom solver by passing any
user-provided callable object that meets the requirements listed
therein under the Notes on "Custom minimizers" - or in more detail in
:doc:`scipy:tutorial/optimize` - although their use is currently
untested.
.. versionadded:: 4.3
bracket : sequence or object array[sequence], optional
For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing
interval and can either have three items (z1, z2, z3) so that
z1 < z2 < z3 and ``func(z2) < func (z1), func(z3)`` or two items z1
and z3 which are assumed to be a starting interval for a downhill
bracket search. For non-monotonic functions such as angular diameter
distance this may be used to start the search on the desired side of
the maximum, but see Examples below for usage notes.
.. versionadded:: 4.3
verbose : bool, optional
Print diagnostic output from solver (default `False`).
.. versionadded:: 4.3
Returns
-------
z : `~astropy.units.Quantity` ['redshift']
The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) =
fval`` within ``ztol``. Has units of cosmological redshift.
Warns
-----
:class:`~astropy.utils.exceptions.AstropyUserWarning`
If ``fval`` is not bracketed by ``func(zmin)`` and ``func(zmax)``.
If the solver was not successful.
Raises
------
:class:`astropy.cosmology.CosmologyError`
If the result is very close to either ``zmin`` or ``zmax``.
ValueError
If ``bracket`` is not an array nor a 2 (or 3) element sequence.
TypeError
If ``bracket`` is not an object array. 2 (or 3) element sequences will
be turned into object arrays, so this error should only occur if a
non-object array is used for ``bracket``.
Notes
-----
This works for any arbitrary input cosmology, but is inefficient if you
want to invert a large number of values for the same cosmology. In this
case, it is faster to instead generate an array of values at many
closely-spaced redshifts that cover the relevant redshift range, and then
use interpolation to find the redshift at each value you are interested
in. For example, to efficiently find the redshifts corresponding to 10^6
values of the distance modulus in a Planck13 cosmology, you could do the
following:
>>> import numpy as np
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, z_at_value
Generate 10^6 distance moduli between 24 and 44 for which we
want to find the corresponding redshifts:
>>> Dvals = (24 + np.random.rand(1000000) * 20) * u.mag
Make a grid of distance moduli covering the redshift range we
need using 50 equally log-spaced values between zmin and
zmax. We use log spacing to adequately sample the steep part of
the curve at low distance moduli:
>>> zmin = z_at_value(Planck13.distmod, Dvals.min())
>>> zmax = z_at_value(Planck13.distmod, Dvals.max())
>>> zgrid = np.geomspace(zmin, zmax, 50)
>>> Dgrid = Planck13.distmod(zgrid)
Finally interpolate to find the redshift at each distance modulus:
>>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)
Examples
--------
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, Planck18, z_at_value
The age and lookback time are monotonic with redshift, and so a
unique solution can be found:
>>> z_at_value(Planck13.age, 2 * u.Gyr) # doctest: +FLOAT_CMP
<Quantity 3.19812268 redshift>
The angular diameter is not monotonic however, and there are two
redshifts that give a value of 1500 Mpc. You can use the zmin and
zmax keywords to find the one you are interested in:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmax=1.5) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmin=2.5) # doctest: +FLOAT_CMP
<Quantity 3.7823268 redshift>
Alternatively the ``bracket`` option may be used to initialize the
function solver on a desired region, but one should be aware that this
does not guarantee it will remain close to this starting bracket.
For the example of angular diameter distance, which has a maximum near
a redshift of 1.6 in this cosmology, defining a bracket on either side
of this maximum will often return a solution on the same side:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(1.0, 1.2)) # doctest: +FLOAT_CMP +IGNORE_WARNINGS
<Quantity 0.68044452 redshift>
But this is not guaranteed, especially if the bracket is chosen too wide
and/or too close to the turning point:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(0.1, 1.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
Likewise, even for the same minimizer and same starting conditions different
results can be found depending on architecture or library versions:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 0.68044452 redshift> # doctest: +SKIP
It is therefore generally safer to use the 3-parameter variant to ensure
the solution stays within the bracketing limits:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=(0.1, 1.0, 1.5)) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
Also note that the luminosity distance and distance modulus (two
other commonly inverted quantities) are monotonic in flat and open
universes, but not in closed universes.
All the arguments except ``func``, ``method`` and ``verbose`` accept array
inputs. This does NOT use interpolation tables or any method to speed up
evaluations, rather providing a convenient means to broadcast arguments
over an element-wise scalar evaluation.
The most common use case for non-scalar input is to evaluate ``func`` for an
array of ``fval``:
>>> z_at_value(Planck13.age, [2, 7] * u.Gyr) # doctest: +FLOAT_CMP
<Quantity [3.19812061, 0.75620443] redshift>
``fval`` can be any shape:
>>> z_at_value(Planck13.age, [[2, 7], [1, 3]]*u.Gyr) # doctest: +FLOAT_CMP
<Quantity [[3.19812061, 0.75620443],
[5.67661227, 2.19131955]] redshift>
Other arguments can be arrays. For non-monotonic functions -- for example,
the angular diameter distance -- this can be useful to find all solutions.
>>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc,
... zmin=[0, 2.5], zmax=[2, 4]) # doctest: +FLOAT_CMP
<Quantity [0.68127747, 3.79149062] redshift>
The ``bracket`` argument can likewise be an array. However, since
bracket must already be a sequence (or None), it MUST be given as an
object `numpy.ndarray`. Importantly, the depth of the array must be such
that each bracket subsequence is an object. Errors or unexpected results
will happen otherwise. A convenient means to ensure the right depth is by
including a length-0 tuple as a bracket and then truncating the object
array to remove the placeholder. This can be seen in the following
example:
>>> bracket = np.array([(1.0, 1.2), (2.0, 2.5), ()], dtype=object)[:-1]
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=bracket) # doctest: +SKIP
<Quantity [0.68044452, 3.7823268] redshift>
"""
# `fval` can be a Quantity, which isn't (yet) compatible w/ `numpy.nditer`
# so we strip it of units for broadcasting and restore the units when
# passing the elements to `_z_at_scalar_value`.
fval = np.asanyarray(fval)
unit = getattr(fval, 'unit', 1) # can be unitless
zmin = Quantity(zmin, cu.redshift).value # must be unitless
zmax = Quantity(zmax, cu.redshift).value
# bracket must be an object array (assumed to be correct) or a 'scalar'
# bracket: 2 or 3 elt sequence
if not isinstance(bracket, np.ndarray): # 'scalar' bracket
if bracket is not None and len(bracket) not in (2, 3):
raise ValueError("`bracket` is not an array "
"nor a 2 (or 3) element sequence.")
else: # munge bracket into a 1-elt object array
bracket = np.array([bracket, ()], dtype=object)[:1].squeeze()
if bracket.dtype != np.object_:
raise TypeError(f"`bracket` has dtype {bracket.dtype}, not 'O'")
# make multi-dimensional iterator for all but `method`, `verbose`
with np.nditer(
[fval, zmin, zmax, ztol, maxfun, bracket, None],
flags=['refs_ok'],
op_flags=[*[['readonly']] * 6,  # ← inputs  output ↓
['writeonly', 'allocate', 'no_subtype']],
op_dtypes=(*(None,)*6, fval.dtype),
casting="no",
) as it:
for fv, zmn, zmx, zt, mfe, bkt, zs in it:  # ← eltwise unpack & eval ↓
zs[...] = _z_at_scalar_value(func, fv * unit, zmin=zmn, zmax=zmx,
ztol=zt, maxfun=mfe, bracket=bkt.item(),
# not broadcasted
method=method, verbose=verbose)
# since bracket is an object array, the output will be too, so it is
# cast to the same type as the function value.
result = it.operands[-1] # zs
return result << cu.redshift
|
089651864317bba25e07875b72e9a461233662cb29a69224b2a40a6609639bb8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains astronomical and physical constants for use in Astropy or other
places.
A typical use case might be::
>>> from astropy.constants import c, m_e
>>> # ... define the mass of something you want the rest energy of as m ...
>>> m = m_e
>>> E = m * c**2
>>> E.to('MeV') # doctest: +FLOAT_CMP
<Quantity 0.510998927603161 MeV>
"""
import warnings
from astropy.utils import find_current_module
# Hack to make circular imports with units work
# isort: split
from astropy import units
del units
from . import cgs # noqa
from . import si # noqa
from . import utils as _utils # noqa
from .config import codata, iaudata # noqa
from .constant import Constant, EMConstant # noqa
# for updating the constants module docstring
_lines = [
'The following constants are available:\n',
'========== ============== ================ =========================',
' Name Value Unit Description',
'========== ============== ================ =========================',
]
# Catch warnings about "already has a definition in the None system"
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Constant .*already has a definition')
_utils._set_c(codata, iaudata, find_current_module(),
not_in_module_only=True, doclines=_lines, set_class=True)
_lines.append(_lines[1])
if __doc__ is not None:
__doc__ += '\n'.join(_lines)
# Clean up namespace
del find_current_module
del warnings
del _utils
del _lines
|
9a3dea90e2172c28bbc5a2eb2695408d7594a87f2a3dea949b2c21eda17b3926 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Configures the codata and iaudata used, possibly using user configuration.
"""
# Note: doing this in __init__ causes import problems with units,
# as si.py and cgs.py have to import the result.
import importlib
import astropy
phys_version = astropy.physical_constants.get()
astro_version = astropy.astronomical_constants.get()
codata = importlib.import_module('.constants.' + phys_version, 'astropy')
iaudata = importlib.import_module('.constants.' + astro_version, 'astropy')
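# --- Editor's illustrative sketch (not part of the astropy source) ----------
# The two ScienceState values above resolve to concrete version modules,
# e.g. 'astropy.constants.codata2018' and 'astropy.constants.iau2015' on a
# modern astropy.
def _example_selected_versions():  # pragma: no cover - illustration only
    return codata.__name__, iaudata.__name__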
|
b0178bb8388a3015d0974ce61b2fe5bf201adea6495584ade50f10ab30540c78 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v1.3 and earlier.
See :mod:`astropy.constants` for a complete listing of constants
defined in Astropy.
"""
from astropy.utils import find_current_module
from . import codata2010, iau2012
from . import utils as _utils
codata = codata2010
iaudata = iau2012
_utils._set_c(codata, iaudata, find_current_module())
# Clean up namespace
del find_current_module
del _utils
|
962480a7a67d3204612f2c04dc495749b714fd32724ba9c8beea205bcbff4961 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import numpy as np
from .constant import Constant, EMConstant
# PHYSICAL CONSTANTS
class CODATA2014(Constant):
default_reference = 'CODATA 2014'
_registry = {}
_has_incompatible_units = set()
class EMCODATA2014(CODATA2014, EMConstant):
_registry = CODATA2014._registry
h = CODATA2014('h', "Planck constant", 6.626070040e-34,
'J s', 0.000000081e-34, system='si')
hbar = CODATA2014('hbar', "Reduced Planck constant", 1.054571800e-34,
'J s', 0.000000013e-34, system='si')
k_B = CODATA2014('k_B', "Boltzmann constant", 1.38064852e-23,
'J / (K)', 0.00000079e-23, system='si')
c = CODATA2014('c', "Speed of light in vacuum", 299792458.,
'm / (s)', 0.0, system='si')
G = CODATA2014('G', "Gravitational constant", 6.67408e-11,
'm3 / (kg s2)', 0.00031e-11, system='si')
g0 = CODATA2014('g0', "Standard acceleration of gravity", 9.80665,
'm / s2', 0.0, system='si')
m_p = CODATA2014('m_p', "Proton mass", 1.672621898e-27,
'kg', 0.000000021e-27, system='si')
m_n = CODATA2014('m_n', "Neutron mass", 1.674927471e-27,
'kg', 0.000000021e-27, system='si')
m_e = CODATA2014('m_e', "Electron mass", 9.10938356e-31,
'kg', 0.00000011e-31, system='si')
u = CODATA2014('u', "Atomic mass", 1.660539040e-27,
'kg', 0.000000020e-27, system='si')
sigma_sb = CODATA2014('sigma_sb', "Stefan-Boltzmann constant", 5.670367e-8,
'W / (K4 m2)', 0.000013e-8, system='si')
e = EMCODATA2014('e', 'Electron charge', 1.6021766208e-19,
'C', 0.0000000098e-19, system='si')
eps0 = EMCODATA2014('eps0', 'Electric constant', 8.854187817e-12,
'F/m', 0.0, system='si')
N_A = CODATA2014('N_A', "Avogadro's number", 6.022140857e23,
'1 / (mol)', 0.000000074e23, system='si')
R = CODATA2014('R', "Gas constant", 8.3144598,
'J / (K mol)', 0.0000048, system='si')
Ryd = CODATA2014('Ryd', 'Rydberg constant', 10973731.568508,
'1 / (m)', 0.000065, system='si')
a0 = CODATA2014('a0', "Bohr radius", 0.52917721067e-10,
'm', 0.00000000012e-10, system='si')
muB = CODATA2014('muB', "Bohr magneton", 927.4009994e-26,
'J/T', 0.00002e-26, system='si')
alpha = CODATA2014('alpha', "Fine-structure constant", 7.2973525664e-3,
'', 0.0000000017e-3, system='si')
atm = CODATA2014('atm', "Standard atmosphere", 101325,
'Pa', 0.0, system='si')
mu0 = CODATA2014('mu0', "Magnetic constant", 4.0e-7 * np.pi, 'N/A2', 0.0,
system='si')
sigma_T = CODATA2014('sigma_T', "Thomson scattering cross-section",
0.66524587158e-28, 'm2', 0.00000000091e-28,
system='si')
b_wien = CODATA2014('b_wien', 'Wien wavelength displacement law constant',
2.8977729e-3, 'm K', 0.0000017e-3, system='si')
# cgs constants
# Only constants that cannot be converted directly from S.I. are defined here.
e_esu = EMCODATA2014(e.abbrev, e.name, e.value * c.value * 10.0,
'statC', e.uncertainty * c.value * 10.0, system='esu')
e_emu = EMCODATA2014(e.abbrev, e.name, e.value / 10, 'abC',
e.uncertainty / 10, system='emu')
e_gauss = EMCODATA2014(e.abbrev, e.name, e.value * c.value * 10.0,
'Fr', e.uncertainty * c.value * 10.0, system='gauss')
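# --- Editor's illustrative sketch (not part of the astropy source) ----------
# The constants above are Quantity subclasses, so ordinary arithmetic works;
# the classic rest-energy computation uses ``m_e`` and ``c`` directly.
def _example_rest_energy():  # pragma: no cover - illustration only
    from astropy import units as u
    return (m_e * c ** 2).to(u.MeV)  # ~0.511 MeV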
|
bcbc46772575b503a3faf8cbff3d5bbcff77ca65a48021cb1c36552ae5ab0373 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v4.0.
See :mod:`astropy.constants` for a complete listing of constants defined
in Astropy.
"""
import warnings
from astropy.utils import find_current_module
from . import codata2018, iau2015
from . import utils as _utils
codata = codata2018
iaudata = iau2015
_utils._set_c(codata, iaudata, find_current_module())
# Overwrite the following for consistency.
# https://github.com/astropy/astropy/issues/8920
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Constant .*already has a definition')
# Solar mass (derived from mass parameter and gravitational constant)
M_sun = iau2015.IAU2015(
'M_sun', "Solar mass", iau2015.GM_sun.value / codata2018.G.value,
'kg', ((codata2018.G.uncertainty / codata2018.G.value) *
(iau2015.GM_sun.value / codata2018.G.value)),
f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system='si')
# Jupiter mass (derived from mass parameter and gravitational constant)
M_jup = iau2015.IAU2015(
'M_jup', "Jupiter mass", iau2015.GM_jup.value / codata2018.G.value,
'kg', ((codata2018.G.uncertainty / codata2018.G.value) *
(iau2015.GM_jup.value / codata2018.G.value)),
f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system='si')
# Earth mass (derived from mass parameter and gravitational constant)
M_earth = iau2015.IAU2015(
'M_earth', "Earth mass",
iau2015.GM_earth.value / codata2018.G.value,
'kg', ((codata2018.G.uncertainty / codata2018.G.value) *
(iau2015.GM_earth.value / codata2018.G.value)),
f"IAU 2015 Resolution B 3 + {codata2018.G.reference}", system='si')
# Clean up namespace
del warnings
del find_current_module
del _utils
|
b039f90e668a1e39fdf5c494c7274c2beefaa71499ee7dd66f7d5b051d0a1a6c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import numpy as np
from .config import codata
from .constant import Constant
# ASTRONOMICAL CONSTANTS
class IAU2015(Constant):
default_reference = 'IAU 2015'
_registry = {}
_has_incompatible_units = set()
# DISTANCE
# Astronomical Unit (did not change from 2012)
au = IAU2015('au', "Astronomical Unit", 1.49597870700e11, 'm', 0.0,
"IAU 2012 Resolution B2", system='si')
# Parsec
pc = IAU2015('pc', "Parsec", au.value / np.radians(1. / 3600.), 'm',
au.uncertainty / np.radians(1. / 3600.),
"Derived from au + IAU 2015 Resolution B 2 note [4]", system='si')
# Kiloparsec
kpc = IAU2015('kpc', "Kiloparsec",
1000. * au.value / np.radians(1. / 3600.), 'm',
1000. * au.uncertainty / np.radians(1. / 3600.),
"Derived from au + IAU 2015 Resolution B 2 note [4]", system='si')
# Luminosity
L_bol0 = IAU2015('L_bol0', "Luminosity for absolute bolometric magnitude 0",
3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system='si')
# SOLAR QUANTITIES
# Solar luminosity
L_sun = IAU2015('L_sun', "Nominal solar luminosity", 3.828e26,
'W', 0.0, "IAU 2015 Resolution B 3", system='si')
# Solar mass parameter
GM_sun = IAU2015('GM_sun', 'Nominal solar mass parameter', 1.3271244e20,
'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si')
# Solar mass (derived from mass parameter and gravitational constant)
M_sun = IAU2015('M_sun', "Solar mass", GM_sun.value / codata.G.value,
'kg', ((codata.G.uncertainty / codata.G.value) *
(GM_sun.value / codata.G.value)),
f"IAU 2015 Resolution B 3 + {codata.G.reference}",
system='si')
# Solar radius
R_sun = IAU2015('R_sun', "Nominal solar radius", 6.957e8, 'm', 0.0,
"IAU 2015 Resolution B 3", system='si')
# OTHER SOLAR SYSTEM QUANTITIES
# Jupiter mass parameter
GM_jup = IAU2015('GM_jup', 'Nominal Jupiter mass parameter', 1.2668653e17,
'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si')
# Jupiter mass (derived from mass parameter and gravitational constant)
M_jup = IAU2015('M_jup', "Jupiter mass", GM_jup.value / codata.G.value,
'kg', ((codata.G.uncertainty / codata.G.value) *
(GM_jup.value / codata.G.value)),
f"IAU 2015 Resolution B 3 + {codata.G.reference}",
system='si')
# Jupiter equatorial radius
R_jup = IAU2015('R_jup', "Nominal Jupiter equatorial radius", 7.1492e7,
'm', 0.0, "IAU 2015 Resolution B 3", system='si')
# Earth mass parameter
GM_earth = IAU2015('GM_earth', 'Nominal Earth mass parameter', 3.986004e14,
'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si')
# Earth mass (derived from mass parameter and gravitational constant)
M_earth = IAU2015('M_earth', "Earth mass",
GM_earth.value / codata.G.value,
'kg', ((codata.G.uncertainty / codata.G.value) *
(GM_earth.value / codata.G.value)),
f"IAU 2015 Resolution B 3 + {codata.G.reference}",
system='si')
# Earth equatorial radius
R_earth = IAU2015('R_earth', "Nominal Earth equatorial radius", 6.3781e6,
'm', 0.0, "IAU 2015 Resolution B 3", system='si')
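# --- Editor's illustrative sketch (not part of the astropy source) ----------
# The parsec above is derived from the au via the small-angle relation, so
# their ratio is the number of arcseconds per radian.
def _example_parsec_in_au():  # pragma: no cover - illustration only
    return pc.value / au.value  # ~206264.806 (= 648000 / pi)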
|
72b4dd0198b2a91b998465ebd1f7f7dd57e2724af501ee3fe91131f0fab930ac | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in cgs units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import itertools
from .config import codata, iaudata
from .constant import Constant
for _nm, _c in itertools.chain(sorted(vars(codata).items()),
sorted(vars(iaudata).items())):
if (isinstance(_c, Constant) and _c.abbrev not in locals()
and _c.system in ['esu', 'gauss', 'emu']):
locals()[_c.abbrev] = _c
|
2b052e76622356a4fb1d7bb817a70f3191e445a7cb445f94d90ea01556079572 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import numpy as np
from .constant import Constant
# ASTRONOMICAL CONSTANTS
class IAU2012(Constant):
default_reference = 'IAU 2012'
_registry = {}
_has_incompatible_units = set()
# DISTANCE
# Astronomical Unit
au = IAU2012('au', "Astronomical Unit", 1.49597870700e11, 'm', 0.0,
"IAU 2012 Resolution B2", system='si')
# Parsec
pc = IAU2012('pc', "Parsec", au.value / np.tan(np.radians(1. / 3600.)), 'm',
au.uncertainty / np.tan(np.radians(1. / 3600.)),
"Derived from au", system='si')
# Kiloparsec
kpc = IAU2012('kpc', "Kiloparsec",
1000. * au.value / np.tan(np.radians(1. / 3600.)), 'm',
1000. * au.uncertainty / np.tan(np.radians(1. / 3600.)),
"Derived from au", system='si')
# Luminosity not defined till 2015 (https://arxiv.org/abs/1510.06262)
L_bol0 = IAU2012('L_bol0', "Luminosity for absolute bolometric magnitude 0",
3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system='si')
# SOLAR QUANTITIES
# Solar luminosity
L_sun = IAU2012('L_sun', "Solar luminosity", 3.846e26, 'W', 0.0005e26,
"Allen's Astrophysical Quantities 4th Ed.", system='si')
# Solar mass
M_sun = IAU2012('M_sun', "Solar mass", 1.9891e30, 'kg', 0.00005e30,
"Allen's Astrophysical Quantities 4th Ed.", system='si')
# Solar radius
R_sun = IAU2012('R_sun', "Solar radius", 6.95508e8, 'm', 0.00026e8,
"Allen's Astrophysical Quantities 4th Ed.", system='si')
# OTHER SOLAR SYSTEM QUANTITIES
# Jupiter mass
M_jup = IAU2012('M_jup', "Jupiter mass", 1.8987e27, 'kg', 0.00005e27,
"Allen's Astrophysical Quantities 4th Ed.", system='si')
# Jupiter equatorial radius
R_jup = IAU2012('R_jup', "Jupiter equatorial radius", 7.1492e7, 'm',
0.00005e7, "Allen's Astrophysical Quantities 4th Ed.",
system='si')
# Earth mass
M_earth = IAU2012('M_earth', "Earth mass", 5.9742e24, 'kg', 0.00005e24,
"Allen's Astrophysical Quantities 4th Ed.", system='si')
# Earth equatorial radius
R_earth = IAU2012('R_earth', "Earth equatorial radius", 6.378136e6, 'm',
0.0000005e6, "Allen's Astrophysical Quantities 4th Ed.",
system='si')
|
88f385a7cb4c00df5cd822b50da19f8f261818590511e060e06009f3e48221a5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v2.0.
See :mod:`astropy.constants` for a complete listing of constants defined
in Astropy.
"""
import warnings
from astropy.utils import find_current_module
from . import codata2014, iau2015
from . import utils as _utils
codata = codata2014
iaudata = iau2015
_utils._set_c(codata, iaudata, find_current_module())
# Overwrite the following for consistency.
# https://github.com/astropy/astropy/issues/8920
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Constant .*already has a definition')
# Solar mass (derived from mass parameter and gravitational constant)
M_sun = iau2015.IAU2015(
'M_sun', "Solar mass", iau2015.GM_sun.value / codata2014.G.value,
'kg', ((codata2014.G.uncertainty / codata2014.G.value) *
(iau2015.GM_sun.value / codata2014.G.value)),
f"IAU 2015 Resolution B 3 + {codata2014.G.reference}", system='si')
# Jupiter mass (derived from mass parameter and gravitational constant)
M_jup = iau2015.IAU2015(
'M_jup', "Jupiter mass", iau2015.GM_jup.value / codata2014.G.value,
'kg', ((codata2014.G.uncertainty / codata2014.G.value) *
(iau2015.GM_jup.value / codata2014.G.value)),
f"IAU 2015 Resolution B 3 + {codata2014.G.reference}", system='si')
# Earth mass (derived from mass parameter and gravitational constant)
M_earth = iau2015.IAU2015(
'M_earth', "Earth mass",
iau2015.GM_earth.value / codata2014.G.value,
'kg', ((codata2014.G.uncertainty / codata2014.G.value) *
(iau2015.GM_earth.value / codata2014.G.value)),
f"IAU 2015 Resolution B 3 + {codata2014.G.reference}", system='si')
# Clean up namespace
del warnings
del find_current_module
del _utils
|
185b5aa991e83f454530c0e08b0d3349a67b22157b508e9098d1d0d3bd834749 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utility functions for ``constants`` sub-package."""
import itertools
__all__ = []
def _get_c(codata, iaudata, module, not_in_module_only=True):
"""
Generator to return a Constant object.
Parameters
----------
codata, iaudata : obj
Modules containing CODATA and IAU constants of interest.
module : obj
Namespace module of interest.
not_in_module_only : bool
If ``True``, ignore constants that are already in the
namespace of ``module``.
Returns
-------
_c : Constant
Constant object to process.
"""
from .constant import Constant
for _nm, _c in itertools.chain(sorted(vars(codata).items()),
sorted(vars(iaudata).items())):
if not isinstance(_c, Constant):
continue
elif (not not_in_module_only) or (_c.abbrev not in module.__dict__):
yield _c
def _set_c(codata, iaudata, module, not_in_module_only=True, doclines=None,
set_class=False):
"""
Set constants in a given module namespace.
Parameters
----------
codata, iaudata : obj
Modules containing CODATA and IAU constants of interest.
module : obj
Namespace module to modify with the given ``codata`` and ``iaudata``.
not_in_module_only : bool
If ``True``, constants that are already in the namespace
of ``module`` will not be modified.
doclines : list or None
If a list is given, this list will be modified in-place to include
documentation of modified constants. This can be used to update
docstring of ``module``.
set_class : bool
Namespace of ``module`` is populated with ``_c.__class__``
instead of just ``_c`` from :func:`_get_c`.
"""
for _c in _get_c(codata, iaudata, module,
not_in_module_only=not_in_module_only):
if set_class:
value = _c.__class__(_c.abbrev, _c.name, _c.value,
_c._unit_string, _c.uncertainty,
_c.reference)
else:
value = _c
setattr(module, _c.abbrev, value)
if doclines is not None:
doclines.append('{:^10} {:^14.9g} {:^16} {}'.format(
_c.abbrev, _c.value, _c._unit_string, _c.name))
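# --- Editor's illustrative sketch (not part of the astropy source) ----------
# Mirrors how the version shims (e.g. astropyconst40.py) drive ``_set_c``:
# the target module's namespace is populated and ``doclines`` collects one
# formatted row per constant for a module docstring table.
def _example_populate(module):  # pragma: no cover - illustration only
    from astropy.constants import codata2018, iau2015
    lines = []
    _set_c(codata2018, iau2015, module, doclines=lines)
    return lines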
|
00c41d4824c990c4eb4e3ce78c573bb001c0835bddee0dfd3c55fbcbe31883c3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import math
from .constant import Constant, EMConstant
# PHYSICAL CONSTANTS
# https://en.wikipedia.org/wiki/2019_redefinition_of_SI_base_units
class CODATA2018(Constant):
default_reference = 'CODATA 2018'
_registry = {}
_has_incompatible_units = set()
class EMCODATA2018(CODATA2018, EMConstant):
_registry = CODATA2018._registry
h = CODATA2018('h', "Planck constant", 6.62607015e-34,
'J s', 0.0, system='si')
hbar = CODATA2018('hbar', "Reduced Planck constant", h.value / (2 * math.pi),
'J s', 0.0, system='si')
k_B = CODATA2018('k_B', "Boltzmann constant", 1.380649e-23,
'J / (K)', 0.0, system='si')
c = CODATA2018('c', "Speed of light in vacuum", 299792458.,
'm / (s)', 0.0, system='si')
G = CODATA2018('G', "Gravitational constant", 6.67430e-11,
'm3 / (kg s2)', 0.00015e-11, system='si')
g0 = CODATA2018('g0', "Standard acceleration of gravity", 9.80665,
'm / s2', 0.0, system='si')
m_p = CODATA2018('m_p', "Proton mass", 1.67262192369e-27,
'kg', 0.00000000051e-27, system='si')
m_n = CODATA2018('m_n', "Neutron mass", 1.67492749804e-27,
'kg', 0.00000000095e-27, system='si')
m_e = CODATA2018('m_e', "Electron mass", 9.1093837015e-31,
'kg', 0.0000000028e-31, system='si')
u = CODATA2018('u', "Atomic mass", 1.66053906660e-27,
'kg', 0.00000000050e-27, system='si')
sigma_sb = CODATA2018(
'sigma_sb', "Stefan-Boltzmann constant",
2 * math.pi ** 5 * k_B.value ** 4 / (15 * h.value ** 3 * c.value ** 2),
'W / (K4 m2)', 0.0, system='si')
e = EMCODATA2018('e', 'Electron charge', 1.602176634e-19,
'C', 0.0, system='si')
eps0 = EMCODATA2018('eps0', 'Vacuum electric permittivity', 8.8541878128e-12,
'F/m', 0.0000000013e-12, system='si')
N_A = CODATA2018('N_A', "Avogadro's number", 6.02214076e23,
'1 / (mol)', 0.0, system='si')
R = CODATA2018('R', "Gas constant", k_B.value * N_A.value,
'J / (K mol)', 0.0, system='si')
Ryd = CODATA2018('Ryd', 'Rydberg constant', 10973731.568160,
'1 / (m)', 0.000021, system='si')
a0 = CODATA2018('a0', "Bohr radius", 5.29177210903e-11,
'm', 0.00000000080e-11, system='si')
muB = CODATA2018('muB', "Bohr magneton", 9.2740100783e-24,
'J/T', 0.0000000028e-24, system='si')
alpha = CODATA2018('alpha', "Fine-structure constant", 7.2973525693e-3,
'', 0.0000000011e-3, system='si')
atm = CODATA2018('atm', "Standard atmosphere", 101325,
'Pa', 0.0, system='si')
mu0 = CODATA2018('mu0', "Vacuum magnetic permeability", 1.25663706212e-6,
'N/A2', 0.00000000019e-6, system='si')
sigma_T = CODATA2018('sigma_T', "Thomson scattering cross-section",
6.6524587321e-29, 'm2', 0.0000000060e-29,
system='si')
# Formula taken from NIST wall chart.
# The numerical factor is from a numerical solution to the equation for the
# maximum. See https://en.wikipedia.org/wiki/Wien%27s_displacement_law
b_wien = CODATA2018('b_wien', 'Wien wavelength displacement law constant',
h.value * c.value / (k_B.value * 4.965114231744276), 'm K',
0.0, system='si')
# CGS constants.
# Only constants that cannot be converted directly from S.I. are defined here.
# Because both e and c are exact, these are also exact by definition.
e_esu = EMCODATA2018(e.abbrev, e.name, e.value * c.value * 10.0,
'statC', 0.0, system='esu')
e_emu = EMCODATA2018(e.abbrev, e.name, e.value / 10, 'abC',
0.0, system='emu')
e_gauss = EMCODATA2018(e.abbrev, e.name, e.value * c.value * 10.0,
'Fr', 0.0, system='gauss')
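# --- Editor's illustrative sketch (not part of the astropy source) ----------
# Under the 2019 SI redefinition, h, k_B, e and N_A are exact, so constants
# derived from them above (such as the gas constant R) are exact as well.
def _example_exact_constants():  # pragma: no cover - illustration only
    return h.uncertainty, N_A.uncertainty, R.uncertainty  # (0.0, 0.0, 0.0)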
|
9be6c25ddb4036b8127c2a045df1fb9f648f769af80e0c0b357549445ca6d986 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import types
import warnings
import numpy as np
from astropy.units.core import Unit, UnitsError
from astropy.units.quantity import Quantity
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['Constant', 'EMConstant']
class ConstantMeta(type):
"""Metaclass for `~astropy.constants.Constant`. The primary purpose of this
is to wrap the double-underscore methods of `~astropy.units.Quantity`
which is the superclass of `~astropy.constants.Constant`.
In particular this wraps the operator overloads such as `__add__` to
prevent constants such as ``e`` from being used in
expressions without specifying a system. The wrapper checks to see if the
constant is listed (by name) in ``Constant._has_incompatible_units``, a set
of those constants whose definitions in different systems of units are
physically incompatible. It also performs this check on each `Constant` if
it hasn't already been performed (the check is deferred until the
`Constant` is actually used in an expression to speed up import times,
among other reasons).
"""
def __new__(mcls, name, bases, d):
def wrap(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
name_lower = self.name.lower()
instances = self._registry[name_lower]
if not self._checked_units:
for inst in instances.values():
try:
self.unit.to(inst.unit)
except UnitsError:
self._has_incompatible_units.add(name_lower)
self._checked_units = True
if (not self.system and
name_lower in self._has_incompatible_units):
systems = sorted([x for x in instances if x])
raise TypeError(
'Constant {!r} does not have physically compatible '
'units across all systems of units and cannot be '
'combined with other values without specifying a '
'system (eg. {}.{})'.format(self.abbrev, self.abbrev,
systems[0]))
return meth(self, *args, **kwargs)
return wrapper
# The wrapper applies to so many of the __ methods that it's easier to
# just exclude the ones it doesn't apply to
exclude = set(['__new__', '__array_finalize__', '__array_wrap__',
'__dir__', '__getattr__', '__init__', '__str__',
'__repr__', '__hash__', '__iter__', '__getitem__',
'__len__', '__bool__', '__quantity_subclass__',
'__setstate__'])
for attr, value in vars(Quantity).items():
if (isinstance(value, types.FunctionType) and
attr.startswith('__') and attr.endswith('__') and
attr not in exclude):
d[attr] = wrap(value)
return super().__new__(mcls, name, bases, d)
class Constant(Quantity, metaclass=ConstantMeta):
"""A physical or astronomical constant.
These objects are quantities that are meant to represent physical
constants.
"""
_registry = {}
_has_incompatible_units = set()
def __new__(cls, abbrev, name, value, unit, uncertainty,
reference=None, system=None):
if reference is None:
reference = getattr(cls, 'default_reference', None)
if reference is None:
raise TypeError(f"{cls} requires a reference.")
name_lower = name.lower()
instances = cls._registry.setdefault(name_lower, {})
# By-pass Quantity initialization, since units may not yet be
# initialized here, and we store the unit in string form.
inst = np.array(value).view(cls)
if system in instances:
warnings.warn('Constant {!r} already has a definition in the '
'{!r} system from {!r} reference'.format(
name, system, reference), AstropyUserWarning)
for c in instances.values():
if system is not None and not hasattr(c.__class__, system):
setattr(c, system, inst)
if c.system is not None and not hasattr(inst.__class__, c.system):
setattr(inst, c.system, c)
instances[system] = inst
inst._abbrev = abbrev
inst._name = name
inst._value = value
inst._unit_string = unit
inst._uncertainty = uncertainty
inst._reference = reference
inst._system = system
inst._checked_units = False
return inst
def __repr__(self):
return ('<{} name={!r} value={} uncertainty={} unit={!r} '
'reference={!r}>'.format(self.__class__, self.name, self.value,
self.uncertainty, str(self.unit),
self.reference))
def __str__(self):
return (' Name = {}\n'
' Value = {}\n'
' Uncertainty = {}\n'
' Unit = {}\n'
' Reference = {}'.format(self.name, self.value,
self.uncertainty, self.unit,
self.reference))
def __quantity_subclass__(self, unit):
return super().__quantity_subclass__(unit)[0], False
def copy(self):
"""
Return a copy of this `Constant` instance. Since they are by
definition immutable, this merely returns another reference to
``self``.
"""
return self
__deepcopy__ = __copy__ = copy
@property
def abbrev(self):
"""A typical ASCII text abbreviation of the constant, also generally
the same as the Python variable used for this constant.
"""
return self._abbrev
@property
def name(self):
"""The full name of the constant."""
return self._name
@lazyproperty
def _unit(self):
"""The unit(s) in which this constant is defined."""
return Unit(self._unit_string)
@property
def uncertainty(self):
"""The known absolute uncertainty in this constant's value."""
return self._uncertainty
@property
def reference(self):
"""The source used for the value of this constant."""
return self._reference
@property
def system(self):
"""The system of units in which this constant is defined (typically
`None` so long as the constant's units can be directly converted
between systems).
"""
return self._system
def _instance_or_super(self, key):
instances = self._registry[self.name.lower()]
inst = instances.get(key)
if inst is not None:
return inst
else:
return getattr(super(), key)
@property
def si(self):
"""If the Constant is defined in the SI system return that instance of
the constant, else convert to a Quantity in the appropriate SI units.
"""
return self._instance_or_super('si')
@property
def cgs(self):
"""If the Constant is defined in the CGS system return that instance of
the constant, else convert to a Quantity in the appropriate CGS units.
"""
return self._instance_or_super('cgs')
def __array_finalize__(self, obj):
for attr in ('_abbrev', '_name', '_value', '_unit_string',
'_uncertainty', '_reference', '_system'):
setattr(self, attr, getattr(obj, attr, None))
self._checked_units = getattr(obj, '_checked_units', False)
class EMConstant(Constant):
"""An electromagnetic constant."""
@property
def cgs(self):
"""Overridden for EMConstant to raise a `TypeError`
emphasizing that there are multiple EM extensions to CGS.
"""
raise TypeError("Cannot convert EM constants to cgs because there "
"are different systems for E.M constants within the "
"c.g.s system (ESU, Gaussian, etc.). Instead, "
"directly use the constant with the appropriate "
"suffix (e.g. e.esu, e.gauss, etc.).")
|
fd79b52eba117c7394745dc90764d9b527072abba1fd56f786b1d32527a232af | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import numpy as np
from .constant import Constant, EMConstant
# PHYSICAL CONSTANTS
class CODATA2010(Constant):
default_reference = 'CODATA 2010'
_registry = {}
_has_incompatible_units = set()
def __new__(cls, abbrev, name, value, unit, uncertainty,
reference=default_reference, system=None):
return super().__new__(
cls, abbrev, name, value, unit, uncertainty, reference, system)
class EMCODATA2010(CODATA2010, EMConstant):
_registry = CODATA2010._registry
h = CODATA2010('h', "Planck constant", 6.62606957e-34, 'J s',
0.00000029e-34, system='si')
hbar = CODATA2010('hbar', "Reduced Planck constant",
h.value * 0.5 / np.pi, 'J s',
h.uncertainty * 0.5 / np.pi,
h.reference, system='si')
k_B = CODATA2010('k_B', "Boltzmann constant", 1.3806488e-23, 'J / (K)',
0.0000013e-23, system='si')
c = CODATA2010('c', "Speed of light in vacuum", 2.99792458e8, 'm / (s)', 0.,
system='si')
G = CODATA2010('G', "Gravitational constant", 6.67384e-11, 'm3 / (kg s2)',
0.00080e-11, system='si')
g0 = CODATA2010('g0', "Standard acceleration of gravity", 9.80665, 'm / s2',
0.0, system='si')
m_p = CODATA2010('m_p', "Proton mass", 1.672621777e-27, 'kg', 0.000000074e-27,
system='si')
m_n = CODATA2010('m_n', "Neutron mass", 1.674927351e-27, 'kg', 0.000000074e-27,
system='si')
m_e = CODATA2010('m_e', "Electron mass", 9.10938291e-31, 'kg', 0.00000040e-31,
system='si')
u = CODATA2010('u', "Atomic mass", 1.660538921e-27, 'kg', 0.000000073e-27,
system='si')
sigma_sb = CODATA2010('sigma_sb', "Stefan-Boltzmann constant", 5.670373e-8,
'W / (K4 m2)', 0.000021e-8, system='si')
e = EMCODATA2010('e', 'Electron charge', 1.602176565e-19, 'C', 0.000000035e-19,
system='si')
eps0 = EMCODATA2010('eps0', 'Electric constant', 8.854187817e-12, 'F/m', 0.0,
system='si')
N_A = CODATA2010('N_A', "Avogadro's number", 6.02214129e23, '1 / (mol)',
0.00000027e23, system='si')
R = CODATA2010('R', "Gas constant", 8.3144621, 'J / (K mol)', 0.0000075,
system='si')
Ryd = CODATA2010('Ryd', 'Rydberg constant', 10973731.568539, '1 / (m)',
0.000055, system='si')
a0 = CODATA2010('a0', "Bohr radius", 0.52917721092e-10, 'm', 0.00000000017e-10,
system='si')
muB = CODATA2010('muB', "Bohr magneton", 927.400968e-26, 'J/T', 0.00002e-26,
system='si')
alpha = CODATA2010('alpha', "Fine-structure constant", 7.2973525698e-3,
'', 0.0000000024e-3, system='si')
atm = CODATA2010('atm', "Standard atmosphere", 101325, 'Pa', 0.0,
system='si')
mu0 = CODATA2010('mu0', "Magnetic constant", 4.0e-7 * np.pi, 'N/A2', 0.0,
system='si')
sigma_T = CODATA2010('sigma_T', "Thomson scattering cross-section",
0.6652458734e-28, 'm2', 0.0000000013e-28, system='si')
b_wien = Constant('b_wien', 'Wien wavelength displacement law constant',
2.8977721e-3, 'm K', 0.0000026e-3, 'CODATA 2010', system='si')
# cgs constants
# Only constants that cannot be converted directly from S.I. are defined here.
e_esu = EMCODATA2010(e.abbrev, e.name, e.value * c.value * 10.0,
'statC', e.uncertainty * c.value * 10.0, system='esu')
e_emu = EMCODATA2010(e.abbrev, e.name, e.value / 10, 'abC',
e.uncertainty / 10, system='emu')
e_gauss = EMCODATA2010(e.abbrev, e.name, e.value * c.value * 10.0,
'Fr', e.uncertainty * c.value * 10.0, system='gauss')
|
976ff12fa11e050043c7801f3a4cb51982819dfaa6e5ba50cab1302bf7277924 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import itertools
from .config import codata, iaudata
from .constant import Constant
for _nm, _c in itertools.chain(sorted(vars(codata).items()),
sorted(vars(iaudata).items())):
if (isinstance(_c, Constant) and _c.abbrev not in locals()
and _c.system == 'si'):
locals()[_c.abbrev] = _c
|
632176446a2d126e44536efde415c8afe681d88ecb99aab614a2142a5ebb5b60 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io import registry
from .info import serialize_method_as
__all__ = ['TableRead', 'TableWrite']
__doctest_skip__ = ['TableRead', 'TableWrite']
class TableRead(registry.UnifiedReadWrite):
"""Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
Get help on the available readers for ``Table`` using the ``help()`` method::
>>> Table.read.help() # Get help reading Table and list supported formats
>>> Table.read.help('fits') # Get detailed help on Table FITS reader
>>> Table.read.list_formats() # Print list of available formats
See also: https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is typically the input filename.
format : str
File format specifier.
units : list, dict, optional
List or dict of units to apply to columns
descriptions : list, dict, optional
List or dict of descriptions to apply to columns
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
out : `~astropy.table.Table`
Table corresponding to file contents
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, 'read', registry=None)
# uses default global registry
def __call__(self, *args, **kwargs):
cls = self._cls
units = kwargs.pop('units', None)
descriptions = kwargs.pop('descriptions', None)
out = self.registry.read(cls, *args, **kwargs)
# For some readers (e.g., ascii.ecsv), the returned `out` class is not
# guaranteed to be the same as the desired output `cls`. If so,
# try coercing to desired class without copying (io.registry.read
# would normally do a copy). The normal case here is swapping
# Table <=> QTable.
if cls is not out.__class__:
try:
out = cls(out, copy=False)
except Exception:
raise TypeError('could not convert reader output to {} '
'class.'.format(cls.__name__))
out._set_column_attribute('unit', units)
out._set_column_attribute('description', descriptions)
return out
class TableWrite(registry.UnifiedReadWrite):
"""
Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
Get help on the available writers for ``Table`` using the ``help()`` method::
>>> Table.write.help() # Get help writing Table and list supported formats
>>> Table.write.help('fits') # Get detailed help on Table FITS writer
>>> Table.write.list_formats() # Print list of available formats
The ``serialize_method`` argument is explained in the section on
`Table serialization methods
<https://docs.astropy.org/en/latest/io/unified.html#table-serialization-methods>`_.
See also: https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
        Positional arguments passed through to data writer. If supplied, the
first argument is the output filename.
format : str
File format specifier.
serialize_method : str, dict, optional
Serialization method specifier for columns.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
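    A minimal sketch of the ``serialize_method`` keyword (illustrative;
    doctests in this class are skipped via ``__doctest_skip__``)::

        >>> dat.write('table.ecsv', serialize_method='data_mask')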
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, 'write', registry=None)
# uses default global registry
def __call__(self, *args, serialize_method=None, **kwargs):
instance = self._instance
with serialize_method_as(instance, serialize_method):
self.registry.write(instance, *args, **kwargs)
|
9f55f5b6c001ac7b387648d082eaf005e29735058b770c5ccbea1720f1fb5642 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import astropy.config as _config
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
__all__ = ['BST', 'Column', 'ColumnGroups', 'ColumnInfo', 'Conf',
'JSViewer', 'MaskedColumn', 'NdarrayMixin', 'QTable', 'Row',
'SCEngine', 'SerializedColumn', 'SortedArray', 'StringTruncateWarning',
'Table', 'TableAttribute', 'TableColumns', 'TableFormatter',
'TableGroups', 'TableMergeError', 'TableReplaceWarning', 'conf',
'connect', 'hstack', 'join', 'registry', 'represent_mixins_as_columns',
           'setdiff', 'unique', 'vstack', 'dstack', 'join_skycoord',
'join_distance', 'PprintIncludeExclude']
class Conf(_config.ConfigNamespace): # noqa
"""
Configuration parameters for `astropy.table`.
"""
auto_colname = _config.ConfigItem(
'col{0}',
'The template that determines the name of a column if it cannot be '
'determined. Uses new-style (format method) string formatting.',
aliases=['astropy.table.column.auto_colname'])
default_notebook_table_class = _config.ConfigItem(
'table-striped table-bordered table-condensed',
'The table class to be used in Jupyter notebooks when displaying '
        'tables (and not overridden). See <https://getbootstrap.com/css/#tables> '
'for a list of useful bootstrap classes.')
replace_warnings = _config.ConfigItem(
[],
'List of conditions for issuing a warning when replacing a table '
"column using setitem, e.g. t['a'] = value. Allowed options are "
"'always', 'slice', 'refcount', 'attributes'.",
'string_list')
replace_inplace = _config.ConfigItem(
False,
'Always use in-place update of a table column when using setitem, '
"e.g. t['a'] = value. This overrides the default behavior of "
"replacing the column entirely with the new value when possible. "
"This configuration option will be deprecated and then removed in "
"subsequent major releases.")
conf = Conf() # noqa
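# A short usage sketch for the configuration namespace defined above
# (illustrative; the value shown is the default)::
#
#     >>> from astropy.table import conf
#     >>> conf.auto_colname
#     'col{0}'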
from . import connect # noqa: E402
from .groups import TableGroups, ColumnGroups # noqa: E402
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning, TableAttribute,
PprintIncludeExclude) # noqa: E402
from .operations import (join, setdiff, hstack, dstack, vstack, unique, # noqa: E402
TableMergeError, join_skycoord, join_distance) # noqa: E402
from .bst import BST # noqa: E402
from .sorted_array import SortedArray # noqa: E402
from .soco import SCEngine # noqa: E402
from .serialize import SerializedColumn, represent_mixins_as_columns # noqa: E402
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from astropy.io import registry # noqa: E402
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
from .jsviewer import JSViewer
import astropy.io.ascii.connect
import astropy.io.fits.connect
import astropy.io.misc.connect
import astropy.io.votable.connect
import astropy.io.misc.asdf.connect
import astropy.io.misc.pandas.connect # noqa: F401
|
c7a43c550ca093c289825e09947505b08851e8ed59f33d5e4b4baa243433acc7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for table development, mostly creating useful
tables for testing.
"""
from itertools import cycle
import string
import numpy as np
from .table import Table, Column
from astropy.utils.data_info import ParentDtypeInfo
class TimingTables:
"""
Object which contains two tables and various other attributes that
are useful for timing and other API tests.
"""
def __init__(self, size=1000, masked=False):
self.masked = masked
# Initialize table
self.table = Table(masked=self.masked)
# Create column with mixed types
np.random.seed(12345)
self.table['i'] = np.arange(size)
self.table['a'] = np.random.random(size) # float
self.table['b'] = np.random.random(size) > 0.5 # bool
self.table['c'] = np.random.random((size, 10)) # 2d column
self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)), size)
self.extra_row = {'a': 1.2, 'b': True, 'c': np.repeat(1, 10), 'd': 'Z'}
self.extra_column = np.random.randint(0, 100, size)
self.row_indices = np.where(self.table['a'] > 0.9)[0]
self.table_grouped = self.table.group_by('d')
# Another table for testing joining
self.other_table = Table(masked=self.masked)
self.other_table['i'] = np.arange(1, size, 3)
self.other_table['f'] = np.random.random()
self.other_table.sort('f')
# Another table for testing hstack
self.other_table_2 = Table(masked=self.masked)
self.other_table_2['g'] = np.random.random(size)
self.other_table_2['h'] = np.random.random((size, 10))
self.bool_mask = self.table['a'] > 0.6
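# A brief usage sketch of TimingTables (an illustrative example, not part of
# the public API)::
#
#     >>> tt = TimingTables(size=100)
#     >>> len(tt.table), tt.table.colnames
#     (100, ['i', 'a', 'b', 'c', 'd'])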
def simple_table(size=3, cols=None, kinds='ifS', masked=False):
"""
Return a simple table for testing.
    Examples
    --------
::
>>> from astropy.table.table_helpers import simple_table
>>> print(simple_table(3, 6, masked=True, kinds='ifOS'))
a b c d e f
--- --- -------- --- --- ---
-- 1.0 {'c': 2} -- 5 5.0
2 2.0 -- e 6 --
3 -- {'e': 4} f -- 7.0
Parameters
----------
size : int
Number of table rows
cols : int, optional
Number of table columns. Defaults to number of kinds.
kinds : str
String consisting of the column dtype.kinds. This string
will be cycled through to generate the column dtype.
The allowed values are 'i', 'f', 'S', 'O'.
Returns
-------
out : `Table`
New table with appropriate characteristics
"""
if cols is None:
cols = len(kinds)
if cols > 26:
        raise ValueError("Max 26 columns in simple_table")
columns = []
names = [chr(ord('a') + ii) for ii in range(cols)]
letters = np.array([c for c in string.ascii_letters])
for jj, kind in zip(range(cols), cycle(kinds)):
if kind == 'i':
data = np.arange(1, size + 1, dtype=np.int64) + jj
elif kind == 'f':
data = np.arange(size, dtype=np.float64) + jj
elif kind == 'S':
indices = (np.arange(size) + jj) % len(letters)
data = letters[indices]
elif kind == 'O':
indices = (np.arange(size) + jj) % len(letters)
vals = letters[indices]
data = [{val: index} for val, index in zip(vals, indices)]
else:
raise ValueError('Unknown data kind')
columns.append(Column(data))
table = Table(columns, names=names, masked=masked)
if masked:
for ii, col in enumerate(table.columns.values()):
mask = np.array((np.arange(size) + ii) % 3, dtype=bool)
col.mask = ~mask
return table
def complex_table():
"""
Return a masked table from the io.votable test set that has a wide variety
of stressing types.
"""
from astropy.utils.data import get_pkg_data_filename
from astropy.io.votable.table import parse
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
votable = parse(get_pkg_data_filename('../io/votable/tests/data/regression.xml'),
pedantic=False)
first_table = votable.get_first_table()
table = first_table.to_table()
return table
class ArrayWrapperInfo(ParentDtypeInfo):
_represent_as_dict_primary_data = 'data'
def _represent_as_dict(self):
"""Represent Column as a dict that can be serialized."""
col = self._parent
out = {'data': col.data}
return out
def _construct_from_dict(self, map):
"""Construct Column from ``map``."""
data = map.pop('data')
out = self._parent_cls(data, **map)
return out
class ArrayWrapper:
"""
Minimal mixin using a simple wrapper around a numpy array
TODO: think about the future of this class as it is mostly for demonstration
purposes (of the mixin protocol). Consider taking it out of core and putting
it into a tutorial. One advantage of having this in core is that it is
getting tested in the mixin testing though it doesn't work for multidim
data.
"""
info = ArrayWrapperInfo()
def __init__(self, data):
self.data = np.array(data)
if 'info' in getattr(data, '__dict__', ()):
self.info = data.info
def __getitem__(self, item):
if isinstance(item, (int, np.integer)):
out = self.data[item]
else:
out = self.__class__(self.data[item])
if 'info' in self.__dict__:
out.info = self.info
return out
def __setitem__(self, item, value):
self.data[item] = value
def __len__(self):
return len(self.data)
def __eq__(self, other):
"""Minimal equality testing, mostly for mixin unit tests"""
if isinstance(other, ArrayWrapper):
return self.data == other.data
else:
return self.data == other
@property
def dtype(self):
return self.data.dtype
@property
def shape(self):
return self.data.shape
def __repr__(self):
return f"<{self.__class__.__name__} name='{self.info.name}' data={self.data}>"
|
4b83259e6207d52d8d9b693d36812f8c57b56e520ae8e00263d32e7baa712a94 | """
Table property for providing information about table.
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import os
from contextlib import contextmanager
from inspect import isclass
import numpy as np
from astropy.utils.data_info import DataInfo
__all__ = ['table_info', 'TableInfo', 'serialize_method_as']
def table_info(tbl, option='attributes', out=''):
"""
    Write summary information about the table to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: basic column meta data like ``dtype`` or ``format``
- ``stats``: basic statistics: minimum, mean, and maximum
If a function is specified then that function will be called with the
column as its single argument. The function must return an OrderedDict
containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name dtype unit
---- ------- ----
a int64 m
b float64
>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
a 1.5 0.5 1 2
b 1.5 0.5 1 2
Parameters
----------
option : str, callable, list of (str or callable)
Info option, defaults to 'attributes'.
out : file-like, None
Output destination, default is sys.stdout. If None then a
Table with information attributes is returned
Returns
-------
info : `~astropy.table.Table` if out==None else None
"""
from .table import Table
if out == '':
out = sys.stdout
descr_vals = [tbl.__class__.__name__]
if tbl.masked:
descr_vals.append('masked=True')
descr_vals.append(f'length={len(tbl)}')
outlines = ['<' + ' '.join(descr_vals) + '>']
cols = list(tbl.columns.values())
if tbl.colnames:
infos = []
for col in cols:
infos.append(col.info(option, out=None))
info = Table(infos, names=list(infos[0]))
else:
info = Table()
if out is None:
return info
# Since info is going to a filehandle for viewing then remove uninteresting
# columns.
if 'class' in info.colnames:
# Remove 'class' info column if all table columns are the same class
# and they are the default column class for that table.
uniq_types = set(type(col) for col in cols)
if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
del info['class']
if 'n_bad' in info.colnames and np.all(info['n_bad'] == 0):
del info['n_bad']
# Standard attributes has 'length' but this is typically redundant
if 'length' in info.colnames and np.all(info['length'] == len(tbl)):
del info['length']
for name in info.colnames:
if info[name].dtype.kind in 'SU' and np.all(info[name] == ''):
del info[name]
if tbl.colnames:
outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
else:
outlines.append('<No columns>')
out.writelines(outline + os.linesep for outline in outlines)
class TableInfo(DataInfo):
def __call__(self, option='attributes', out=''):
return table_info(self._parent, option, out)
__call__.__doc__ = table_info.__doc__
@contextmanager
def serialize_method_as(tbl, serialize_method):
"""Context manager to temporarily override individual
column info.serialize_method dict values. The serialize_method
attribute is an optional dict which might look like ``{'fits':
'jd1_jd2', 'ecsv': 'formatted_value', ..}``.
    ``serialize_method`` is a str or dict. If a str then it is the
    ``serialize_method`` that will be used for all formats.
If dict then the key values can be either:
- Column name. This has higher precedence than the second option of
matching class.
- Class (matches any column which is an instance of the class)
This context manager is expected to be used only within ``Table.write``.
    It could have been a private method on Table, but we prefer not to add
clutter to that class.
Parameters
----------
tbl : Table object
Input table
serialize_method : dict, str
Dict with key values of column names or types, or str
Returns
-------
None (context manager)
"""
def get_override_sm(col):
"""
Determine if the ``serialize_method`` str or dict specifies an
override of column presets for ``col``. Returns the matching
serialize_method value or ``None``.
"""
# If a string then all columns match
if isinstance(serialize_method, str):
return serialize_method
# If column name then return that serialize_method
if col.info.name in serialize_method:
return serialize_method[col.info.name]
# Otherwise look for subclass matches
for key in serialize_method:
if isclass(key) and isinstance(col, key):
return serialize_method[key]
return None
# Setup for the context block. Set individual column.info.serialize_method
# values as appropriate and keep a backup copy. If ``serialize_method``
# is None or empty then don't do anything.
# Original serialize_method dict, keyed by column name. This only
# gets used and set if there is an override.
original_sms = {}
if serialize_method:
# Go through every column and if it has a serialize_method info
# attribute then potentially update it for the duration of the write.
for col in tbl.itercols():
if hasattr(col.info, 'serialize_method'):
override_sm = get_override_sm(col)
if override_sm:
# Make a reference copy of the column serialize_method
# dict which maps format (e.g. 'fits') to the
# appropriate method (e.g. 'data_mask').
original_sms[col.info.name] = col.info.serialize_method
# Set serialize method for *every* available format. This is
# brute force, but at this point the format ('fits', 'ecsv', etc)
# is not actually known (this gets determined by the write function
# in registry.py). Note this creates a new temporary dict object
# so that the restored version is the same original object.
col.info.serialize_method = {fmt: override_sm
for fmt in col.info.serialize_method}
# Finally yield for the context block
try:
yield
finally:
# Teardown (restore) for the context block. Be sure to do this even
# if an exception occurred.
if serialize_method:
for name, original_sm in original_sms.items():
tbl[name].info.serialize_method = original_sm
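# A minimal sketch of this context manager as used by ``Table.write``
# (illustrative; ``tbl`` is any table with masked columns)::
#
#     >>> with serialize_method_as(tbl, 'data_mask'):  # doctest: +SKIP
#     ...     tbl.write('out.ecsv')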
|
ef336ec0932a28b8b9b88157706988ce3b213d6c365c2299e9bf4a64a0ef5be7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import warnings
import weakref
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy.units import Unit, Quantity
from astropy.utils.console import color_print
from astropy.utils.metadata import MetaData
from astropy.utils.data_info import BaseColumnInfo, dtype_info_name
from astropy.utils.misc import dtype_bytes_or_chars
from . import groups
from . import pprint
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
# Always emit this warning, not just the first instance
warnings.simplefilter('always', StringTruncateWarning)
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = set(
[np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])
def col_copy(col, copy_indices=True):
"""
Mixin-safe version of Column.copy() (with copy_data=True).
Parameters
----------
col : Column or mixin column
Input column
copy_indices : bool
Copy the column ``indices`` attribute
Returns
-------
col : Copy of input column
"""
if isinstance(col, BaseColumn):
return col.copy()
newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
# If the column has info defined, we copy it and adjust any indices
# to point to the copied column. By guarding with the if statement,
# we avoid side effects (of creating the default info instance).
if 'info' in col.__dict__:
newcol.info = col.info
if copy_indices and col.info.indices:
newcol.info.indices = deepcopy(col.info.indices)
for index in newcol.info.indices:
index.replace_col(col, newcol)
return newcol
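# A short sketch of ``col_copy`` (illustrative)::
#
#     >>> from astropy.table import Column
#     >>> c = Column([1, 2], name='a')
#     >>> c2 = col_copy(c)      # equivalent to c.copy() for a real Column
#     >>> c2 is c
#     False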
class FalseArray(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError('Cannot set any element of {} class to True'
.format(self.__class__.__name__))
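# A short sketch of the intended FalseArray behavior (illustrative)::
#
#     >>> mask = FalseArray((2,))
#     >>> mask[0] = False       # assigning False is silently allowed (a no-op)
#     >>> mask[0] = True        # doctest: +IGNORE_EXCEPTION_DETAIL
#     Traceback (most recent call last):
#     ValueError: Cannot set any element of FalseArray class to True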
def _expand_string_array_for_values(arr, values):
"""
For string-dtype return a version of ``arr`` that is wide enough for ``values``.
If ``arr`` is not string-dtype or does not need expansion then return ``arr``.
Parameters
----------
arr : np.ndarray
Input array
values : scalar or array-like
Values for width comparison for string arrays
Returns
-------
arr_expanded : np.ndarray
"""
if arr.dtype.kind in ('U', 'S') and values is not np.ma.masked:
# Find the length of the longest string in the new values.
values_str_len = np.char.str_len(values).max()
# Determine character repeat count of arr.dtype. Returns a positive
# int or None (something like 'U0' is not possible in numpy). If new values
# are longer than current then make a new (wider) version of arr.
arr_str_len = dtype_bytes_or_chars(arr.dtype)
if arr_str_len and values_str_len > arr_str_len:
arr_dtype = arr.dtype.byteorder + arr.dtype.kind + str(values_str_len)
arr = arr.astype(arr_dtype)
return arr
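# A short sketch of the widening behavior (illustrative; the exact dtype repr
# depends on platform byte order)::
#
#     >>> arr = np.array(['ab', 'cd'])       # dtype '<U2'
#     >>> _expand_string_array_for_values(arr, ['abcd']).dtype  # doctest: +SKIP
#     dtype('<U4')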
def _convert_sequence_data_to_array(data, dtype=None):
"""Convert N-d sequence-like data to ndarray or MaskedArray.
This is the core function for converting Python lists or list of lists to a
numpy array. This handles embedded np.ma.masked constants in ``data`` along
    with the special case of a homogeneous list of MaskedArray elements.
Considerations:
- np.ma.array is about 50 times slower than np.array for list input. This
function avoids using np.ma.array on list input.
- np.array emits a UserWarning for embedded np.ma.masked, but only for int
or float inputs. For those it converts to np.nan and forces float dtype.
For other types np.array is inconsistent, for instance converting
np.ma.masked to "0.0" for str types.
- Searching in pure Python for np.ma.masked in ``data`` is comparable in
speed to calling ``np.array(data)``.
- This function may end up making two additional copies of input ``data``.
Parameters
----------
data : N-d sequence
Input data, typically list or list of lists
dtype : None or dtype-like
Output datatype (None lets np.array choose)
Returns
-------
np_data : np.ndarray or np.ma.MaskedArray
"""
np_ma_masked = np.ma.masked # Avoid repeated lookups of this object
    # Special case of a homogeneous list of MaskedArray elements (see #8977).
# np.ma.masked is an instance of MaskedArray, so exclude those values.
if (hasattr(data, '__len__')
and len(data) > 0
and all(isinstance(val, np.ma.MaskedArray)
and val is not np_ma_masked for val in data)):
np_data = np.ma.array(data, dtype=dtype)
return np_data
# First convert data to a plain ndarray. If there are instances of np.ma.masked
# in the data this will issue a warning for int and float.
with warnings.catch_warnings(record=True) as warns:
# Ensure this warning from numpy is always enabled and that it is not
# converted to an error (which can happen during pytest).
warnings.filterwarnings('always', category=UserWarning,
message='.*converting a masked element.*')
# FutureWarning in numpy 1.21. See https://github.com/astropy/astropy/issues/11291
# and https://github.com/numpy/numpy/issues/18425.
warnings.filterwarnings('always', category=FutureWarning,
message='.*Promotion of numbers and bools to strings.*')
try:
np_data = np.array(data, dtype=dtype)
except np.ma.MaskError:
# Catches case of dtype=int with masked values, instead let it
# convert to float
np_data = np.array(data)
except Exception:
# Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity.
# First try to interpret the data as Quantity. If that still fails then fall
# through to object
try:
np_data = Quantity(data, dtype)
except Exception:
dtype = object
np_data = np.array(data, dtype=dtype)
if np_data.ndim == 0 or (np_data.ndim > 0 and len(np_data) == 0):
# Implies input was a scalar or an empty list (e.g. initializing an
# empty table with pre-declared names and dtypes but no data). Here we
# need to fall through to initializing with the original data=[].
return data
# If there were no warnings and the data are int or float, then we are done.
# Other dtypes like string or complex can have masked values and the
# np.array() conversion gives the wrong answer (e.g. converting np.ma.masked
# to the string "0.0").
if len(warns) == 0 and np_data.dtype.kind in ('i', 'f'):
return np_data
# Now we need to determine if there is an np.ma.masked anywhere in input data.
# Make a statement like below to look for np.ma.masked in a nested sequence.
# Because np.array(data) succeeded we know that `data` has a regular N-d
# structure. Find ma_masked:
# any(any(any(d2 is ma_masked for d2 in d1) for d1 in d0) for d0 in data)
# Using this eval avoids creating a copy of `data` in the more-usual case of
# no masked elements.
any_statement = 'd0 is ma_masked'
for ii in reversed(range(np_data.ndim)):
if ii == 0:
any_statement = f'any({any_statement} for d0 in data)'
elif ii == np_data.ndim - 1:
any_statement = f'any(d{ii} is ma_masked for d{ii} in d{ii-1})'
else:
any_statement = f'any({any_statement} for d{ii} in d{ii-1})'
context = {'ma_masked': np.ma.masked, 'data': data}
has_masked = eval(any_statement, context)
# If there are any masks then explicitly change each one to a fill value and
# set a mask boolean array. If not has_masked then we're done.
if has_masked:
mask = np.zeros(np_data.shape, dtype=bool)
data_filled = np.array(data, dtype=object)
# Make type-appropriate fill value based on initial conversion.
if np_data.dtype.kind == 'U':
fill = ''
elif np_data.dtype.kind == 'S':
fill = b''
else:
# Zero works for every numeric type.
fill = 0
ranges = [range(dim) for dim in np_data.shape]
for idxs in itertools.product(*ranges):
val = data_filled[idxs]
if val is np_ma_masked:
data_filled[idxs] = fill
mask[idxs] = True
elif isinstance(val, bool) and dtype is None:
# If we see a bool and dtype not specified then assume bool for
# the entire array. Not perfect but in most practical cases OK.
# Unfortunately numpy types [False, 0] as int, not bool (and
# [False, np.ma.masked] => array([0.0, np.nan])).
dtype = bool
# If no dtype is provided then need to convert back to list so np.array
# does type autodetection.
if dtype is None:
data_filled = data_filled.tolist()
# Use np.array first to convert `data` to ndarray (fast) and then make
# masked array from an ndarray with mask (fast) instead of from `data`.
np_data = np.ma.array(np.array(data_filled, dtype=dtype), mask=mask)
return np_data
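# A short sketch of the masked-element handling (illustrative)::
#
#     >>> out = _convert_sequence_data_to_array([1, np.ma.masked, 3])
#     >>> out.data.tolist(), out.mask.tolist()  # doctest: +SKIP
#     ([1, 0, 3], [False, True, False])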
def _make_compare(oper):
"""
Make Column comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
Parameters
----------
oper : str
Operator name
"""
swapped_oper = {'__eq__': '__eq__',
'__ne__': '__ne__',
'__gt__': '__lt__',
'__lt__': '__gt__',
'__ge__': '__le__',
'__le__': '__ge__'}[oper]
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# Special case to work around #6838. Other combinations work OK,
# see tests.test_column.test_unicode_sandwich_compare(). In this
# case just swap self and other.
#
# This is related to an issue in numpy that was addressed in np 1.13.
# However that fix does not make this problem go away, but maybe
# future numpy versions will do so. NUMPY_LT_1_13 to get the
# attention of future maintainers to check (by deleting or versioning
# the if block below). See #6899 discussion.
# 2019-06-21: still needed with numpy 1.16.
if (isinstance(self, MaskedColumn) and self.dtype.kind == 'U'
and isinstance(other, MaskedColumn) and other.dtype.kind == 'S'):
self, other = other, self
op = swapped_oper
if self.dtype.char == 'S':
other = self._encode_str(other)
# Now just let the regular ndarray.__eq__, etc., take over.
result = getattr(super(Column, self), op)(other)
# But we should not return Column instances for this case.
return result.data if isinstance(result, Column) else result
return _compare
class ColumnInfo(BaseColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information.
"""
attrs_from_parent = BaseColumnInfo.attr_names
_supports_indexing = True
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Column instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Column (or subclass)
New instance of this class consistent with ``cols``
"""
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'unit', 'format', 'description'))
return self._parent_cls(length=length, **attrs)
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Column this is just the column itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if data is None:
self_data = np.zeros((length,)+shape, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
                unit = data.unit
if format is None:
format = data.format
if meta is None:
meta = data.meta
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = Quantity(data, unit, dtype=dtype, copy=copy).value
# If 'info' has been defined, copy basic properties (if needed).
if 'info' in data.__dict__:
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = data.info.meta
else:
if np.dtype(dtype).char == 'S':
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = None if name is None else str(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
self.indices = deepcopy(getattr(data, 'indices', [])) if copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def value(self):
return self.data
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
        # such as after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, '_parent_table', None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order='C', data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
            as possible. (Note that this function and :func:`numpy.copy` are very
            similar, but have different default values for their ``order``
            arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# If there is meta on the original column then deepcopy (since "copy" of column
# implies complete independence from original). __array_finalize__ will have already
# made a light copy. I'm not sure how to avoid that initial light copy.
if self.meta is not None:
out.meta = self.meta # MetaData descriptor does a deepcopy here
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ('_name', '_unit', '_format', 'description', 'meta', 'indices')
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (self.name, self.unit, self.format, self.description,
self.meta, self.indices)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
def __array_finalize__(self, obj):
# Obj will be none for direct call to Column() creator
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, 'indices'): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
if 'info' in getattr(obj, '__dict__', {}):
self.info = obj.info
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
           For this case, we use "[()]" to select everything, and to ensure we
           convert a zero rank array to a scalar. (For some reason np.sum()
           returns a zero rank scalar array while np.mean() returns a scalar,
           so the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super().__array_wrap__(out_arr, context)
if (self.shape != out_arr.shape
or (isinstance(out_arr, BaseColumn)
and (context is not None
and context[0] in _comparison_functions))):
return out_arr.data[()]
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
if val is not None:
val = str(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, '_format', None)
self._format = format_string # set new format string
try:
# test whether it formats without error exemplarily
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
"Invalid format for column '{}': could not display "
"values in this column using this format".format(
self.name)) from err
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
show_dtype=False, outs={}):
yield str_val
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : bool
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError('Comparison `col` must be a Column or '
'MaskedColumn object')
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
html=False):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
html=html)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
            Include column dtype. Default is False.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(self, max_lines=max_lines, show_name=show_name,
show_unit=show_unit)
@property
def unit(self):
"""
The unit associated with this column. May be a string or a
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict='silent')
@unit.deleter
def unit(self):
self._unit = None
def searchsorted(self, v, side='left', sorter=None):
# For bytes type data, encode the `v` value as UTF-8 (if necessary) before
# calling searchsorted. This prevents a factor of 1000 slowdown in
# searchsorted in this case.
a = self.data
if a.dtype.kind == 'S' and not isinstance(v, bytes):
v = np.asarray(v)
if v.dtype.kind == 'U':
v = np.char.encode(v, 'utf-8')
return np.searchsorted(a, v, side=side, sorter=sorter)
searchsorted.__doc__ = np.ndarray.searchsorted.__doc__
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of tuple
            A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
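
        Examples
        --------
        A minimal in-place conversion (an illustrative sketch)::

            >>> from astropy.table import Column
            >>> col = Column([1000.0, 2000.0], name='d', unit='m')
            >>> col.convert_unit_to('km')
            >>> col.unit
            Unit("km")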
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(
new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``
"""
if self.parent_table:
if hasattr(self.parent_table, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)
elif hasattr(self, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(self, self.unit, copy=False, dtype=self.dtype, order='A', subok=True)
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : unit-like
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of tuple
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self
"""
for attr in ('name', 'unit', '_format', 'description'):
val = getattr(obj, attr, None)
setattr(self, attr, val)
# Light copy of meta if it is not empty
obj_meta = getattr(obj, 'meta', None)
if obj_meta:
self.meta = obj_meta.copy()
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == 'U':
arr = np.char.encode(arr, encoding='utf-8')
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
def tolist(self):
if self.dtype.kind == 'S':
return np.chararray.decode(self, encoding='utf-8').tolist()
else:
return super().tolist()
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
      If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
self = super().__new__(
cls, data=data, name=name, dtype=dtype, shape=shape, length=length,
description=description, unit=unit, format=format, meta=meta,
copy=copy, copy_indices=copy_indices)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError("cannot set mask value to a column in non-masked Table")
super().__setattr__(item, value)
if item == 'unit' and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (('name', self.name),
('dtype', dtype_info_name(self.dtype)),
('shape', shape),
('unit', unit),
('format', self.format),
('description', self.description),
('length', len(self))):
if val is not None:
descr_vals.append(f'{attr}={val!r}')
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from astropy.utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return '\n'.join(lines)
def __bytes__(self):
return str(self).encode('utf-8')
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
# Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
# has the character repeat count on the right side.
self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn('truncated right side string(s) longer than {} '
'character(s) during assignment'
.format(self_str_len),
StringTruncateWarning,
stacklevel=3)
def __setitem__(self, index, value):
if self.dtype.char == 'S':
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
__eq__ = _make_compare('__eq__')
__ne__ = _make_compare('__ne__')
__gt__ = _make_compare('__gt__')
__lt__ = _make_compare('__lt__')
__ge__ = _make_compare('__ge__')
__le__ = _make_compare('__le__')
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
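
        Examples
        --------
        An illustrative sketch (repr details may differ, so the doctest is
        skipped)::

            >>> from astropy.table import Column
            >>> c = Column([1, 2, 3], name='a')
            >>> c.insert(1, 10)  # doctest: +SKIP
            <Column name='a' dtype='int64' length=4>
             1
            10
             2
             3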
"""
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
self_for_insert = _expand_string_array_for_values(self, values)
data = np.insert(self_for_insert, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumnInfo(ColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information. In this case
it just adds the ``mask_val`` attribute.
"""
# Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. See also code below.
attr_names = ColumnInfo.attr_names | {'serialize_method'}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = 'data'
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {'fits': 'null_value',
'ecsv': 'null_value',
'hdf5': 'data_mask',
'parquet': 'data_mask',
None: 'null_value'}
def _represent_as_dict(self):
out = super()._represent_as_dict()
col = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == 'data_mask':
# Note: a driver here is a performance issue in #8443 where repr() of a
# np.ma.MaskedArray value is up to 10 times slower than repr of a normal array
# value. So regardless of whether there are masked elements it is useful to
# explicitly define this as a serialized column and use col.data.data (ndarray)
# instead of letting it fall through to the "standard" serialization machinery.
out['data'] = col.data.data
if np.any(col.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out['mask'] = col.mask
elif method == 'null_value':
pass
else:
raise ValueError('serialize method must be either "data_mask" or "null_value"')
return out
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str, or None
Value used when filling masked column elements
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
      If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
info = MaskedColumnInfo()
def __new__(cls, data=None, name=None, mask=None, fill_value=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if mask is None:
# If mask is None then we need to determine the mask (if any) from the data.
# The naive method is looking for a mask attribute on data, but this can fail,
# see #8816. Instead use ``MaskedArray`` to do the work.
mask = ma.MaskedArray(data).mask
if mask is np.ma.nomask:
# Handle odd-ball issue with np.ma.nomask (numpy #13758), and see below.
mask = False
elif copy:
mask = mask.copy()
elif mask is np.ma.nomask:
# Force the creation of a full mask array as nomask is tricky to
# use and will fail in an unexpected manner when setting a value
# to the mask.
mask = False
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
unit=unit, format=format, description=description,
meta=meta, copy=copy, copy_indices=copy_indices)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# The above process preserves info relevant for Column, but this does
# not include serialize_method (and possibly other future attributes)
# relevant for MaskedColumn, so we set info explicitly.
if 'info' in getattr(data, '__dict__', {}):
self.info = data.info
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds.
if fill_value is None and getattr(data, 'fill_value', None) is not None:
# Coerce the fill_value to the correct type since `data` may be a
# different dtype than self.
fill_value = np.array(data.fill_value, self.dtype)[()]
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work."""
# another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
"""The plain MaskedArray data held by this column."""
out = self.view(np.ma.MaskedArray)
# By default, a MaskedArray view will set the _baseclass to be the
# same as that of our own class, i.e., BaseColumn. Since we want
# to return a plain MaskedArray, we reset the baseclass accordingly.
out._baseclass = np.ndarray
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
        fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
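        Examples
        --------
        A minimal sketch (values are illustrative)::
            col = MaskedColumn([1, 2], name='a', mask=[True, False], fill_value=99)
            col.filled()    # Column with values [99, 2]
            col.filled(-1)  # Column with values [-1, 2]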
"""
if fill_value is None:
fill_value = self.fill_value
data = super().filled(fill_value)
# Use parent table definition of Column if available
column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
out = column_cls(name=self.name, data=data, unit=self.unit,
format=self.format, description=self.description,
meta=deepcopy(self.meta))
return out
def insert(self, obj, values, mask=None, axis=0):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
mask : bool or array-like
            Mask value(s) to insert. If not supplied, and ``values`` does not
            have a mask either, then `False` is used.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
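        Examples
        --------
        A minimal sketch (values are illustrative)::
            col = MaskedColumn([1, 2, 3], name='a', mask=[False, True, False])
            col.insert(1, 10)             # values [1, 10, 2, 3], new element unmasked
            col.insert(1, 10, mask=True)  # same values, new element masked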
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=axis)
new_data[obj] = values
else:
self_ma = _expand_string_array_for_values(self_ma, values)
new_data = np.insert(self_ma.data, obj, values, axis=axis)
if mask is None:
mask = getattr(values, 'mask', np.ma.nomask)
if mask is np.ma.nomask:
if self.dtype.kind == 'O':
mask = False
else:
mask = np.zeros(np.shape(values), dtype=bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
out.fill_value = self.fill_value
return out
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
# TODO: this part is essentially the same as what is done in
# __array_finalize__ and could probably be called directly in our
# override of __getitem__ in _columns_mixins.pyx). Refactor?
if 'info' in self.__dict__:
out.info = self.info
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if self.dtype.char == 'S':
value = self._encode_str(value)
if issubclass(self.dtype.type, np.character):
# Account for a bug in np.ma.MaskedArray setitem.
# https://github.com/numpy/numpy/issues/8624
value = np.ma.asanyarray(value, dtype=self.dtype.type)
# Check for string truncation after filling masked items with
# empty (zero-length) string. Note that filled() does not make
# a copy if there are no masked items.
self._check_string_truncate(value.filled(''))
# update indices
self.info.adjust_indices(index, value, len(self))
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
8fbc137607dd786544e5b6eb12cd9c5af4315a5e24d3c71e1b17423a1110cc47 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
from collections import OrderedDict
from operator import index as operator_index
import numpy as np
class Row:
"""A class to represent one row of a Table object.
A Row object is returned when a Table object is indexed with an integer
or when iterating over a table::
>>> from astropy.table import Table
>>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
... dtype=('int32', 'int32'))
>>> row = table[1]
>>> row
<Row index=1>
a b
int32 int32
----- -----
2 4
>>> row['a']
2
>>> row[1]
4
"""
def __init__(self, table, index):
# Ensure that the row index is a valid index (int)
index = operator_index(index)
n = len(table)
if index < -n or index >= n:
raise IndexError('index {} out of range for table with length {}'
.format(index, len(table)))
# Finally, ensure the index is positive [#8422] and set Row attributes
self._index = index % n
self._table = table
def __getitem__(self, item):
try:
# Try the most common use case of accessing a single column in the Row.
# Bypass the TableColumns __getitem__ since that does more testing
# and allows a list of tuple or str, which is not the right thing here.
out = OrderedDict.__getitem__(self._table.columns, item)[self._index]
except (KeyError, TypeError):
if self._table._is_list_or_tuple_of_str(item):
cols = [self._table[name] for name in item]
out = self._table.__class__(cols, copy=False)[self._index]
else:
# This is only to raise an exception
out = self._table.columns[item][self._index]
return out
def __setitem__(self, item, val):
if self._table._is_list_or_tuple_of_str(item):
self._table._set_row(self._index, colnames=item, vals=val)
else:
self._table.columns[item][self._index] = val
def _ipython_key_completions_(self):
return self.colnames
def __eq__(self, other):
if self._table.masked:
# Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
# "Comparing rows in a structured masked array raises exception"
# No response, so this is still unresolved.
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() == other
def __ne__(self, other):
if self._table.masked:
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() != other
def __array__(self, dtype=None):
"""Support converting Row to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
If the parent table is masked then the mask information is dropped.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
return np.asarray(self.as_void())
def __len__(self):
return len(self._table.columns)
def __iter__(self):
index = self._index
for col in self._table.columns.values():
yield col[index]
def keys(self):
return self._table.columns.keys()
def values(self):
return self.__iter__()
@property
def table(self):
return self._table
@property
def index(self):
return self._index
def as_void(self):
"""
Returns a *read-only* copy of the row values in the form of np.void or
np.ma.mvoid objects. This corresponds to the object types returned for
row indexing of a pure numpy structured array or masked array. This
        method is slow and should be avoided when possible.
Returns
-------
void_row : ``numpy.void`` or ``numpy.ma.mvoid``
Copy of row values.
``numpy.void`` if unmasked, ``numpy.ma.mvoid`` else.
"""
index = self._index
cols = self._table.columns.values()
vals = tuple(np.asarray(col)[index] for col in cols)
if self._table.masked:
mask = tuple(col.mask[index] if hasattr(col, 'mask') else False
for col in cols)
void_row = np.ma.array([vals], mask=[mask], dtype=self.dtype)[0]
else:
void_row = np.array([vals], dtype=self.dtype)[0]
return void_row
@property
def meta(self):
return self._table.meta
@property
def columns(self):
return self._table.columns
@property
def colnames(self):
return self._table.colnames
@property
def dtype(self):
return self._table.dtype
def _base_repr_(self, html=False):
"""
Display row as a single-line table but with appropriate header line.
"""
index = self.index if (self.index >= 0) else self.index + len(self._table)
table = self._table[index:index + 1]
descr_vals = [self.__class__.__name__,
f'index={self.index}']
if table.masked:
descr_vals.append('masked=True')
return table._base_repr_(html, descr_vals, max_width=-1,
tableid=f'table{id(self._table)}')
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
index = self.index if (self.index >= 0) else self.index + len(self._table)
return '\n'.join(self.table[index:index + 1].pformat(max_width=-1))
def __bytes__(self):
return str(self).encode('utf-8')
collections.abc.Sequence.register(Row)
|
06ad00fec8dcd0f8cfe753f63ff3657bf6074ec8911b60743ea075d8b3f2a0ca | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import platform
import warnings
import numpy as np
from .index import get_index_by_names
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['TableGroups', 'ColumnGroups']
def table_group_by(table, keys):
# index copies are unnecessary and slow down _table_group_by
with table.index_mode('discard_on_copy'):
return _table_group_by(table, keys)
def _table_group_by(table, keys):
"""
Get groups for ``table`` on specified ``keys``.
Parameters
----------
table : `Table`
Table to group
keys : str, list of str, `Table`, or Numpy array
Grouping key specifier
Returns
-------
grouped_table : Table object with groups attr set accordingly
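    Examples
    --------
    A minimal sketch (illustrative; normally reached via ``Table.group_by``)::
        t = Table({'a': [1, 2, 1], 'b': [3, 4, 5]})
        tg = _table_group_by(t, 'a')
        tg.groups.indices  # -> array([0, 2, 3]); rows sorted so that a=[1, 1, 2]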
"""
from .table import Table
from .serialize import represent_mixins_as_columns
# Pre-convert string to tuple of strings, or Table to the underlying structured array
if isinstance(keys, str):
keys = (keys,)
if isinstance(keys, (list, tuple)):
for name in keys:
if name not in table.colnames:
raise ValueError(f'Table does not have key column {name!r}')
if table.masked and np.any(table[name].mask):
raise ValueError(f'Missing values in key column {name!r} are not allowed')
# Make a column slice of the table without copying
table_keys = table.__class__([table[key] for key in keys], copy=False)
# If available get a pre-existing index for these columns
table_index = get_index_by_names(table, keys)
grouped_by_table_cols = True
elif isinstance(keys, (np.ndarray, Table)):
table_keys = keys
if len(table_keys) != len(table):
raise ValueError('Input keys array length {} does not match table length {}'
.format(len(table_keys), len(table)))
table_index = None
grouped_by_table_cols = False
else:
raise TypeError('Keys input must be string, list, tuple, Table or numpy array, but got {}'
.format(type(keys)))
# If there is not already an available index and table_keys is a Table then ensure
# that all cols (including mixins) are in a form that can sorted with the code below.
if not table_index and isinstance(table_keys, Table):
table_keys = represent_mixins_as_columns(table_keys)
# Get the argsort index `idx_sort`, accounting for particulars
try:
# take advantage of index internal sort if possible
if table_index is not None:
idx_sort = table_index.sorted_data()
else:
idx_sort = table_keys.argsort(kind='mergesort')
stable_sort = True
except TypeError:
# Some versions (likely 1.6 and earlier) of numpy don't support
# 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable
# sort by default, nor does Windows, while Linux does (or appears to).
idx_sort = table_keys.argsort()
stable_sort = platform.system() not in ('Darwin', 'Windows')
# Finally do the actual sort of table_keys values
table_keys = table_keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# If the sort is not stable (preserves original table order) then sort idx_sort in
# place within each group.
if not stable_sort:
for i0, i1 in zip(indices[:-1], indices[1:]):
idx_sort[i0:i1].sort()
# Make a new table and set the _groups to the appropriate TableGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = table.__class__(table[idx_sort])
out_keys = table_keys[indices[:-1]]
if isinstance(out_keys, Table):
out_keys.meta['grouped_by_table_cols'] = grouped_by_table_cols
out._groups = TableGroups(out, indices=indices, keys=out_keys)
return out
def column_group_by(column, keys):
"""
Get groups for ``column`` on specified ``keys``
Parameters
----------
column : Column object
Column to group
keys : Table or Numpy array of same length as col
Grouping key specifier
Returns
-------
grouped_column : Column object with groups attr set accordingly
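    Examples
    --------
    A minimal sketch (illustrative; normally reached via ``Column.group_by``)::
        col = Column([10, 20, 30], name='a')
        cg = column_group_by(col, np.array([1, 2, 1]))
        cg.groups.indices  # -> array([0, 2, 3]); values sorted as [10, 30, 20]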
"""
from .table import Table
from .serialize import represent_mixins_as_columns
if isinstance(keys, Table):
keys = represent_mixins_as_columns(keys)
keys = keys.as_array()
if not isinstance(keys, np.ndarray):
raise TypeError(f'Keys input must be numpy array, but got {type(keys)}')
if len(keys) != len(column):
raise ValueError('Input keys array length {} does not match column length {}'
.format(len(keys), len(column)))
idx_sort = keys.argsort()
keys = keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# Make a new column and set the _groups to the appropriate ColumnGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = column.__class__(column[idx_sort])
out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]])
return out
class BaseGroups:
"""
A class to represent groups within a table of heterogeneous data.
- ``keys``: key values corresponding to each group
- ``indices``: index values in parent table or column corresponding to group boundaries
- ``aggregate()``: method to create new table by aggregating within groups
"""
@property
def parent(self):
return self.parent_column if isinstance(self, ColumnGroups) else self.parent_table
def __iter__(self):
self._iter_index = 0
return self
def next(self):
ii = self._iter_index
if ii < len(self.indices) - 1:
i0, i1 = self.indices[ii], self.indices[ii + 1]
self._iter_index += 1
return self.parent[i0:i1]
else:
raise StopIteration
__next__ = next
def __getitem__(self, item):
parent = self.parent
if isinstance(item, (int, np.integer)):
i0, i1 = self.indices[item], self.indices[item + 1]
out = parent[i0:i1]
out.groups._keys = parent.groups.keys[item]
else:
indices0, indices1 = self.indices[:-1], self.indices[1:]
try:
i0s, i1s = indices0[item], indices1[item]
except Exception as err:
raise TypeError('Index item for groups attribute must be a slice, '
'numpy mask or int array') from err
mask = np.zeros(len(parent), dtype=bool)
# Is there a way to vectorize this in numpy?
for i0, i1 in zip(i0s, i1s):
mask[i0:i1] = True
out = parent[mask]
out.groups._keys = parent.groups.keys[item]
out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)])
return out
def __repr__(self):
return f'<{self.__class__.__name__} indices={self.indices}>'
def __len__(self):
return len(self.indices) - 1
class ColumnGroups(BaseGroups):
def __init__(self, parent_column, indices=None, keys=None):
self.parent_column = parent_column # parent Column
self.parent_table = parent_column.parent_table
self._indices = indices
self._keys = keys
@property
def indices(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.indices
else:
if self._indices is None:
return np.array([0, len(self.parent_column)])
else:
return self._indices
@property
def keys(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.keys
else:
return self._keys
def aggregate(self, func):
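        """
        Aggregate each group in the column into a single value by applying
        the reduction function ``func`` to the values within each group.
        For unmasked columns with ``func`` either ``np.sum``, ``np.mean`` or
        a ufunc with a ``reduceat`` method, a fast vectorized path is used;
        otherwise ``func`` is applied to each group slice in a Python loop.
        """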
from .column import MaskedColumn
i0s, i1s = self.indices[:-1], self.indices[1:]
par_col = self.parent_column
masked = isinstance(par_col, MaskedColumn)
reduceat = hasattr(func, 'reduceat')
sum_case = func is np.sum
mean_case = func is np.mean
try:
if not masked and (reduceat or sum_case or mean_case):
if mean_case:
vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices)
else:
if sum_case:
func = np.add
vals = func.reduceat(par_col, i0s)
else:
vals = np.array([func(par_col[i0: i1]) for i0, i1 in zip(i0s, i1s)])
except Exception as err:
raise TypeError("Cannot aggregate column '{}' with type '{}'"
.format(par_col.info.name,
par_col.info.dtype)) from err
out = par_col.__class__(data=vals,
name=par_col.info.name,
description=par_col.info.description,
unit=par_col.info.unit,
format=par_col.info.format,
meta=par_col.info.meta)
return out
def filter(self, func):
"""
Filter groups in the Column based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept one argument:
- ``column`` : `Column` object
It must then return either `True` or `False`. As an example, the following
will select all column groups with only positive values::
def all_positive(column):
if np.any(column < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Column
            New column with only those groups for which ``func``
            returned `True`.
"""
mask = np.empty(len(self), dtype=bool)
for i, group_column in enumerate(self):
mask[i] = func(group_column)
return self[mask]
class TableGroups(BaseGroups):
def __init__(self, parent_table, indices=None, keys=None):
self.parent_table = parent_table # parent Table
self._indices = indices
self._keys = keys
@property
def key_colnames(self):
"""
Return the names of columns in the parent table that were used for grouping.
"""
# If the table was grouped by key columns *in* the table then treat those columns
# differently in aggregation. In this case keys will be a Table with
# keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we
# need to handle this.
grouped_by_table_cols = getattr(self.keys, 'meta', {}).get('grouped_by_table_cols', False)
return self.keys.colnames if grouped_by_table_cols else ()
@property
def indices(self):
if self._indices is None:
return np.array([0, len(self.parent_table)])
else:
return self._indices
def aggregate(self, func):
"""
Aggregate each group in the Table into a single row by applying the reduction
function ``func`` to group values in each column.
Parameters
----------
func : function
Function that reduces an array of values to a single value
Returns
-------
out : Table
New table with the aggregated rows.
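        Examples
        --------
        A minimal sketch (illustrative)::
            t = Table({'a': [1, 1, 2], 'b': [3.0, 4.0, 5.0]})
            tg = t.group_by('a')
            tg.groups.aggregate(np.mean)  # rows: (1, 3.5) and (2, 5.0)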
"""
i0s = self.indices[:-1]
out_cols = []
parent_table = self.parent_table
for col in parent_table.columns.values():
# For key columns just pick off first in each group since they are identical
if col.info.name in self.key_colnames:
new_col = col.take(i0s)
else:
try:
new_col = col.groups.aggregate(func)
except TypeError as err:
warnings.warn(str(err), AstropyUserWarning)
continue
out_cols.append(new_col)
return parent_table.__class__(out_cols, meta=parent_table.meta)
def filter(self, func):
"""
Filter groups in the Table based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept two arguments:
- ``table`` : `Table` object
- ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping
It must then return either `True` or `False`. As an example, the following
will select all table groups with only positive values in the non-key columns::
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Table
            New table with only those groups for which ``func``
            returned `True`.
"""
mask = np.empty(len(self), dtype=bool)
key_colnames = self.key_colnames
for i, group_table in enumerate(self):
mask[i] = func(group_table, key_colnames)
return self[mask]
@property
def keys(self):
return self._keys
|
01500c44fbe1b840b5994b9590d7d938abcd8d814e5cddacd971e7b2d1a1f3db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
import re
import fnmatch
import numpy as np
from astropy import log
from astropy.utils.console import Getch, color_print, terminal_size, conf
from astropy.utils.data_info import dtype_info_name
__all__ = []
def default_format_func(format_, val):
if isinstance(val, bytes):
return val.decode('utf-8', errors='replace')
else:
return str(val)
# The first three functions are helpers for _auto_format_func
def _use_str_for_masked_values(format_func):
"""Wrap format function to trap masked values.
String format functions and most user functions will not be able to deal
with masked values, so we wrap them to ensure they are passed to str().
"""
return lambda format_, val: (str(val) if val is np.ma.masked
else format_func(format_, val))
def _possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
"""
yield lambda format_, val: format(val, format_)
yield lambda format_, val: format_.format(val)
yield lambda format_, val: format_ % val
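# For example (illustrative): given format_='{:.2f}', only the new-style
# candidate succeeds ('{:.2f}'.format(1.2345) -> '1.23'); for format_='%.2f'
# only the old-style candidate does ('%.2f' % 1.2345 -> '1.23'); and for a
# plain format spec like '.2f' the format() built-in candidate handles it.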
def get_auto_format_func(
col=None,
possible_string_format_functions=_possible_string_format_functions):
"""
Return a wrapped ``auto_format_func`` function which is used in
formatting table columns. This is primarily an internal function but
gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.
Parameters
----------
    col : Column, optional
        Column being formatted; its ``info._format_funcs`` dict is used to
        cache the format function once it has been determined. Default is None.
possible_string_format_functions : func, optional
Function that yields possible string formatting functions
(defaults to internal function to do this).
Returns
-------
Wrapped ``auto_format_func`` function
"""
def _auto_format_func(format_, val):
"""Format ``val`` according to ``format_`` for a plain format specifier,
old- or new-style format strings, or using a user supplied function.
More importantly, determine and cache (in _format_funcs) a function
that will do this subsequently. In this way this complicated logic is
only done for the first value.
Returns the formatted value.
"""
if format_ is None:
return default_format_func(format_, val)
if format_ in col.info._format_funcs:
return col.info._format_funcs[format_](format_, val)
if callable(format_):
format_func = lambda format_, val: format_(val) # noqa
try:
out = format_func(format_, val)
if not isinstance(out, str):
raise ValueError('Format function for value {} returned {} '
'instead of string type'
.format(val, type(val)))
except Exception as err:
# For a masked element, the format function call likely failed
# to handle it. Just return the string representation for now,
# and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
raise ValueError(f'Format function for value {val} failed.') from err
# If the user-supplied function handles formatting masked elements, use
# it directly. Otherwise, wrap it in a function that traps them.
try:
format_func(format_, np.ma.masked)
except Exception:
format_func = _use_str_for_masked_values(format_func)
else:
# For a masked element, we cannot set string-based format functions yet,
# as all tests below will fail. Just return the string representation
# of masked for now, and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
for format_func in possible_string_format_functions(format_):
try:
# Does this string format method work?
out = format_func(format_, val)
# Require that the format statement actually did something.
if out == format_:
raise ValueError('the format passed in did nothing.')
except Exception:
continue
else:
break
else:
# None of the possible string functions passed muster.
raise ValueError('unable to parse format string {} for its '
'column.'.format(format_))
# String-based format functions will fail on masked elements;
# wrap them in a function that traps them.
format_func = _use_str_for_masked_values(format_func)
col.info._format_funcs[format_] = format_func
return out
return _auto_format_func
def _get_pprint_include_names(table):
"""Get the set of names to show in pprint from the table pprint_include_names
and pprint_exclude_names attributes.
These may be fnmatch unix-style globs.
"""
def get_matches(name_globs, default):
match_names = set()
if name_globs: # For None or () use the default
for name in table.colnames:
for name_glob in name_globs:
if fnmatch.fnmatch(name, name_glob):
match_names.add(name)
break
else:
match_names.update(default)
return match_names
include_names = get_matches(table.pprint_include_names(), table.colnames)
exclude_names = get_matches(table.pprint_exclude_names(), [])
return include_names - exclude_names
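# For example (illustrative): with colnames ['ra', 'dec', 'flux'],
# pprint_include_names=('ra', 'de*') selects {'ra', 'dec'}, and additionally
# setting pprint_exclude_names=('dec',) reduces the result to {'ra'}.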
class TableFormatter:
@staticmethod
def _get_pprint_size(max_lines=None, max_width=None):
"""Get the output size (number of lines and character width) for Column and
Table pformat/pprint methods.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
        height cannot be determined then the ``astropy.table.conf.max_lines``
        configuration item is used as the default. If a
negative value of ``max_lines`` is supplied then there is no line
limit applied.
The same applies for max_width except the configuration item is
``astropy.table.conf.max_width``.
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows)
max_width : int or None
Maximum width (characters) output
Returns
-------
max_lines, max_width : int
"""
# Declare to keep static type checker happy.
lines = None
width = None
if max_lines is None:
max_lines = conf.max_lines
if max_width is None:
max_width = conf.max_width
if max_lines is None or max_width is None:
lines, width = terminal_size()
if max_lines is None:
max_lines = lines
elif max_lines < 0:
max_lines = sys.maxsize
if max_lines < 8:
max_lines = 8
if max_width is None:
max_width = width
elif max_width < 0:
max_width = sys.maxsize
if max_width < 10:
max_width = 10
return max_lines, max_width
def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None,
show_dtype=False, show_length=None, html=False, align=None):
"""Return a list of formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
html : bool
Output column as HTML
align : str
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively.
Returns
-------
lines : list
List of lines with formatted column values
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
if show_unit is None:
show_unit = col.info.unit is not None
outs = {} # Some values from _pformat_col_iter iterator that are needed here
col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
show_length=show_length,
outs=outs)
# Replace tab and newline with text representations so they display nicely.
# Newline in particular is a problem in a multicolumn table.
col_strs = [val.replace('\t', '\\t').replace('\n', '\\n') for val in col_strs_iter]
if len(col_strs) > 0:
col_width = max(len(x) for x in col_strs)
if html:
from astropy.utils.xml.writer import xml_escape
n_header = outs['n_header']
for i, col_str in enumerate(col_strs):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = 'th' if i < n_header else 'td'
val = f'<{td}>{xml_escape(col_str.strip())}</{td}>'
row = ('<tr>' + val + '</tr>')
if i < n_header:
row = ('<thead>' + row + '</thead>')
col_strs[i] = row
if n_header > 0:
# Get rid of '---' header line
col_strs.pop(n_header - 1)
col_strs.insert(0, '<table>')
col_strs.append('</table>')
# Now bring all the column string values to the same fixed width
else:
col_width = max(len(x) for x in col_strs) if col_strs else 1
# Center line header content and generate dashed headerline
for i in outs['i_centers']:
col_strs[i] = col_strs[i].center(col_width)
if outs['i_dashes'] is not None:
col_strs[outs['i_dashes']] = '-' * col_width
# Format columns according to alignment. `align` arg has precedent, otherwise
# use `col.format` if it starts as a legal alignment string. If neither applies
# then right justify.
re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])')
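        # For example (illustrative): align='0=' matches fill='0', align='=',
        # while a format string like '<10.3f' matches fill='', align='<'.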
match = None
if align:
# If there is an align specified then it must match
match = re_fill_align.match(align)
if not match:
raise ValueError("column align must be one of '<', '^', '>', or '='")
elif isinstance(col.info.format, str):
# col.info.format need not match, in which case rjust gets used
match = re_fill_align.match(col.info.format)
if match:
fill_char = match.group('fill')
align_char = match.group('align')
if align_char == '=':
if fill_char != '0':
raise ValueError("fill character must be '0' for '=' align")
fill_char = '' # str.zfill gets used which does not take fill char arg
else:
fill_char = ''
align_char = '>'
justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'}
justify_method = justify_methods[align_char]
justify_args = (col_width, fill_char) if fill_char else (col_width,)
for i, col_str in enumerate(col_strs):
col_strs[i] = getattr(col_str, justify_method)(*justify_args)
if outs['show_length']:
col_strs.append(f'Length = {len(col)} rows')
return col_strs, outs
def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs,
show_dtype=False, show_length=None):
"""Iterator which yields formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
outs : dict
Must be a dict which is used to pass back additional values
defined within the iterator.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
"""
max_lines, _ = self._get_pprint_size(max_lines, -1)
multidims = getattr(col, 'shape', [0])[1:]
if multidims:
multidim0 = tuple(0 for n in multidims)
multidim1 = tuple(n - 1 for n in multidims)
trivial_multidims = np.prod(multidims) == 1
i_dashes = None
i_centers = [] # Line indexes where content should be centered
n_header = 0
if show_name:
i_centers.append(n_header)
# Get column name (or 'None' if not set)
col_name = str(col.info.name)
if multidims:
col_name += f" [{','.join(str(n) for n in multidims)}]"
n_header += 1
yield col_name
if show_unit:
i_centers.append(n_header)
n_header += 1
yield str(col.info.unit or '')
if show_dtype:
i_centers.append(n_header)
n_header += 1
try:
dtype = dtype_info_name(col.dtype)
except AttributeError:
dtype = col.__class__.__qualname__ or 'object'
yield str(dtype)
if show_unit or show_name or show_dtype:
i_dashes = n_header
n_header += 1
yield '---'
max_lines -= n_header
n_print2 = max_lines // 2
n_rows = len(col)
# This block of code is responsible for producing the function that
# will format values for this column. The ``format_func`` function
# takes two args (col_format, val) and returns the string-formatted
# version. Some points to understand:
#
# - col_format could itself be the formatting function, so it will
# actually end up being called with itself as the first arg. In
# this case the function is expected to ignore its first arg.
#
# - auto_format_func is a function that gets called on the first
# column value that is being formatted. It then determines an
# appropriate formatting function given the actual value to be
# formatted. This might be deterministic or it might involve
# try/except. The latter allows for different string formatting
# options like %f or {:5.3f}. When auto_format_func is called it:
# 1. Caches the function in the _format_funcs dict so for subsequent
# values the right function is called right away.
# 2. Returns the formatted value.
#
# - possible_string_format_functions is a function that yields a
# succession of functions that might successfully format the
# value. There is a default, but Mixin methods can override this.
# See Quantity for an example.
#
# - get_auto_format_func() returns a wrapped version of auto_format_func
# with the column id and possible_string_format_functions as
# enclosed variables.
col_format = col.info.format or getattr(col.info, 'default_format',
None)
pssf = (getattr(col.info, 'possible_string_format_functions', None)
or _possible_string_format_functions)
auto_format_func = get_auto_format_func(col, pssf)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
if len(col) > max_lines:
if show_length is None:
show_length = True
i0 = n_print2 - (1 if show_length else 0)
i1 = n_rows - n_print2 - max_lines % 2
indices = np.concatenate([np.arange(0, i0 + 1),
np.arange(i1 + 1, len(col))])
else:
i0 = -1
indices = np.arange(len(col))
def format_col_str(idx):
if multidims:
# Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')
# with shape (n,1,...,1) from being printed as if there was
# more than one element in a row
if trivial_multidims:
return format_func(col_format, col[(idx,) + multidim0])
else:
left = format_func(col_format, col[(idx,) + multidim0])
right = format_func(col_format, col[(idx,) + multidim1])
return f'{left} .. {right}'
else:
return format_func(col_format, col[idx])
# Add formatted values if within bounds allowed by max_lines
for idx in indices:
if idx == i0:
yield '...'
else:
try:
yield format_col_str(idx)
except ValueError:
raise ValueError(
'Unable to parse format string "{}" for entry "{}" '
'in column "{}"'.format(col_format, col[idx],
col.info.name))
outs['show_length'] = show_length
outs['n_header'] = n_header
outs['i_centers'] = i_centers
outs['i_dashes'] = i_dashes
def _pformat_table(self, table, max_lines=None, max_width=None,
show_name=True, show_unit=None, show_dtype=False,
html=False, tableid=None, tableclass=None, align=None):
"""Return a list of lines for the formatted string representation of
the table.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(table)
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
            None.
align : str or list or tuple
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively. A list of strings can be
provided for alignment of tables with multiple columns.
Returns
-------
rows : list
Formatted table as a list of strings
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
# "Print" all the values into temporary lists by column for subsequent
# use and to determine the width
max_lines, max_width = self._get_pprint_size(max_lines, max_width)
if show_unit is None:
show_unit = any(col.info.unit for col in table.columns.values())
# Coerce align into a correctly-sized list of alignments (if possible)
n_cols = len(table.columns)
if align is None or isinstance(align, str):
align = [align] * n_cols
elif isinstance(align, (list, tuple)):
if len(align) != n_cols:
raise ValueError('got {} alignment values instead of '
'the number of columns ({})'
.format(len(align), n_cols))
else:
raise TypeError('align keyword must be str or list or tuple (got {})'
.format(type(align)))
# Process column visibility from table pprint_include_names and
# pprint_exclude_names attributes and get the set of columns to show.
pprint_include_names = _get_pprint_include_names(table)
cols = []
outs = None # Initialize so static type checker is happy
for align_, col in zip(align, table.columns.values()):
if col.info.name not in pprint_include_names:
continue
lines, outs = self._pformat_col(col, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
align=align_)
if outs['show_length']:
lines = lines[:-1]
cols.append(lines)
if not cols:
return ['<No columns>'], {'show_length': False}
# Use the values for the last column since they are all the same
n_header = outs['n_header']
n_rows = len(cols[0])
def outwidth(cols):
return sum(len(c[0]) for c in cols) + len(cols) - 1
dots_col = ['...'] * n_rows
middle = len(cols) // 2
while outwidth(cols) > max_width:
if len(cols) == 1:
break
if len(cols) == 2:
cols[1] = dots_col
break
if cols[middle] is dots_col:
cols.pop(middle)
middle = len(cols) // 2
cols[middle] = dots_col
# Now "print" the (already-stringified) column values into a
# row-oriented list.
rows = []
if html:
from astropy.utils.xml.writer import xml_escape
if tableid is None:
tableid = f'table{id(table)}'
if tableclass is not None:
if isinstance(tableclass, list):
tableclass = ' '.join(tableclass)
rows.append(f'<table id="{tableid}" class="{tableclass}">')
else:
rows.append(f'<table id="{tableid}">')
for i in range(n_rows):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = 'th' if i < n_header else 'td'
vals = (f'<{td}>{xml_escape(col[i].strip())}</{td}>'
for col in cols)
row = ('<tr>' + ''.join(vals) + '</tr>')
if i < n_header:
row = ('<thead>' + row + '</thead>')
rows.append(row)
rows.append('</table>')
else:
for i in range(n_rows):
row = ' '.join(col[i] for col in cols)
rows.append(row)
return rows, outs
def _more_tabcol(self, tabcol, max_lines=None, max_width=None,
show_name=True, show_unit=None, show_dtype=False):
"""Interactive "more" of a table or column.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
allowed_keys = 'f br<>qhpn'
# Count the header lines
n_header = 0
if show_name:
n_header += 1
if show_unit:
n_header += 1
if show_dtype:
n_header += 1
if show_name or show_unit or show_dtype:
n_header += 1
# Set up kwargs for pformat call. Only Table gets max_width.
kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
if hasattr(tabcol, 'columns'): # tabcol is a table
kwargs['max_width'] = max_width
# If max_lines is None (=> query screen size) then increase by 2.
# This is because get_pprint_size leaves 6 extra lines so that in
# ipython you normally see the last input line.
max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
if max_lines is None:
max_lines1 += 2
delta_lines = max_lines1 - n_header
# Set up a function to get a single character on any platform
inkey = Getch()
i0 = 0 # First table/column row to show
showlines = True
while True:
i1 = i0 + delta_lines # Last table/col row to show
if showlines: # Don't always show the table (e.g. after help)
try:
os.system('cls' if os.name == 'nt' else 'clear')
except Exception:
pass # No worries if clear screen call fails
lines = tabcol[i0:i1].pformat(**kwargs)
colors = ('red' if i < n_header else 'default'
for i in range(len(lines)))
for color, line in zip(colors, lines):
color_print(line, color)
showlines = True
print()
print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=' ')
# Get a valid key
while True:
try:
key = inkey().lower()
except Exception:
print("\n")
log.error('Console does not support getting a character'
' as required by more(). Use pprint() instead.')
return
if key in allowed_keys:
break
print(key)
if key.lower() == 'q':
break
elif key == ' ' or key == 'f':
i0 += delta_lines
elif key == 'b':
i0 = i0 - delta_lines
elif key == 'r':
pass
elif key == '<':
i0 = 0
elif key == '>':
i0 = len(tabcol)
elif key == 'p':
i0 -= 1
elif key == 'n':
i0 += 1
elif key == 'h':
showlines = False
print("""
Browsing keys:
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help""", end=' ')
if i0 < 0:
i0 = 0
if i0 >= len(tabcol) - delta_lines:
i0 = len(tabcol) - delta_lines
print("\n")
|
40c5ce95c5fb602e61894827ea7fc4bc730178129daa2acda4e156eb7e99bb33 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from os.path import abspath, dirname, join
from .table import Table
import astropy.io.registry as io_registry
import astropy.config as _config
from astropy import extern
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table.jsviewer`.
"""
jquery_url = _config.ConfigItem(
'https://code.jquery.com/jquery-3.1.1.min.js',
'The URL to the jquery library.')
datatables_url = _config.ConfigItem(
'https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
'The URL to the jquery datatables library.')
css_urls = _config.ConfigItem(
['https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css'],
'The URLs to the css file(s) to include.', cfgtype='string_list')
conf = Conf()
EXTERN_JS_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data', 'js'))
EXTERN_CSS_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data', 'css'))
_SORTING_SCRIPT_PART_1 = """
var astropy_sort_num = function(a, b) {{
var a_num = parseFloat(a);
var b_num = parseFloat(b);
if (isNaN(a_num) && isNaN(b_num))
return ((a < b) ? -1 : ((a > b) ? 1 : 0));
else if (!isNaN(a_num) && !isNaN(b_num))
return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0));
else
return isNaN(a_num) ? -1 : 1;
}}
"""
_SORTING_SCRIPT_PART_2 = """
jQuery.extend( jQuery.fn.dataTableExt.oSort, {{
"optionalnum-asc": astropy_sort_num,
"optionalnum-desc": function (a,b) {{ return -astropy_sort_num(a, b); }}
}});
"""
IPYNB_JS_SCRIPT = """
<script>
%(sorting_script1)s
require.config({{paths: {{
datatables: '{datatables_url}'
}}}});
require(["datatables"], function(){{
console.log("$('#{tid}').dataTable()");
%(sorting_script2)s
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}});
</script>
""" % dict(sorting_script1=_SORTING_SCRIPT_PART_1,
sorting_script2=_SORTING_SCRIPT_PART_2)
HTML_JS_SCRIPT = _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """
$(document).ready(function() {{
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}} );
"""
# Default CSS for the JSViewer writer
DEFAULT_CSS = """\
body {font-family: sans-serif;}
table.dataTable {width: auto !important; margin: 0 !important;}
.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em}
"""
# Default CSS used when rendering a table in the IPython notebook
DEFAULT_CSS_NB = """\
table.dataTable {clear: both; width: auto !important; margin: 0 !important;}
.dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{
display: inline-block; margin-right: 1em; }
.paginate_button { margin-right: 5px; }
"""
class JSViewer:
"""Provides an interactive HTML export of a Table.
This class provides an interface to the `DataTables
    <https://datatables.net/>`_ library, which allows interactive visualization of
an HTML table. It is used by the `~astropy.table.Table.show_in_browser`
method.
Parameters
----------
use_local_files : bool, optional
Use local files or a CDN for JavaScript libraries. Default False.
display_length : int, optional
        Number of rows to show. Default is 50.
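    Examples
    --------
    A minimal sketch (illustrative)::
        jsv = JSViewer(display_length=25)
        js = jsv.html_js(table_id='table1', sort_columns='[0]')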
"""
def __init__(self, use_local_files=False, display_length=50):
self._use_local_files = use_local_files
self.display_length_menu = [[10, 25, 50, 100, 500, 1000, -1],
[10, 25, 50, 100, 500, 1000, "All"]]
self.display_length = display_length
for L in self.display_length_menu:
if display_length not in L:
L.insert(0, display_length)
@property
def jquery_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_JS_DIR, 'jquery-3.1.1.min.js'),
'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min.js')]
else:
return [conf.jquery_url, conf.datatables_url]
@property
def css_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_CSS_DIR,
'jquery.dataTables.css')]
else:
return conf.css_urls
def _jstable_file(self):
if self._use_local_files:
return 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min')
else:
return conf.datatables_url[:-3]
def ipynb(self, table_id, css=None, sort_columns='[]'):
html = f'<style>{css if css is not None else DEFAULT_CSS_NB}</style>'
html += IPYNB_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
datatables_url=self._jstable_file(),
tid=table_id, sort_columns=sort_columns)
return html
def html_js(self, table_id='table0', sort_columns='[]'):
return HTML_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
tid=table_id, sort_columns=sort_columns).strip()
def write_table_jsviewer(table, filename, table_id=None, max_lines=5000,
table_class="display compact", jskwargs=None,
css=DEFAULT_CSS, htmldict=None, overwrite=False):
if table_id is None:
table_id = f'table{id(table)}'
jskwargs = jskwargs or {}
jsv = JSViewer(**jskwargs)
sortable_columns = [i for i, col in enumerate(table.columns.values())
if col.info.dtype.kind in 'iufc']
html_options = {
'table_id': table_id,
'table_class': table_class,
'css': css,
'cssfiles': jsv.css_urls,
'jsfiles': jsv.jquery_urls,
'js': jsv.html_js(table_id=table_id, sort_columns=sortable_columns)
}
if htmldict:
html_options.update(htmldict)
if max_lines < len(table):
table = table[:max_lines]
table.write(filename, format='html', htmldict=html_options,
overwrite=overwrite)
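# Example (illustrative): once registered below, this writer is reached
# through the unified I/O interface::
#     t.write('table.html', format='jsviewer', max_lines=1000)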
io_registry.register_writer('jsviewer', Table, write_table_jsviewer)
|
682e3d8156b7564494d9716af5da937fdb12074a6c1a7a91053209788ed2b994 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.utils.data_info import ParentDtypeInfo
class NdarrayMixinInfo(ParentDtypeInfo):
_represent_as_dict_primary_data = 'data'
def _represent_as_dict(self):
"""Represent Column as a dict that can be serialized."""
col = self._parent
out = {'data': col.view(np.ndarray)}
return out
def _construct_from_dict(self, map):
"""Construct Column from ``map``."""
data = map.pop('data')
out = self._parent_cls(data, **map)
return out
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ``np.array()``.
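    Examples
    --------
    A minimal sketch (illustrative)::
        arr = NdarrayMixin(np.arange(6).reshape(3, 2))
        # arr can then be stored as a mixin column, e.g. Table([arr], names=['a'])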
"""
info = NdarrayMixinInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
        # patch to pickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
|
a7dc76058f28227b71b261fbb2ed79e521f40d68fe418394cacafc67740247d8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
__all__ = ['BST']
class MaxValue:
'''
Represents an infinite value for purposes
of tuple comparison.
'''
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __repr__(self):
return "MAX"
__str__ = __repr__
class MinValue:
'''
The opposite of MaxValue, i.e. a representation of
negative infinity.
'''
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __repr__(self):
return "MIN"
__str__ = __repr__
class Epsilon:
'''
Represents the "next largest" version of a given value,
so that for all valid comparisons we have
x < y < Epsilon(y) < z whenever x < y < z and x, z are
not Epsilon objects.
Parameters
----------
val : object
Original value
'''
__slots__ = ('val',)
def __init__(self, val):
self.val = val
def __lt__(self, other):
if self.val == other:
return False
return self.val < other
def __gt__(self, other):
if self.val == other:
return True
return self.val > other
def __eq__(self, other):
return False
def __repr__(self):
return repr(self.val) + " + epsilon"
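# For example (illustrative): Epsilon(5) > 5 and Epsilon(5) < 6 are both True,
# so a tuple key ending in Epsilon(val) acts as an exclusive bound in
# tuple-ordered range queries.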
class Node:
'''
An element in a binary search tree, containing
a key, data, and references to children nodes and
a parent node.
Parameters
----------
key : tuple
Node key
data : list or int
Node data
'''
__lt__ = lambda x, y: x.key < y.key
__le__ = lambda x, y: x.key <= y.key
__eq__ = lambda x, y: x.key == y.key
__ge__ = lambda x, y: x.key >= y.key
__gt__ = lambda x, y: x.key > y.key
__ne__ = lambda x, y: x.key != y.key
__slots__ = ('key', 'data', 'left', 'right')
# each node has a key and data list
def __init__(self, key, data):
self.key = key
self.data = data if isinstance(data, list) else [data]
self.left = None
self.right = None
def replace(self, child, new_child):
'''
Replace this node's child with a new child.
'''
if self.left is not None and self.left == child:
self.left = new_child
elif self.right is not None and self.right == child:
self.right = new_child
else:
raise ValueError("Cannot call replace() on non-child")
def remove(self, child):
'''
Remove the given child.
'''
self.replace(child, None)
def set(self, other):
'''
Copy the given node.
'''
self.key = other.key
self.data = other.data[:]
def __str__(self):
return str((self.key, self.data))
def __repr__(self):
return str(self)
class BST:
'''
A basic binary search tree in pure Python, used
as an engine for indexing.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool
Whether the values of the index must be unique.
Defaults to False.
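    Examples
    --------
    A minimal sketch (illustrative; normally constructed by the table
    indexing machinery rather than directly)::
        bst = BST([(1,), (2,), (1,)], [0, 1, 2])
        bst.find((1,))     # -> [0, 2]
        bst.sorted_data()  # -> [0, 2, 1]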
'''
NodeClass = Node
def __init__(self, data, row_index, unique=False):
self.root = None
self.size = 0
self.unique = unique
for key, row in zip(data, row_index):
self.add(tuple(key), row)
def add(self, key, data=None):
'''
Add a key, data pair.
'''
if data is None:
data = key
self.size += 1
node = self.NodeClass(key, data)
curr_node = self.root
if curr_node is None:
self.root = node
return
while True:
if node < curr_node:
if curr_node.left is None:
curr_node.left = node
break
curr_node = curr_node.left
elif node > curr_node:
if curr_node.right is None:
curr_node.right = node
break
curr_node = curr_node.right
elif self.unique:
raise ValueError("Cannot insert non-unique value")
else: # add data to node
curr_node.data.extend(node.data)
curr_node.data = sorted(curr_node.data)
return
def find(self, key):
'''
Return all data values corresponding to a given key.
Parameters
----------
key : tuple
Input key
Returns
-------
data_vals : list
List of rows corresponding to the input key
'''
node, parent = self.find_node(key)
return node.data if node is not None else []
def find_node(self, key):
'''
Find the node associated with the given key.
'''
if self.root is None:
return (None, None)
return self._find_recursive(key, self.root, None)
def shift_left(self, row):
'''
Decrement all rows larger than the given row.
'''
for node in self.traverse():
node.data = [x - 1 if x > row else x for x in node.data]
def shift_right(self, row):
'''
Increment all rows greater than or equal to the given row.
'''
for node in self.traverse():
node.data = [x + 1 if x >= row else x for x in node.data]
def _find_recursive(self, key, node, parent):
try:
if key == node.key:
return (node, parent)
elif key > node.key:
if node.right is None:
return (None, None)
return self._find_recursive(key, node.right, node)
else:
if node.left is None:
return (None, None)
return self._find_recursive(key, node.left, node)
except TypeError: # wrong key type
return (None, None)
def traverse(self, order='inorder'):
'''
Return nodes of the BST in the given order.
Parameters
----------
order : str
The order in which to recursively search the BST.
Possible values are:
"preorder": current node, left subtree, right subtree
"inorder": left subtree, current node, right subtree
"postorder": left subtree, right subtree, current node
'''
if order == 'preorder':
return self._preorder(self.root, [])
elif order == 'inorder':
return self._inorder(self.root, [])
elif order == 'postorder':
return self._postorder(self.root, [])
raise ValueError(f"Invalid traversal method: \"{order}\"")
def items(self):
'''
Return BST items in order as (key, data) pairs.
'''
return [(x.key, x.data) for x in self.traverse()]
def sort(self):
'''
Make row order align with key order.
'''
i = 0
for node in self.traverse():
num_rows = len(node.data)
            node.data = list(range(i, i + num_rows))
i += num_rows
def sorted_data(self):
'''
Return BST rows sorted by key values.
'''
return [x for node in self.traverse() for x in node.data]
def _preorder(self, node, lst):
if node is None:
return lst
lst.append(node)
self._preorder(node.left, lst)
self._preorder(node.right, lst)
return lst
def _inorder(self, node, lst):
if node is None:
return lst
self._inorder(node.left, lst)
lst.append(node)
self._inorder(node.right, lst)
return lst
def _postorder(self, node, lst):
if node is None:
return lst
self._postorder(node.left, lst)
self._postorder(node.right, lst)
lst.append(node)
return lst
def _substitute(self, node, parent, new_node):
if node is self.root:
self.root = new_node
else:
parent.replace(node, new_node)
def remove(self, key, data=None):
'''
Remove data corresponding to the given key.
Parameters
----------
key : tuple
The key to remove
data : int or None
If None, remove the node corresponding to the given key.
If not None, remove only the given data value from the node.
Returns
-------
successful : bool
            True if the removal was successful, False otherwise
'''
node, parent = self.find_node(key)
if node is None:
return False
if data is not None:
if data not in node.data:
raise ValueError("Data does not belong to correct node")
elif len(node.data) > 1:
node.data.remove(data)
return True
if node.left is None and node.right is None:
self._substitute(node, parent, None)
elif node.left is None and node.right is not None:
self._substitute(node, parent, node.right)
elif node.right is None and node.left is not None:
self._substitute(node, parent, node.left)
else:
# find largest element of left subtree
curr_node = node.left
parent = node
while curr_node.right is not None:
parent = curr_node
curr_node = curr_node.right
self._substitute(curr_node, parent, curr_node.left)
node.set(curr_node)
self.size -= 1
return True
def is_valid(self):
'''
Returns whether this is a valid BST.
'''
return self._is_valid(self.root)
def _is_valid(self, node):
if node is None:
return True
return (node.left is None or node.left <= node) and \
(node.right is None or node.right >= node) and \
self._is_valid(node.left) and self._is_valid(node.right)
def range(self, lower, upper, bounds=(True, True)):
'''
        Return the row values of all nodes with keys in the given range.
Parameters
----------
lower : tuple
Lower bound
upper : tuple
Upper bound
bounds : (2,) tuple of bool
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument corresponds to an inclusive lower bound,
and the second argument to an inclusive upper bound.
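
        Examples
        --------
        A minimal sketch (the returned row values are not necessarily
        sorted)::

            >>> bst = BST([(1,), (2,), (3,)], [0, 1, 2])
            >>> bst.range((1,), (2,))
            [0, 1]
            >>> bst.range((1,), (2,), bounds=(True, False))
            [0]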
'''
nodes = self.range_nodes(lower, upper, bounds)
return [x for node in nodes for x in node.data]
def range_nodes(self, lower, upper, bounds=(True, True)):
'''
Return nodes in the given range.
'''
if self.root is None:
return []
# op1 is <= or <, op2 is >= or >
op1 = operator.le if bounds[0] else operator.lt
op2 = operator.ge if bounds[1] else operator.gt
return self._range(lower, upper, op1, op2, self.root, [])
def same_prefix(self, val):
'''
        Assuming the given value is shorter than the keys, return the
        row values of nodes whose keys have this value as a prefix.
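
        Examples
        --------
        A minimal sketch; keys here are two-element tuples and the
        returned values are row numbers::

            >>> bst = BST([(1, 'x'), (1, 'y'), (2, 'z')], [0, 1, 2])
            >>> bst.same_prefix((1,))
            [0, 1]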
'''
if self.root is None:
return []
nodes = self._same_prefix(val, self.root, [])
return [x for node in nodes for x in node.data]
def _range(self, lower, upper, op1, op2, node, lst):
if op1(lower, node.key) and op2(upper, node.key):
lst.append(node)
if upper > node.key and node.right is not None:
self._range(lower, upper, op1, op2, node.right, lst)
if lower < node.key and node.left is not None:
self._range(lower, upper, op1, op2, node.left, lst)
return lst
def _same_prefix(self, val, node, lst):
prefix = node.key[:len(val)]
if prefix == val:
lst.append(node)
if prefix <= val and node.right is not None:
self._same_prefix(val, node.right, lst)
if prefix >= val and node.left is not None:
self._same_prefix(val, node.left, lst)
return lst
def __repr__(self):
return f'<{self.__class__.__name__}>'
def _print(self, node, level):
line = '\t' * level + str(node) + '\n'
if node.left is not None:
line += self._print(node.left, level + 1)
if node.right is not None:
line += self._print(node.right, level + 1)
return line
@property
def height(self):
'''
Return the BST height.
'''
return self._height(self.root)
def _height(self, node):
if node is None:
return -1
return max(self._height(node.left),
self._height(node.right)) + 1
def replace_rows(self, row_map):
'''
        Replace all rows with the values they map to in the
        given dictionary. Any rows not present as keys in
        the dictionary are dropped from the node data (the
        nodes themselves are kept, possibly with empty data
        lists).
Parameters
----------
row_map : dict
Mapping of row numbers to new row numbers
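
        Examples
        --------
        A minimal sketch::

            >>> bst = BST([(1,), (2,)], [0, 1])
            >>> bst.replace_rows({0: 5})
            >>> bst.sorted_data()
            [5]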
'''
for key, data in self.items():
data[:] = [row_map[x] for x in data if x in row_map]
|
db6cde48d019a7516a096527dfad4880ddfce737eed00ed463371a967d1e3290 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from setuptools import Extension
import numpy
ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
sources = ["_np_utils.pyx", "_column_mixins.pyx"]
include_dirs = [numpy.get_include()]
exts = [
Extension(name='astropy.table.' + os.path.splitext(source)[0],
sources=[os.path.join(ROOT, source)],
include_dirs=include_dirs)
for source in sources
]
return exts
|
c42c0d5375112980d71b0cd6ee7070da392b7dabf5ff04f8a182d7475032a61b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import SlicedIndex, TableIndices, TableLoc, TableILoc, TableLocIndices
import sys
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
import warnings
from copy import deepcopy
import types
import itertools
import weakref
import numpy as np
from numpy import ma
from astropy import log
from astropy.units import Quantity, QuantityInfo
from astropy.utils import isiterable, ShapedLikeNDArray
from astropy.utils.console import color_print
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from astropy.utils.metadata import MetaData, MetaAttribute
from astropy.utils.data_info import BaseColumnInfo, MixinInfo, DataInfo
from astropy.utils.decorators import format_doc
from astropy.io.registry import UnifiedReadWriteMethod
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy, _convert_sequence_data_to_array)
from .row import Row
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from .connect import TableRead, TableWrite
from .ndarray_mixin import NdarrayMixin
from .mixins.registry import get_mixin_handler
from . import conf
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = ['Table.read', 'Table.write', 'Table._read',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
__doctest_requires__ = {'*pandas': ['pandas>=1.1']}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of lines in table output.
max_width : int or None
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
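
    For example (the exact dtype depends on the platform, so this is
    illustrative only)::

        >>> descr(Column(name='a', data=[1, 2]))  # doctest: +SKIP
        ('a', dtype('int64'), ())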
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
"""Check if the object's info is an instance of cls."""
# We check info on the class of the instance, since on the instance
# itself accessing 'info' has side effects in that it sets
# obj.__dict__['info'] if it does not exist already.
return isinstance(getattr(obj.__class__, 'info', None), cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
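
    ``sorted`` is used in the sketch below because the names are gathered
    in a set, so their order is not deterministic::

        >>> sorted(_get_names_from_list_of_dict([{'a': 1, 'b': 2}, {'b': 3}]))
        ['a', 'b']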
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, Mapping):
return None
names.update(row)
return list(names)
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError("Cannot replace column '{}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return f"<{self.__class__.__name__} names=({','.join(names)})>"
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
# Rename column names in pprint include/exclude attributes as needed
parent_table = self[name].info.parent_table
if parent_table is not None:
parent_table.pprint_exclude_names._rename(name, new_name)
parent_table.pprint_include_names._rename(name, new_name)
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def __delitem__(self, name):
# Remove column names from pprint include/exclude attributes as needed.
# __delitem__ also gets called for pop() and popitem().
parent_table = self[name].info.parent_table
if parent_table is not None:
# _remove() method does not require that `name` is in the attribute
parent_table.pprint_exclude_names._remove(name)
parent_table.pprint_include_names._remove(name)
return super().__delitem__(name)
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
class PprintIncludeExclude(TableAttribute):
"""Maintain tuple that controls table column visibility for print output.
This is a descriptor that inherits from MetaAttribute so that the attribute
value is stored in the table meta['__attributes__'].
This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
attributes.
"""
def __get__(self, instance, owner_cls):
"""Get the attribute.
This normally returns an instance of this class which is stored on the
owner object.
"""
# For getting from class not an instance
if instance is None:
return self
# If not already stored on `instance`, make a copy of the class
# descriptor object and put it onto the instance.
value = instance.__dict__.get(self.name)
if value is None:
value = deepcopy(self)
instance.__dict__[self.name] = value
# We set _instance_ref on every call, since if one makes copies of
# instances, this attribute will be copied as well, which will lose the
# reference.
value._instance_ref = weakref.ref(instance)
return value
def __set__(self, instance, names):
"""Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
if isinstance(names, str):
names = [names]
if names is None:
# Remove attribute value from the meta['__attributes__'] dict.
# Subsequent access will just return None.
delattr(instance, self.name)
else:
# This stores names into instance.meta['__attributes__'] as tuple
return super().__set__(instance, tuple(names))
def __call__(self):
"""Get the value of the attribute.
Returns
-------
names : None, tuple
Include/exclude names
"""
# Get the value from instance.meta['__attributes__']
instance = self._instance_ref()
return super().__get__(instance, instance.__class__)
def __repr__(self):
if hasattr(self, '_instance_ref'):
out = f'<{self.__class__.__name__} name={self.name} value={self()}>'
else:
out = super().__repr__()
return out
def _add_remove_setup(self, names):
"""Common setup for add and remove.
- Coerce attribute value to a list
- Coerce names into a list
- Get the parent table instance
"""
names = [names] if isinstance(names, str) else list(names)
# Get the value. This is the same as self() but we need `instance` here.
instance = self._instance_ref()
value = super().__get__(instance, instance.__class__)
value = [] if value is None else list(value)
return instance, names, value
def add(self, names):
"""Add ``names`` to the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to add
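
        Examples
        --------
        A minimal sketch::

            >>> t = Table([[1], [2]], names=('a', 'b'))
            >>> t.pprint_include_names.add('a')
            >>> t.pprint_include_names()
            ('a',)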
"""
instance, names, value = self._add_remove_setup(names)
value.extend(name for name in names if name not in value)
super().__set__(instance, tuple(value))
def remove(self, names):
"""Remove ``names`` from the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to remove
"""
self._remove(names, raise_exc=True)
def _remove(self, names, raise_exc=False):
"""Remove ``names`` with optional checking if they exist"""
instance, names, value = self._add_remove_setup(names)
# Return now if there are no attributes and thus no action to be taken.
if not raise_exc and '__attributes__' not in instance.meta:
return
# Remove one by one, optionally raising an exception if name is missing.
for name in names:
if name in value:
value.remove(name) # Using the list.remove method
elif raise_exc:
raise ValueError(f'{name} not in {self.name}')
# Change to either None or a tuple for storing back to attribute
value = None if value == [] else tuple(value)
self.__set__(instance, value)
def _rename(self, name, new_name):
"""Rename ``name`` to ``new_name`` if ``name`` is in the list"""
names = self() or ()
if name in names:
new_names = list(names)
new_names[new_names.index(name)] = new_name
self.set(new_names)
def set(self, names):
"""Set value of include/exclude attribute to ``names``.
Parameters
----------
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
class _Context:
def __init__(self, descriptor_self):
self.descriptor_self = descriptor_self
self.names_orig = descriptor_self()
def __enter__(self):
pass
def __exit__(self, type, value, tb):
descriptor_self = self.descriptor_self
instance = descriptor_self._instance_ref()
descriptor_self.__set__(instance, self.names_orig)
def __repr__(self):
return repr(self.descriptor_self)
ctx = _Context(descriptor_self=self)
instance = self._instance_ref()
self.__set__(instance, names)
return ctx
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: https://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
pprint_exclude_names = PprintIncludeExclude()
pprint_include_names = PprintIncludeExclude()
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : array or `~numpy.ma.MaskedArray`
Copy of table as a numpy structured array.
ndarray for unmasked or `~numpy.ma.MaskedArray` for masked.
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
if not (col.info.dtype.isnative or keep_byteorder):
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
# For masked out, masked mixin columns need to set output mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, 'mask'):
data[col.info.name].mask = col.mask
return data
def __init__(self, data=None, masked=False, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
units=None, descriptions=None,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if isinstance(rows, types.GeneratorType):
                # Without this, the all(..) test below would use up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the TableAttribute
# descriptor. Any such attributes get removed from kwargs here and
# stored for use after the table is otherwise initialized. Any values
# provided via kwargs will have precedence over existing values from
# meta (e.g. from data as a Table or meta via kwargs).
meta_table_attrs = {}
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
meta_table_attrs[attr] = kwargs.pop(attr)
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray)
and data.shape == (0,)
and not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (names_from_list_of_dict
or _get_names_from_list_of_dict(data))
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
# Table was initialized as `t = Table()`. Set up for empty
# table with names=[], data=[], and n_cols=0.
# self._init_from_list() will simply return, giving the
# expected empty table.
names = []
else:
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError(f'Data type {type(data)} not allowed to init Table')
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
names = [None if name is None else str(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Update meta with TableAttributes supplied as kwargs in Table init.
# This takes precedence over previously-defined meta.
if meta_table_attrs:
for attr, value in meta_table_attrs.items():
setattr(self, attr, value)
        # Whatever happens above, the masked property must end up as None, True or False
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute('unit', units)
self._set_column_attribute('description', descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, Mapping):
# If not a dict map, assume iterable and map to dict if the right length
if len(values) != len(self.columns):
raise ValueError(f'sequence of {attr} values must match number of columns')
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(f'invalid column name {name} for setting {attr} attribute')
# Special case: ignore unit if it is an empty or blank string
if attr == 'unit' and isinstance(value, str):
if value.strip() == '':
value = None
if value not in (np.ma.masked, None):
setattr(self[name].info, attr, value)
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table([getattr(col, 'mask', FalseArray(col.shape))
for col in self.itercols()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
        fill_value : object, optional
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : `~astropy.table.Table`
New table with masked values filled
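
        Examples
        --------
        A minimal sketch (``print`` keeps the output independent of the
        numpy scalar repr)::

            >>> t = Table([[1, 2]], names=['a'], masked=True)
            >>> t['a'].mask = [False, True]
            >>> print(t.filled(-1)['a'][1])
            -1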
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [col.filled(fill_value) if hasattr(col, 'filled') else col
for col in self.itercols()]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
and SCEngine. If the supplied argument is None
(by default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
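
        Examples
        --------
        A minimal sketch (``print`` keeps the output independent of the
        numpy scalar repr)::

            >>> t = Table([[2, 1, 3]], names=['a'])
            >>> t.add_index('a')
            >>> print(t.loc[2]['a'])
            2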
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{}", of '
'type "{}"'.format(col.info.name, type(col)))
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
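
        Examples
        --------
        A sketch of 'freeze' mode, assuming ``t`` is a table with an index
        on column 'a' (not run as a doctest)::

            >>> with t.index_mode('freeze'):  # doctest: +SKIP
            ...     t['a'][0] = 99  # index refreshes only at context exit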
'''
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError(f'{inp_str} must be a list or None')
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns')
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
# every occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
if set(data[0].keys()) == names_from_data:
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
# Special case of initializing an empty table like `t = Table()`. No
# action required at this point.
if n_cols == 0:
return
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(self, data, copy=True, default_name=None, dtype=None, name=None):
"""
        Convert any allowed sequence data ``data`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
            name or data.info.name or default_name

        If ``data`` has no ``info`` then ``name = name or default_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# If the data is not an instance of Column or a mixin class, we can
# check the registry of mixin 'handlers' to see if the column can be
# converted to a mixin class
if (handler := get_mixin_handler(data)) is not None:
original_data = data
data = handler(data)
if not (data_is_mixin := self._is_mixin_for_table(data)):
fully_qualified_name = (original_data.__class__.__module__ + '.'
+ original_data.__class__.__name__)
raise TypeError('Mixin handler for object of type '
f'{fully_qualified_name} '
'did not return a valid mixin column')
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (not isinstance(data, Column) and not data_is_mixin
and isinstance(data, np.ndarray) and len(data.dtype) > 1):
data = data.view(NdarrayMixin)
data_is_mixin = True
# Get the final column name using precedence. Some objects may not
# have an info attribute. Also avoid creating info as a side effect.
if not name:
if isinstance(data, Column):
name = data.name or default_name
elif 'info' in getattr(data, '__dict__', ()):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
col = col_copy(data, copy_indices=self._init_indices) if copy else data
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, (np.ma.MaskedArray, Masked)):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more-likely
# could be MaskedColumn).
col_cls = masked_col_cls
elif data is None:
# Special case for data passed as the None object (for broadcasting
# to an object column). Need to turn data into numpy `None` scalar
# object, otherwise `Column` interprets data=None as no data instead
# of a object column of `None`.
data = np.array(None)
col_cls = self.ColumnClass
elif not hasattr(data, 'dtype'):
# `data` is none of the above, convert to numpy array or MaskedArray
# assuming only that it is a scalar or sequence or N-d nested
# sequence. This function is relatively intricate and tries to
# maintain performance for common cases while handling things like
# list input with embedded np.ma.masked entries. If `data` is a
# scalar then it gets returned unchanged so the original object gets
# passed to `Column` later.
data = _convert_sequence_data_to_array(data, dtype)
copy = False # Already made a copy above
col_cls = masked_col_cls if isinstance(data, np.ma.MaskedArray) else self.ColumnClass
else:
col_cls = self.ColumnClass
try:
col = col_cls(name=name, data=data, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError('unable to convert data to Column for Table')
col = self._convert_col_for_table(col)
return col
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) > 1:
raise ValueError(f'Inconsistent data column lengths: {lengths}')
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(table, newcols, verify=False, names=self.columns.keys())
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
        # case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError('Cannot have None for column name')
if len(set(names)) != len(names):
raise ValueError('Duplicate column names')
table.columns = table.TableColumns((name, col) for name, col in zip(names, cols))
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append(f'length={len(self)}')
descr = ' '.join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = f'<i>{xml_escape(descr)}</i>\n'
else:
descr = f'<{descr}>\n'
if tableid is None:
tableid = f'table{id(self)}'
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
out = self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
# Wrap <table> in <div>. This follows the pattern in pandas and allows
# table to be scrollable horizontally in VS Code notebook display.
out = f'<div>{out}</div>'
return out
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values, use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
"""
for col in self.itercols():
if hasattr(col, 'mask') and np.any(col.mask):
return True
else:
return False
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not [Masked]Quantity (which gets converted to
# [Masked]Column with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append(f'Length = {len(self)} rows')
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(max_lines, max_width, show_name,
show_unit, show_dtype, align)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or None
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or None
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = f'table{id(self)}-{np.random.randint(1, 1e6)}'
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.info.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or None
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or None
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin('file:', pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
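Examples
--------
A minimal illustration; for a two-row table with no units the returned
list holds a name row, a dash separator row, and the two data rows::
>>> t = Table([[1, 2]], names=('a',))
>>> lines = t.pformat()
>>> len(lines)
4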
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append(f'Length = {len(self)} rows')
return lines
@format_doc(_pformat_docs, id="{id}")
def pformat_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(max_lines, max_width, show_name,
show_unit, show_dtype, html, tableid,
align, tableclass)
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif ((isinstance(item, np.ndarray) and item.size == 0)
or (isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError(f'Illegal type {type(item)} for table item access')
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or (isinstance(item, tuple) # output from np.where
and all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError(f'Illegal type {type(item)} for table item access')
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray))
and all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray))
and np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names
and all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
# and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True,
default_name=None):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
between the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or None
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or None
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = f'col{len(self.columns)}'
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(col, name=name, copy=copy,
default_name=default_name)
# Assigning a scalar column to an empty table should result in an
# exception (see #3811).
if col.shape == () and len(self) == 0:
raise TypeError('Empty table cannot have column set to scalar value')
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, 'shape', ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape,
subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape,
subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError('Inconsistent data column lengths')
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + '_' + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new columns to the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of object
List of data objects for the new columns
indexes : list of int or None
Insert each column before the corresponding position, or append at the end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
See Also
--------
astropy.table.hstack, update, replace_column
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
default_names = [f'col{ii + len(self.columns)}'
for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes)):
self.add_column(cols[ii], index=indexes[ii], name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate, copy=copy)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
refcount = None
old_col = None
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn(f"replaced column '{name}'",
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : `~astropy.table.Column` or `~numpy.ndarray` or sequence
New column object to replace the existing column.
copy : bool
Make copy of the input ``col``, default=True
See Also
--------
add_columns, astropy.table.hstack, update
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f'column name {name} is not in the table')
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError('length of new column must match table length')
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice or int or array of int
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
`~astropy.table.Row` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (defaults to all columns if no names are provided)
Returns
-------
rows : iterable
Iterator returns tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f'{name} is not a valid column name')
cols = (self[name] for name in names)
out = zip(*cols)
return out
def _set_of_names_in_colnames(self, names):
"""Return ``names`` as a set if valid, or raise a `KeyError`.
``names`` is valid if all elements in it are in ``self.colnames``.
If ``names`` is a string then it is interpreted as a single column
name.
"""
names = {names} if isinstance(names, str) else set(names)
invalid_names = names.difference(self.colnames)
if len(invalid_names) == 1:
raise KeyError(f'column "{invalid_names.pop()}" does not exist')
elif len(invalid_names) > 1:
raise KeyError(f'columns {invalid_names} do not exist')
return names
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : str or iterable of str
Names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
'''
for name in self._set_of_names_in_colnames(names):
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
encode_decode_func : callable
Function used to encode/decode the column values when the fast
ASCII-only conversion fails (e.g. ``np.char.encode`` or ``np.char.decode``)
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, 'utf-8'))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in col.info.attr_names - col.info._attrs_no_copy - set(['dtype']):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
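Examples
--------
An illustrative conversion, checking the dtype kind before and after::
>>> t = Table([[b'abc', b'def']], names=('a',))
>>> t['a'].dtype.kind
'S'
>>> t.convert_bytestring_to_unicode()
>>> t['a'].dtype.kind
'U'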
"""
self._convert_string_dtype('S', 'U', np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype('U', 'S', np.char.encode)
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : str or iterable of str
The columns to keep. All other columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
names = self._set_of_names_in_colnames(names)
for colname in self.colnames:
if colname not in names:
self.columns.pop(colname)
def rename_column(self, name, new_name):
'''
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
'''
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
'''
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError("input 'new_names' must be a tuple or a list of column names")
if len(names) != len(new_names):
raise ValueError("input 'names' and 'new_names' list arguments must be the same length")
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` argument should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` argument should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
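Examples
--------
Insert a row in the middle of a small table (output shown indicatively)::
>>> t = Table([[1, 3], ['x', 'z']], names=('a', 'b'))
>>> t.insert_row(1, [2, 'y'])
>>> print(t)
 a   b
--- ---
  1   x
  2   y
  3   z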
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {} is out of bounds for table with length {}"
.format(index, N))
if index < 0:
index += N
if isinstance(vals, Mapping) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not isinstance(mask, Mapping):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
# Insert val at index for each column
columns = self.TableColumns()
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
try:
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn):
col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {} after inserting {}'
' (expected {}, got {})'
.format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, 'mask'):
newcol[index] = np.ma.masked
else:
raise TypeError("mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name))
columns[name] = newcol
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{}':\n{}"
.format(name, err)) from err
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def update(self, other, copy=True):
"""
Perform a dictionary-style update and merge metadata.
The argument ``other`` must be a |Table|, or something that can be used
to initialize a table. Columns from (possibly converted) ``other`` are
added to this table. In case of matching column names the column from
this table is replaced with the one from ``other``.
Parameters
----------
other : table-like
Data to update this table with.
copy : bool
Whether the updated columns should be copies of or references to
the originals.
See Also
--------
add_columns, astropy.table.hstack, replace_column
Examples
--------
Update a table with another table::
>>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0})
>>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2})
>>> t1.update(t2)
>>> t1
<Table length=2>
a b c
str3 float64 float64
---- ------- -------
foo 1.0 7.0
bar 2.0 11.0
>>> t1.meta
{'i': 0, 'n': 2}
Update a table with a dictionary::
>>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]})
>>> t.update({'b': [1., 2.]})
>>> t
<Table length=2>
a b
str3 float64
---- -------
foo 1.0
bar 2.0
"""
from .operations import _merge_table_meta
if not isinstance(other, Table):
other = self.__class__(other, copy=copy)
common_cols = set(self.colnames).intersection(other.colnames)
for name, col in other.items():
if name in common_cols:
self.replace_column(name, col, copy=copy)
else:
self.add_column(col, name=name, copy=copy)
_merge_table_meta(self, [self, other], metadata_conflicts='silent')
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
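Examples
--------
A small illustration of forward and reverse sort indices (``tolist()`` is
used here only to make the doctest output independent of the array class)::
>>> t = Table([[3, 1, 2]], names=('a',))
>>> t.argsort('a').tolist()
[1, 2, 0]
>>> t.argsort('a', reverse=True).tolist()
[0, 2, 1]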
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for in principle sortable columns like Time, which get stored as
# object arrays.
if len(keys) > 1:
kwargs['order'] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs['kind'] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
def sort(self, keys=None, *, kind=None, reverse=False):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode('freeze'):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
'''
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
decimals : int or dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
If a certain column is not in the dict given, it will remain the
same.
'''
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
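Examples
--------
A short demonstration that the copied data are independent::
>>> t = Table([[1, 2]], names=('a',))
>>> t2 = t.copy()
>>> t2['a'][0] = 99
>>> print(t['a'][0])
1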
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
This is the actual implementation for ``__eq__``.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
Comparing one Table with another::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : table-like object or list or scalar
Object to compare with table
Examples
--------
Compare one Table with another::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError('cannot compare tables with different column names')
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
eq = self[name] == other[name]
if (warns and issubclass(warns[-1].category, FutureWarning)
and 'elementwise comparison failed' in str(warns[-1].message)):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f'unable to compare column {name}') from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (isinstance(eq, np.ndarray)
and eq.dtype is np.dtype('bool')
and len(eq) == len(self)):
raise TypeError(f'comparison for column {name} returned {eq} '
f'instead of the expected boolean ndarray')
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
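Examples
--------
Group a small table by one key column and count the resulting groups::
>>> t = Table([[1, 1, 2], [4, 5, 6]], names=('a', 'b'))
>>> tg = t.group_by('a')
>>> len(tg.groups)
2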
"""
return groups.table_group_by(self, keys)
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 0 days 00:00:03
2002-01-01 2.0 6.0 8.0 0 days 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError('index must be None, False, True or a table '
'column name')
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from . import serialize
from astropy.time import TimeBase, TimeDelta
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
# Certain subclasses (e.g. TimeSeries) may generate new indices on
# table creation, so make sure there are no indices on the table.
for col in tbl.itercols():
col.info.indices.clear()
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype('timedelta64[ns]')
nat = np.timedelta64('NaT')
else:
new_col = col.datetime64.copy()
nat = np.datetime64('NaT')
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)')
out = OrderedDict()
for name, column in tbl.columns.items():
if getattr(column.dtype, 'isnative', True):
out[name] = column
else:
out[name] = column.data.byteswap().newbyteorder('=')
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ['i', 'u']:
pd_dtype = column.dtype.name
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace('i', 'I').replace('u', 'U')
out[name] = Series(out[name], dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to {out[name].dtype}",
TableReplaceWarning, stacklevel=3)
elif column.dtype.kind not in ['f', 'c']:
out[name] = column.astype(object).filled(np.nan)
kwargs = {}
if index:
idx = out.pop(index)
kwargs['index'] = idx
# We add the table index to Series inputs (MaskedColumn with int values) to override
# its default RangeIndex, see #11432
for v in out.values():
if isinstance(v, Series):
v.index = idx
df = DataFrame(out, **kwargs)
if index:
# Explicitly set the pandas DataFrame index to the original table
# index name.
df.index.name = idx.info.name
return df
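    # Illustrative sketch (not part of the astropy source): with the default
    # ``use_nullable_int=True`` a masked integer column converts to a pandas
    # nullable integer Series, assuming pandas >= 0.24 is installed.
    #
    #     >>> from astropy.table import Table, MaskedColumn  # doctest: +SKIP
    #     >>> t = Table({'a': MaskedColumn([1, 2, 3], mask=[False, True, False])})
    #     >>> t.to_pandas()['a']
    #     0       1
    #     1    <NA>
    #     2       3
    #     Name: a, dtype: Int64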
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
        units : dict
            A dict mapping column names to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 0 days 00:00:01 3.0
1 2002-01-01 0 days 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
Time TimeDelta float64
----------------------- --------- -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or 'index'
while index_name in names:
index_name = '_' + index_name + '_'
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
warnings.warn(f'`units` contains additional columns: {not_found}')
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ['u', 'i'] and np.any(mask):
# Special-case support for pandas nullable int
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit, copy=False)
continue
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == 'M':
from astropy.time import Time
out[name] = Time(data, format='datetime64')
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = 'isot'
# Numpy timedelta64
elif data.dtype.kind == 'm':
from astropy.time import TimeDelta
data_sec = data.astype('timedelta64[ns]').astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format='sec')
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
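    # Illustrative sketch (not part of the astropy source): the ``units``
    # mapping attaches units to the resulting Table columns.
    #
    #     >>> import pandas as pd  # doctest: +SKIP
    #     >>> import astropy.units as u
    #     >>> df = pd.DataFrame({'x': [1.0, 2.0]})
    #     >>> t = Table.from_pandas(df, units={'x': u.m})
    #     >>> t['x'].unit
    #     Unit("m")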
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
See also:
- https://docs.astropy.org/en/stable/table/
- https://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, 'unit', None) is not None:
# We need to turn the column into a quantity; use subok=True to allow
# Quantity subclasses identified in the unit (such as u.mag()).
q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity
try:
qcol = q_cls(col.data, col.unit, copy=False, subok=True)
except Exception as exc:
warnings.warn(f"column {col.info.name} has a unit but is kept as "
f"a {col.__class__.__name__} as an attempt to "
f"convert it to Quantity failed with:\n{exc!r}",
AstropyUserWarning)
else:
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
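    # Illustrative sketch (not part of the astropy source): a Column with a
    # unit becomes a Quantity when placed in a QTable.
    #
    #     >>> from astropy.table import Table, QTable  # doctest: +SKIP
    #     >>> import astropy.units as u
    #     >>> t = Table({'v': [1.0, 2.0]})
    #     >>> t['v'].unit = u.km / u.s
    #     >>> qt = QTable(t)
    #     >>> type(qt['v']).__name__
    #     'Quantity'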
|
0edf849fcdd5a25cd472ccc4709c3def8dce4f5d16be9e1c20e54ec0c3fd805a | import json
import textwrap
import copy
from collections import OrderedDict
import numpy as np
import yaml
__all__ = ['get_header_from_yaml', 'get_yaml_from_header', 'get_yaml_from_table']
class ColumnOrderList(list):
"""
List of tuples that sorts in a specific order that makes sense for
astropy table column attributes.
"""
def sort(self, *args, **kwargs):
super().sort()
column_keys = ['name', 'unit', 'datatype', 'format', 'description', 'meta']
in_dict = dict(self)
out_list = []
for key in column_keys:
if key in in_dict:
out_list.append((key, in_dict[key]))
for key, val in self:
if key not in column_keys:
out_list.append((key, val))
# Clear list in-place
del self[:]
self.extend(out_list)
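# Illustrative sketch (not part of the astropy source): sorting puts the
# known column attributes first, in the fixed order above, followed by any
# remaining keys.
#
#     >>> c = ColumnOrderList([('meta', {}), ('name', 'a'), ('extra', 1)])
#     >>> c.sort()
#     >>> [key for key, val in c]
#     ['name', 'meta', 'extra']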
class ColumnDict(dict):
"""
Specialized dict subclass to represent attributes of a Column
and return items() in a preferred order. This is only for use
in generating a YAML map representation that has a fixed order.
"""
def items(self):
"""
Return items as a ColumnOrderList, which sorts in the preferred
way for column attributes.
"""
return ColumnOrderList(super().items())
def _construct_odict(load, node):
"""
Construct OrderedDict from !!omap in yaml safe load.
Source: https://gist.github.com/weaver/317164
License: Unspecified
This is the same as SafeConstructor.construct_yaml_omap(),
except the data type is changed to OrderedDict() and setitem is
used instead of append in the loop
Examples
--------
::
>>> yaml.load(''' # doctest: +SKIP
... !!omap
... - foo: bar
... - mumble: quux
... - baz: gorp
... ''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
"""
omap = OrderedDict()
yield omap
if not isinstance(node, yaml.SequenceNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
f"expected a sequence, but found {node.id}", node.start_mark)
for subnode in node.value:
if not isinstance(subnode, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
f"expected a mapping of length 1, but found {subnode.id}",
subnode.start_mark)
if len(subnode.value) != 1:
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
f"expected a single mapping item, but found {len(subnode.value)} items",
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = load.construct_object(key_node)
value = load.construct_object(value_node)
omap[key] = value
def _repr_pairs(dump, tag, sequence, flow_style=None):
"""
This is the same code as BaseRepresenter.represent_sequence(),
but the value passed to dump.represent_data() in the loop is a
dictionary instead of a tuple.
Source: https://gist.github.com/weaver/317164
License: Unspecified
"""
value = []
node = yaml.SequenceNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
for (key, val) in sequence:
item = dump.represent_data({key: val})
if not (isinstance(item, yaml.ScalarNode) and not item.style):
best_style = False
value.append(item)
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
def _repr_odict(dumper, data):
"""
Represent OrderedDict in yaml dump.
Source: https://gist.github.com/weaver/317164
License: Unspecified
>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP
'!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
>>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP
'!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
"""
return _repr_pairs(dumper, 'tag:yaml.org,2002:omap', data.items())
def _repr_column_dict(dumper, data):
"""
Represent ColumnDict in yaml dump.
This is the same as an ordinary mapping except that the keys
are written in a fixed order that makes sense for astropy table
columns.
"""
return dumper.represent_mapping('tag:yaml.org,2002:map', data)
def _get_variable_length_array_shape(col):
"""Check if object-type ``col`` is really a variable length list.
    That is true if the object consists purely of lists of nested lists, where
the shape of every item can be represented as (m, n, ..., *) where the (m,
n, ...) are constant and only the lists in the last axis have variable
shape. If so the returned value of shape will be a tuple in the form (m, n,
..., None).
    If ``col`` is a variable length array then the returned ``dtype`` corresponds
to the type found by numpy for all the individual values. Otherwise it will
be ``np.dtype(object)``.
Parameters
    ----------
col : column-like
Input table column, assumed to be object-type
Returns
    -------
shape : tuple
        Inferred variable-length shape in the form ``(m, n, ..., None)``, or
        ``()`` if ``col`` is not a variable-length array
dtype : np.dtype
Numpy dtype that applies to col
"""
class ConvertError(ValueError):
"""Local conversion error used below"""
# Numpy types supported as variable-length arrays
np_classes = (np.floating, np.integer, np.bool_, np.unicode_)
try:
if len(col) == 0 or not all(isinstance(val, np.ndarray) for val in col):
raise ConvertError
dtype = col[0].dtype
shape = col[0].shape[:-1]
for val in col:
if not issubclass(val.dtype.type, np_classes) or val.shape[:-1] != shape:
raise ConvertError
dtype = np.promote_types(dtype, val.dtype)
shape = shape + (None,)
except ConvertError:
# `col` is not a variable length array, return shape and dtype to
# the original. Note that this function is only called if
# col.shape[1:] was () and col.info.dtype is object.
dtype = col.info.dtype
shape = ()
return shape, dtype
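# Illustrative sketch (not part of the astropy source): an object column
# whose elements are 1-d float arrays of differing lengths is inferred as
# variable length with shape (None,).
#
#     >>> import numpy as np  # doctest: +SKIP
#     >>> from astropy.table import Column
#     >>> arr = np.empty(2, dtype=object)
#     >>> arr[0] = np.array([1.0, 2.0])
#     >>> arr[1] = np.array([3.0])
#     >>> _get_variable_length_array_shape(Column(arr))
#     ((None,), dtype('float64'))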
def _get_datatype_from_dtype(dtype):
"""Return string version of ``dtype`` for writing to ECSV ``datatype``"""
datatype = dtype.name
if datatype.startswith(('bytes', 'str')):
datatype = 'string'
if datatype.endswith('_'):
datatype = datatype[:-1] # string_ and bool_ lose the final _ for ECSV
return datatype
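# Illustrative sketch (not part of the astropy source) of the mapping:
# sized string dtypes collapse to 'string' and trailing underscores are
# dropped.
#
#     >>> import numpy as np  # doctest: +SKIP
#     >>> _get_datatype_from_dtype(np.dtype('int64'))
#     'int64'
#     >>> _get_datatype_from_dtype(np.dtype('U5'))
#     'string'
#     >>> _get_datatype_from_dtype(np.dtype(bool))
#     'bool'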
def _get_col_attributes(col):
"""
Extract information from a column (apart from the values) that is required
to fully serialize the column.
Parameters
----------
col : column-like
Input Table column
Returns
-------
attrs : dict
Dict of ECSV attributes for ``col``
"""
dtype = col.info.dtype # Type of column values that get written
subtype = None # Type of data for object columns serialized with JSON
shape = col.shape[1:] # Shape of multidim / variable length columns
if dtype.name == 'object':
if shape == ():
# 1-d object type column might be a variable length array
dtype = np.dtype(str)
shape, subtype = _get_variable_length_array_shape(col)
else:
# N-d object column is subtype object but serialized as JSON string
dtype = np.dtype(str)
subtype = np.dtype(object)
elif shape:
# N-d column which is not object is serialized as JSON string
dtype = np.dtype(str)
subtype = col.info.dtype
datatype = _get_datatype_from_dtype(dtype)
# Set the output attributes
attrs = ColumnDict()
attrs['name'] = col.info.name
attrs['datatype'] = datatype
for attr, nontrivial, xform in (('unit', lambda x: x is not None, str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
attrs[attr] = xform(col_attr) if xform else col_attr
if subtype:
attrs['subtype'] = _get_datatype_from_dtype(subtype)
# Numpy 'object' maps to 'subtype' of 'json' in ECSV
if attrs['subtype'] == 'object':
attrs['subtype'] = 'json'
if shape:
attrs['subtype'] += json.dumps(list(shape), separators=(',', ':'))
return attrs
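# Illustrative sketch (not part of the astropy source): a plain float column
# with a unit yields a minimal attribute dict in the fixed key order.
#
#     >>> from astropy.table import Column  # doctest: +SKIP
#     >>> col = Column([1.0, 2.0], name='x', unit='m')
#     >>> dict(_get_col_attributes(col))
#     {'name': 'x', 'datatype': 'float64', 'unit': 'm'}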
def get_yaml_from_table(table):
"""
Return lines with a YAML representation of header content from the ``table``.
Parameters
----------
table : `~astropy.table.Table` object
Table for which header content is output
Returns
-------
lines : list
List of text lines with YAML header content
"""
header = {'cols': list(table.columns.values())}
if table.meta:
header['meta'] = table.meta
return get_yaml_from_header(header)
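# Illustrative sketch (not part of the astropy source): the returned lines
# are YAML text describing the columns and, if present, the table meta.
#
#     >>> from astropy.table import Table  # doctest: +SKIP
#     >>> t = Table({'a': [1, 2]})
#     >>> print('\n'.join(get_yaml_from_table(t)))
#     datatype:
#     - {name: a, datatype: int64}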
def get_yaml_from_header(header):
"""
Return lines with a YAML representation of header content from a Table.
The ``header`` dict must contain these keys:
- 'cols' : list of table column objects (required)
- 'meta' : table 'meta' attribute (optional)
Other keys included in ``header`` will be serialized in the output YAML
representation.
Parameters
----------
header : dict
Table header content
Returns
-------
lines : list
List of text lines with YAML header content
"""
from astropy.io.misc.yaml import AstropyDumper
class TableDumper(AstropyDumper):
"""
Custom Dumper that represents OrderedDict as an !!omap object.
"""
def represent_mapping(self, tag, mapping, flow_style=None):
"""
This is a combination of the Python 2 and 3 versions of this method
in the PyYAML library to allow the required key ordering via the
ColumnOrderList object. The Python 3 version insists on turning the
items() mapping into a list object and sorting, which results in
alphabetical order for the column keys.
"""
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
if hasattr(mapping, 'sort'):
mapping.sort()
else:
mapping = list(mapping)
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
TableDumper.add_representer(OrderedDict, _repr_odict)
TableDumper.add_representer(ColumnDict, _repr_column_dict)
header = copy.copy(header) # Don't overwrite original
header['datatype'] = [_get_col_attributes(col) for col in header['cols']]
del header['cols']
lines = yaml.dump(header, default_flow_style=None,
Dumper=TableDumper, width=130).splitlines()
return lines
class YamlParseError(Exception):
pass
def get_header_from_yaml(lines):
"""
Get a header dict from input ``lines`` which should be valid YAML. This
input will typically be created by get_yaml_from_header. The output is a
dictionary which describes all the table and column meta.
    The get_cols() method in the io/ascii/ecsv.py file should be used as a
    guide to using this information when constructing a table from the
    header dict.
Parameters
----------
lines : list
List of text lines with YAML header content
Returns
-------
header : dict
Dictionary describing table and column meta
"""
from astropy.io.misc.yaml import AstropyLoader
class TableLoader(AstropyLoader):
"""
Custom Loader that constructs OrderedDict from an !!omap object.
This does nothing but provide a namespace for adding the
custom odict constructor.
"""
TableLoader.add_constructor('tag:yaml.org,2002:omap', _construct_odict)
# Now actually load the YAML data structure into `meta`
header_yaml = textwrap.dedent('\n'.join(lines))
try:
header = yaml.load(header_yaml, Loader=TableLoader)
except Exception as err:
raise YamlParseError() from err
return header
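# Illustrative round-trip sketch (not part of the astropy source): header
# lines written by get_yaml_from_table() parse back into a header dict.
#
#     >>> from astropy.table import Table  # doctest: +SKIP
#     >>> t = Table({'a': [1, 2]})
#     >>> header = get_header_from_yaml(get_yaml_from_table(t))
#     >>> header['datatype'][0]['name']
#     'a'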
|
ab5e01a4fb8d8a279059c81db4a7b0a1576ef1e408f5507c38e6f8f05008a664 | """
High-level table operations:
- join()
- setdiff()
- hstack()
- vstack()
- dstack()
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import collections
import itertools
from collections import OrderedDict, Counter
from collections.abc import Mapping, Sequence
import numpy as np
from astropy.utils import metadata
from astropy.utils.masked import Masked
from .table import Table, QTable, Row, Column, MaskedColumn
from astropy.units import Quantity
from . import _np_utils
from .np_utils import TableMergeError
__all__ = ['join', 'setdiff', 'hstack', 'vstack', 'unique',
'join_skycoord', 'join_distance']
__doctest_requires__ = {'join_skycoord': ['scipy'], 'join_distance': ['scipy']}
def _merge_table_meta(out, tables, metadata_conflicts='warn'):
out_meta = deepcopy(tables[0].meta)
for table in tables[1:]:
out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts)
out.meta.update(out_meta)
def _get_list_of_tables(tables):
"""
Check that tables is a Table or sequence of Tables. Returns the
corresponding list of Tables.
"""
# Make sure we have a list of things
if not isinstance(tables, Sequence):
tables = [tables]
# Make sure there is something to stack
if len(tables) == 0:
raise ValueError('no values provided to stack.')
# Convert inputs (Table, Row, or anything column-like) to Tables.
# Special case that Quantity converts to a QTable.
for ii, val in enumerate(tables):
if isinstance(val, Table):
pass
elif isinstance(val, Row):
tables[ii] = Table(val)
elif isinstance(val, Quantity):
tables[ii] = QTable([val])
else:
try:
tables[ii] = Table([val])
except (ValueError, TypeError) as err:
raise TypeError(f'Cannot convert {val} to table column.') from err
return tables
def _get_out_class(objs):
"""
From a list of input objects ``objs`` get merged output object class.
This is just taken as the deepest subclass. This doesn't handle complicated
inheritance schemes, but as a special case, classes which share ``info``
are taken to be compatible.
"""
out_class = objs[0].__class__
for obj in objs[1:]:
if issubclass(obj.__class__, out_class):
out_class = obj.__class__
if any(not (issubclass(out_class, obj.__class__)
or out_class.info is obj.__class__.info) for obj in objs):
raise ValueError('unmergeable object classes {}'
.format([obj.__class__.__name__ for obj in objs]))
return out_class
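# Illustrative sketch (not part of the astropy source): mixing Table and
# QTable inputs resolves to the deepest subclass, here QTable.
#
#     >>> from astropy.table import Table, QTable  # doctest: +SKIP
#     >>> _get_out_class([Table({'a': [1]}), QTable({'a': [1]})]).__name__
#     'QTable'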
def join_skycoord(distance, distance_func='search_around_sky'):
"""Helper function to join on SkyCoord columns using distance matching.
This function is intended for use in ``table.join()`` to allow performing a
table join where the key columns are both ``SkyCoord`` objects, matched by
computing the distance between points and accepting values below
``distance``.
The distance cross-matching is done using either
`~astropy.coordinates.search_around_sky` or
`~astropy.coordinates.search_around_3d`, depending on the value of
``distance_func``. The default is ``'search_around_sky'``.
One can also provide a function object for ``distance_func``, in which case
it must be a function that follows the same input and output API as
`~astropy.coordinates.search_around_sky`. In this case the function will
be called with ``(skycoord1, skycoord2, distance)`` as arguments.
Parameters
----------
distance : `~astropy.units.Quantity` ['angle', 'length']
Maximum distance between points to be considered a join match.
Must have angular or distance units.
distance_func : str or function
Specifies the function for performing the cross-match based on
``distance``. If supplied as a string this specifies the name of a
function in `astropy.coordinates`. If supplied as a function then that
function is called directly.
Returns
-------
join_func : function
Function that accepts two ``SkyCoord`` columns (col1, col2) and returns
the tuple (ids1, ids2) of pair-matched unique identifiers.
Examples
--------
This example shows an inner join of two ``SkyCoord`` columns, taking any
sources within 0.2 deg to be a match. Note the new ``sc_id`` column which
is added and provides a unique source identifier for the matches.
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> from astropy.table import Table, join_skycoord
>>> from astropy import table
>>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg')
>>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg')
>>> join_func = join_skycoord(0.2 * u.deg)
>>> join_func(sc1, sc2) # Associate each coordinate with unique source ID
(array([3, 1, 1, 2]), array([4, 1, 2]))
>>> t1 = Table([sc1], names=['sc'])
>>> t2 = Table([sc2], names=['sc'])
>>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)})
>>> print(t12) # Note new `sc_id` column with the IDs from join_func()
sc_id sc_1 sc_2
deg,deg deg,deg
----- ------- --------
1 1.0,0.0 1.05,0.0
1 1.1,0.0 1.05,0.0
2 2.0,0.0 2.1,0.0
"""
if isinstance(distance_func, str):
import astropy.coordinates as coords
try:
distance_func = getattr(coords, distance_func)
except AttributeError as err:
raise ValueError('distance_func must be a function in astropy.coordinates') from err
else:
from inspect import isfunction
if not isfunction(distance_func):
raise ValueError('distance_func must be a str or function')
def join_func(sc1, sc2):
# Call the appropriate SkyCoord method to find pairs within distance
idxs1, idxs2, d2d, d3d = distance_func(sc1, sc2, distance)
# Now convert that into unique identifiers for each near-pair. This is
# taken to be transitive, so that if points 1 and 2 are "near" and points
# 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier.
# This identifier will then be used in the table join matching.
# Identifiers for each column, initialized to all zero.
ids1 = np.zeros(len(sc1), dtype=int)
ids2 = np.zeros(len(sc2), dtype=int)
# Start the identifier count at 1
id_ = 1
for idx1, idx2 in zip(idxs1, idxs2):
# If this col1 point is previously identified then set corresponding
# col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0:
ids2[idx2] = ids1[idx1]
elif ids2[idx2] > 0:
ids1[idx1] = ids2[idx2]
else:
# Not yet seen so set identifier for col1 and col2
ids1[idx1] = id_
ids2[idx2] = id_
id_ += 1
# Fill in unique identifiers for points with no near neighbor
for ids in (ids1, ids2):
for idx in np.flatnonzero(ids == 0):
ids[idx] = id_
id_ += 1
        # End of closure join_func()
return ids1, ids2
return join_func
def join_distance(distance, kdtree_args=None, query_args=None):
"""Helper function to join table columns using distance matching.
This function is intended for use in ``table.join()`` to allow performing
a table join where the key columns are matched by computing the distance
between points and accepting values below ``distance``. This numerical
"fuzzy" match can apply to 1-D or 2-D columns, where in the latter case
the distance is a vector distance.
The distance cross-matching is done using `scipy.spatial.cKDTree`. If
necessary you can tweak the default behavior by providing ``dict`` values
for the ``kdtree_args`` or ``query_args``.
Parameters
----------
distance : float or `~astropy.units.Quantity` ['length']
Maximum distance between points to be considered a join match
kdtree_args : dict, None
Optional extra args for `~scipy.spatial.cKDTree`
query_args : dict, None
Optional extra args for `~scipy.spatial.cKDTree.query_ball_tree`
Returns
-------
join_func : function
        Function that accepts two column-like objects (col1, col2) and
        returns the tuple (ids1, ids2) of pair-matched unique identifiers.
Examples
--------
>>> from astropy.table import Table, join_distance
>>> from astropy import table
>>> c1 = [0, 1, 1.1, 2]
>>> c2 = [0.5, 1.05, 2.1]
>>> t1 = Table([c1], names=['col'])
>>> t2 = Table([c2], names=['col'])
>>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)})
>>> print(t12)
col_id col_1 col_2
------ ----- -----
1 1.0 1.05
1 1.1 1.05
2 2.0 2.1
3 0.0 --
4 -- 0.5
"""
try:
from scipy.spatial import cKDTree
except ImportError as exc:
raise ImportError('scipy is required to use join_distance()') from exc
if kdtree_args is None:
kdtree_args = {}
if query_args is None:
query_args = {}
def join_func(col1, col2):
if col1.ndim > 2 or col2.ndim > 2:
            raise ValueError('columns for join_distance must be 1- or 2-dimensional')
if isinstance(distance, Quantity):
# Convert to np.array with common unit
col1 = col1.to_value(distance.unit)
col2 = col2.to_value(distance.unit)
dist = distance.value
else:
# Convert to np.array to allow later in-place shape changing
col1 = np.asarray(col1)
col2 = np.asarray(col2)
dist = distance
# Ensure columns are pure np.array and are 2-D for use with KDTree
if col1.ndim == 1:
col1.shape = col1.shape + (1,)
if col2.ndim == 1:
col2.shape = col2.shape + (1,)
# Cross-match col1 and col2 within dist using KDTree
kd1 = cKDTree(col1, **kdtree_args)
kd2 = cKDTree(col2, **kdtree_args)
nears = kd1.query_ball_tree(kd2, r=dist, **query_args)
# Output of above is nears which is a list of lists, where the outer
# list corresponds to each item in col1, and where the inner lists are
# indexes into col2 of elements within the distance tolerance. This
# identifies col1 / col2 near pairs.
# Now convert that into unique identifiers for each near-pair. This is
# taken to be transitive, so that if points 1 and 2 are "near" and points
# 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier.
# This identifier will then be used in the table join matching.
# Identifiers for each column, initialized to all zero.
ids1 = np.zeros(len(col1), dtype=int)
ids2 = np.zeros(len(col2), dtype=int)
# Start the identifier count at 1
id_ = 1
for idx1, idxs2 in enumerate(nears):
for idx2 in idxs2:
# If this col1 point is previously identified then set corresponding
# col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0:
ids2[idx2] = ids1[idx1]
elif ids2[idx2] > 0:
ids1[idx1] = ids2[idx2]
else:
# Not yet seen so set identifier for col1 and col2
ids1[idx1] = id_
ids2[idx2] = id_
id_ += 1
# Fill in unique identifiers for points with no near neighbor
for ids in (ids1, ids2):
for idx in np.flatnonzero(ids == 0):
ids[idx] = id_
id_ += 1
        # End of closure join_func()
return ids1, ids2
return join_func
def join(left, right, keys=None, join_type='inner', *,
keys_left=None, keys_right=None,
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'], metadata_conflicts='warn',
join_funcs=None):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : `~astropy.table.Table`-like object
Left side table in the join. If not a Table, will call ``Table(left)``
right : `~astropy.table.Table`-like object
Right side table in the join. If not a Table, will call ``Table(right)``
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
keys_left : str or list of str or list of column-like, optional
Left column(s) used to match rows instead of ``keys`` arg. This can be
be a single left table column name or list of column names, or a list of
column-like values with the same lengths as the left table.
keys_right : str or list of str or list of column-like, optional
Same as ``keys_left``, but for the right side of the join.
uniq_col_name : str or None
        String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Try converting inputs to Table as needed
if not isinstance(left, Table):
left = Table(left)
if not isinstance(right, Table):
right = Table(right)
col_name_map = OrderedDict()
out = _join(left, right, keys, join_type,
uniq_col_name, table_names, col_name_map, metadata_conflicts,
join_funcs,
keys_left=keys_left, keys_right=keys_right)
# Merge the column and table meta data. Table subclasses might override
# these methods for custom merge behavior.
_merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts)
return out
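# Illustrative sketch (not part of the astropy source): an inner join on the
# common key column 'a' keeps only rows present in both tables.
#
#     >>> from astropy.table import Table, join  # doctest: +SKIP
#     >>> t1 = Table({'a': [1, 2], 'b': [3, 4]})
#     >>> t2 = Table({'a': [2, 3], 'c': [5, 6]})
#     >>> print(join(t1, t2, keys='a'))
#      a   b   c
#     --- --- ---
#       2   4   5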
def setdiff(table1, table2, keys=None):
"""
Take a set difference of table rows.
The row set difference will contain all rows in ``table1`` that are not
present in ``table2``. If the keys parameter is not defined, all columns in
``table1`` will be included in the output table.
Parameters
----------
table1 : `~astropy.table.Table`
``table1`` is on the left side of the set difference.
table2 : `~astropy.table.Table`
``table2`` is on the right side of the set difference.
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns in ``table1``.
Returns
-------
diff_table : `~astropy.table.Table`
New table containing the set difference between tables. If the set
        difference is empty, an empty table will be returned.
Examples
--------
To get a set difference between two tables::
>>> from astropy.table import setdiff, Table
>>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b'))
>>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 c
4 d
9 f
>>> print(t2)
a b
--- ---
1 c
5 b
9 f
>>> print(setdiff(t1, t2))
a b
--- ---
4 d
>>> print(setdiff(t2, t1))
a b
--- ---
5 b
"""
if keys is None:
keys = table1.colnames
# Check that all keys are in table1 and table2
for tbl, tbl_str in ((table1, 'table1'), (table2, 'table2')):
diff_keys = np.setdiff1d(keys, tbl.colnames)
if len(diff_keys) != 0:
raise ValueError("The {} columns are missing from {}, cannot take "
"a set difference.".format(diff_keys, tbl_str))
# Make a light internal copy of both tables
t1 = table1.copy(copy_data=False)
t1.meta = {}
t1.keep_columns(keys)
    t1['__index1__'] = np.arange(len(table1))  # Keep track of row indices
# Make a light internal copy to avoid touching table2
t2 = table2.copy(copy_data=False)
t2.meta = {}
t2.keep_columns(keys)
# Dummy column to recover rows after join
t2['__index2__'] = np.zeros(len(t2), dtype=np.uint8) # dummy column
t12 = _join(t1, t2, join_type='left', keys=keys,
metadata_conflicts='silent')
# If t12 index2 is masked then that means some rows were in table1 but not table2.
if hasattr(t12['__index2__'], 'mask'):
# Define bool mask of table1 rows not in table2
diff = t12['__index2__'].mask
# Get the row indices of table1 for those rows
idx = t12['__index1__'][diff]
# Select corresponding table1 rows straight from table1 to ensure
# correct table and column types.
t12_diff = table1[idx]
else:
t12_diff = table1[[]]
return t12_diff
def dstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack columns within tables depth-wise
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
        Table(s) to stack depth-wise with the current table.
        Table columns should have the same shape and name for depth-wise stacking.
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
    To stack two tables depth-wise do::
    >>> from astropy.table import dstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(dstack([t1, t2]))
a [2] b [2]
------ ------
1 .. 5 3 .. 7
2 .. 6 4 .. 8
"""
_check_join_type(join_type, 'dstack')
tables = _get_list_of_tables(tables)
if len(tables) == 1:
return tables[0] # no point in stacking a single table
n_rows = set(len(table) for table in tables)
if len(n_rows) != 1:
raise ValueError('Table lengths must all match for dstack')
n_row = n_rows.pop()
out = vstack(tables, join_type, metadata_conflicts)
for name, col in out.columns.items():
col = out[name]
        # Reshape so that each original column is now in a row.
# If entries are not 0-dim then those additional shape dims
# are just carried along.
# [x x x y y y] => [[x x x],
# [y y y]]
new_shape = (len(tables), n_row) + col.shape[1:]
try:
col.shape = (len(tables), n_row) + col.shape[1:]
except AttributeError:
col = col.reshape(new_shape)
# Transpose the table and row axes to get to
# [[x, y],
# [x, y]
# [x, y]]
axes = np.arange(len(col.shape))
axes[:2] = [1, 0]
# This temporarily makes `out` be corrupted (columns of different
# length) but it all works out in the end.
out.columns.__setitem__(name, col.transpose(axes), validated=True)
return out
def vstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack tables vertically (along rows)
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
Table(s) to stack along rows (vertically) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import vstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(vstack([t1, t2]))
a b
--- ---
1 3
2 4
5 7
6 8
"""
_check_join_type(join_type, 'vstack')
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _vstack(tables, join_type, col_name_map, metadata_conflicts)
# Merge table metadata
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def hstack(tables, join_type='outer',
uniq_col_name='{col_name}_{table_name}', table_names=None,
metadata_conflicts='warn'):
"""
Stack tables along columns (horizontally)
A ``join_type`` of 'exact' means that the tables must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' (default)
means the output will have the union of all rows, with table values being
masked where no common values are available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
Tables to stack along columns (horizontally) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
        String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value,
but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
See Also
--------
Table.add_columns, Table.replace_column, Table.update
Examples
--------
To stack two tables horizontally (along columns) do::
>>> from astropy.table import Table, hstack
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
c d
--- ---
5 7
6 8
>>> print(hstack([t1, t2]))
a b c d
--- --- --- ---
1 3 5 7
2 4 6 8
"""
_check_join_type(join_type, 'hstack')
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _hstack(tables, join_type, uniq_col_name, table_names,
col_name_map)
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def unique(input_table, keys=None, silent=False, keep='first'):
"""
Returns the unique rows of a table.
Parameters
----------
input_table : table-like
keys : str or list of str
Name(s) of column(s) used to create unique rows.
Default is to use all columns.
keep : {'first', 'last', 'none'}
Whether to keep the first or last row for each set of
duplicates. If 'none', all rows that are duplicate are
removed, leaving only rows that are already unique in
the input.
Default is 'first'.
silent : bool
If `True`, masked value column(s) are silently removed from
``keys``. If `False`, an exception is raised when ``keys``
contains masked value column(s).
Default is `False`.
Returns
-------
unique_table : `~astropy.table.Table` object
New table containing only the unique rows of ``input_table``.
Examples
--------
>>> from astropy.table import unique, Table
>>> import numpy as np
>>> table = Table(data=[[1,2,3,2,3,3],
... [2,3,4,5,4,6],
... [3,4,5,6,7,8]],
... names=['col1', 'col2', 'col3'],
... dtype=[np.int32, np.int32, np.int32])
>>> table
<Table length=6>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
2 5 6
3 4 7
3 6 8
>>> unique(table, keys='col1')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
>>> unique(table, keys=['col1'], keep='last')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 5 6
3 6 8
>>> unique(table, keys=['col1', 'col2'])
<Table length=5>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 4 5
3 6 8
>>> unique(table, keys=['col1', 'col2'], keep='none')
<Table length=4>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 6 8
>>> unique(table, keys=['col1'], keep='none')
<Table length=1>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
"""
if keep not in ('first', 'last', 'none'):
raise ValueError("'keep' should be one of 'first', 'last', 'none'")
if isinstance(keys, str):
keys = [keys]
if keys is None:
keys = input_table.colnames
else:
if len(set(keys)) != len(keys):
raise ValueError("duplicate key names")
# Check for columns with masked values
for key in keys[:]:
col = input_table[key]
if hasattr(col, 'mask') and np.any(col.mask):
if not silent:
raise ValueError(
"cannot use columns with masked values as keys; "
"remove column '{}' from keys and rerun "
"unique()".format(key))
del keys[keys.index(key)]
if len(keys) == 0:
raise ValueError("no column remained in ``keys``; "
"unique() cannot work with masked value "
"key columns")
grouped_table = input_table.group_by(keys)
indices = grouped_table.groups.indices
if keep == 'first':
indices = indices[:-1]
elif keep == 'last':
indices = indices[1:] - 1
else:
indices = indices[:-1][np.diff(indices) == 1]
return grouped_table[indices]
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
    Find the column-name mapping when merging the list of tables
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
    {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.colnames:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.colnames for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
    # Convert col_name_map to a regular OrderedDict ordered by the output column names
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
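# Illustrative sketch (not part of the astropy source): with 'a' as the
# common key column, the colliding non-key column 'b' is renamed per table
# while 'a' maps to both inputs.
#
#     >>> from astropy.table import Table  # doctest: +SKIP
#     >>> t1 = Table({'a': [1], 'b': [2]})
#     >>> t2 = Table({'a': [1], 'b': [3]})
#     >>> dict(get_col_name_map([t1, t2], common_names=['a']))
#     {'a': ['a', 'a'], 'b_1': ['b', None], 'b_2': [None, 'b']}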
def get_descrs(arrays, col_name_map):
"""
    Find the dtype descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
        # Output dtype is the superset of all dtypes in in_cols
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(names[0], tme._incompat_types)) from tme
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in in_cols)
if len(uniq_shapes) != 1:
raise TableMergeError(f'Key columns {names!r} have different shape')
shape = uniq_shapes.pop()
if out_name is not None:
out_name = str(out_name)
out_descrs.append((out_name, dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
try:
return metadata.common_dtype(cols)
except metadata.MergeConflictError as err:
tme = TableMergeError(f'Columns have incompatible types {err._incompat_types}')
tme._incompat_types = err._incompat_types
raise tme from err
def _get_join_sort_idxs(keys, left, right):
# Go through each of the key columns in order and make columns for
# a new structured array that represents the lexical ordering of those
# key columns. This structured array is then argsort'ed. The trick here
# is that some columns (e.g. Time) may need to be expanded into multiple
# columns for ordering here.
ii = 0 # Index for uniquely naming the sort columns
    sort_keys_dtypes = []  # sortable_table dtypes as list of (name, dtype_str) tuples
sort_keys = [] # sortable_table (structured ndarray) column names
sort_left = {} # sortable ndarrays from left table
    sort_right = {}  # sortable ndarrays from right table
for key in keys:
# get_sortable_arrays() returns a list of ndarrays that can be lexically
# sorted to represent the order of the column. In most cases this is just
# a single element of the column itself.
left_sort_cols = left[key].info.get_sortable_arrays()
right_sort_cols = right[key].info.get_sortable_arrays()
if len(left_sort_cols) != len(right_sort_cols):
# Should never happen because cols are screened beforehand for compatibility
raise RuntimeError('mismatch in sort cols lengths')
for left_sort_col, right_sort_col in zip(left_sort_cols, right_sort_cols):
# Check for consistency of shapes. Mismatch should never happen.
shape = left_sort_col.shape[1:]
if shape != right_sort_col.shape[1:]:
raise RuntimeError('mismatch in shape of left vs. right sort array')
if shape != ():
raise ValueError(f'sort key column {key!r} must be 1-d')
sort_key = str(ii)
sort_keys.append(sort_key)
sort_left[sort_key] = left_sort_col
sort_right[sort_key] = right_sort_col
# Build up dtypes for the structured array that gets sorted.
dtype_str = common_dtype([left_sort_col, right_sort_col])
sort_keys_dtypes.append((sort_key, dtype_str))
ii += 1
# Make the empty sortable table and fill it
len_left = len(left)
sortable_table = np.empty(len_left + len(right), dtype=sort_keys_dtypes)
for key in sort_keys:
sortable_table[key][:len_left] = sort_left[key]
sortable_table[key][len_left:] = sort_right[key]
# Finally do the (lexical) argsort and make a new sorted version
idx_sort = sortable_table.argsort(order=sort_keys)
sorted_table = sortable_table[idx_sort]
# Get indexes of unique elements (i.e. the group boundaries)
diffs = np.concatenate(([True], sorted_table[1:] != sorted_table[:-1], [True]))
idxs = np.flatnonzero(diffs)
return idxs, idx_sort
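# Minimal standalone sketch of the structured-array lexical sort above
# (names and values here are made up): argsort on the key fields, then find
# group boundaries where adjacent sorted rows differ.
#
#     >>> import numpy as np  # doctest: +SKIP
#     >>> st = np.empty(4, dtype=[('0', 'i8')])
#     >>> st['0'] = [2, 1, 2, 3]
#     >>> idx_sort = st.argsort(order=['0'])
#     >>> srt = st[idx_sort]
#     >>> np.flatnonzero(np.concatenate(([True], srt[1:] != srt[:-1], [True])))
#     array([0, 1, 3, 4])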
def _apply_join_funcs(left, right, keys, join_funcs):
"""Apply join_funcs
"""
# Make light copies of left and right, then add new index columns.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
for key, join_func in join_funcs.items():
ids1, ids2 = join_func(left[key], right[key])
# Define a unique id_key name, and keep adding underscores until we have
# a name not yet present.
id_key = key + '_id'
while id_key in left.columns or id_key in right.columns:
id_key = id_key[:-2] + '_id'
keys = tuple(id_key if orig_key == key else orig_key for orig_key in keys)
left.add_column(ids1, index=0, name=id_key) # [id_key] = ids1
right.add_column(ids2, index=0, name=id_key) # [id_key] = ids2
return left, right, keys
def _join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'],
col_name_map=None, metadata_conflicts='warn',
join_funcs=None,
keys_left=None, keys_right=None):
"""
Perform a join of the left and right Tables on specified keys.
Parameters
----------
left : Table
Left side table in the join
right : Table
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
uniq_col_name : str or None
        String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Special column name for cartesian join, should never collide with real column
cartesian_index_name = '__table_cartesian_join_temp_index__'
if join_type not in ('inner', 'outer', 'left', 'right', 'cartesian'):
raise ValueError("The 'join_type' argument should be in 'inner', "
"'outer', 'left', 'right', or 'cartesian' "
"(got '{}' instead)".
format(join_type))
if join_type == 'cartesian':
if keys:
raise ValueError('cannot supply keys for a cartesian join')
if join_funcs:
raise ValueError('cannot supply join_funcs for a cartesian join')
# Make light copies of left and right, then add temporary index columns
# with all the same value so later an outer join turns into a cartesian join.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
left[cartesian_index_name] = np.uint8(0)
right[cartesian_index_name] = np.uint8(0)
keys = (cartesian_index_name, )
# Handle the case of join key columns that are different between left and
# right via keys_left/keys_right args. This is done by saving the original
# input tables and making new left and right tables that contain only the
# key cols but with common column names ['0', '1', etc]. This sets `keys` to
# those fake key names in the left and right tables
if keys_left is not None or keys_right is not None:
left_orig = left
right_orig = right
left, right, keys = _join_keys_left_right(
left, right, keys, keys_left, keys_right, join_funcs)
if keys is None:
keys = tuple(name for name in left.colnames if name in right.colnames)
if len(keys) == 0:
raise TableMergeError('No keys in common between left and right tables')
elif isinstance(keys, str):
# If we have a single key, put it in a tuple
keys = (keys,)
# Check the key columns
for arr, arr_label in ((left, 'Left'), (right, 'Right')):
for name in keys:
if name not in arr.colnames:
raise TableMergeError('{} table does not have key column {!r}'
.format(arr_label, name))
if hasattr(arr[name], 'mask') and np.any(arr[name].mask):
raise TableMergeError('{} key column {!r} has missing values'
.format(arr_label, name))
if join_funcs is not None:
if not all(key in keys for key in join_funcs):
raise ValueError(f'join_funcs keys {join_funcs.keys()} must be a '
f'subset of join keys {keys}')
left, right, keys = _apply_join_funcs(left, right, keys, join_funcs)
len_left, len_right = len(left), len(right)
if len_left == 0 or len_right == 0:
raise ValueError('input tables for join must both have at least one row')
try:
idxs, idx_sort = _get_join_sort_idxs(keys, left, right)
except NotImplementedError:
raise TypeError('one or more key columns are not sortable')
# Now that we have idxs and idx_sort, revert to the original table args to
    # carry on with making the output joined table. `keys` is set to an empty
# list so that all original left and right columns are included in the
# output table.
if keys_left is not None or keys_right is not None:
keys = []
left = left_orig
right = right_orig
# Joined array dtype as a list of descr (name, type_str, shape) tuples
col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)
out_descrs = get_descrs([left, right], col_name_map)
# Main inner loop in Cython to compute the cartesian product
# indices for the given join type
int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3,
'cartesian': 1}[join_type]
masked, n_out, left_out, left_mask, right_out, right_mask = \
_np_utils.join_inner(idxs, idx_sort, len_left, int_join_type)
out = _get_out_class([left, right])()
for out_name, dtype, shape in out_descrs:
if out_name == cartesian_index_name:
continue
left_name, right_name = col_name_map[out_name]
if left_name and right_name: # this is a key which comes from left and right
cols = [left[left_name], right[right_name]]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('join unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name)
out[out_name][:] = np.where(right_mask,
left[left_name].take(left_out),
right[right_name].take(right_out))
continue
elif left_name: # out_name came from the left table
name, array, array_out, array_mask = left_name, left, left_out, left_mask
elif right_name:
name, array, array_out, array_mask = right_name, right, right_out, right_mask
else:
raise TableMergeError('Unexpected column names (maybe one is ""?)')
# Select the correct elements from the original table
col = array[name][array_out]
# If the output column is masked then set the output column masking
# accordingly. Check for columns that don't support a mask attribute.
if masked and np.any(array_mask):
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
# array_mask is 1-d corresponding to length of output column. We need to
# make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..).
# Mixin columns might not have ndim attribute so use len(col.shape).
array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1)
# Now broadcast to the correct final shape
array_mask = np.broadcast_to(array_mask, col.shape)
try:
col[array_mask] = col.info.mask_val
except Exception as err: # Not clear how different classes will fail here
raise NotImplementedError(
"join requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__)) from err
# Set the output table column to the new joined column
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
def _join_keys_left_right(left, right, keys, keys_left, keys_right, join_funcs):
"""Do processing to handle keys_left / keys_right args for join.
This takes the keys_left/right inputs and turns them into a list of left/right
columns corresponding to those inputs (which can be column names or column
data values). It also generates the list of fake key column names (strings
of "0", "1", etc.) that correspond to the input keys.
"""
def _keys_to_cols(keys, table, label):
# Process input `keys`, which is a str or list of str column names in
# `table` or a list of column-like objects. The `label` is just for
# error reporting.
if isinstance(keys, str):
keys = [keys]
cols = []
for key in keys:
if isinstance(key, str):
try:
cols.append(table[key])
except KeyError:
raise ValueError(f'{label} table does not have key column {key!r}')
else:
if len(key) != len(table):
raise ValueError(f'{label} table has different length from key {key}')
cols.append(key)
return cols
if join_funcs is not None:
raise ValueError('cannot supply join_funcs arg and keys_left / keys_right')
if keys_left is None or keys_right is None:
raise ValueError('keys_left and keys_right must both be provided')
if keys is not None:
raise ValueError('keys arg must be None if keys_left and keys_right are supplied')
cols_left = _keys_to_cols(keys_left, left, 'left')
cols_right = _keys_to_cols(keys_right, right, 'right')
if len(cols_left) != len(cols_right):
raise ValueError('keys_left and keys_right args must have same length')
# Make two new temp tables for the join with only the join columns and
# key columns in common.
keys = [f'{ii}' for ii in range(len(cols_left))]
left = left.__class__(cols_left, names=keys, copy=False)
right = right.__class__(cols_right, names=keys, copy=False)
return left, right, keys
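# A minimal sketch (hypothetical column names) of the keys_left / keys_right
# machinery via the public ``join`` wrapper around _join; shown as a comment
# so module behavior is unchanged:
#
#   from astropy.table import Table, join
#   t1 = Table({'id_a': [1, 2], 'x': [10.0, 20.0]})
#   t2 = Table({'id_b': [2, 3], 'y': [0.1, 0.2]})
#   t12 = join(t1, t2, keys_left='id_a', keys_right='id_b')
#   # -> one inner-matched row (id_a == id_b == 2) keeping all four columns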
def _check_join_type(join_type, func_name):
"""Check join_type arg in hstack and vstack.
This specifically checks for the common mistake of calling vstack(t1, t2)
instead of vstack([t1, t2]). The subsequent check of
``join_type in ('inner', ..)`` does not raise in this case.
"""
if not isinstance(join_type, str):
msg = '`join_type` arg must be a string'
if isinstance(join_type, Table):
msg += ('. Did you accidentally '
f'call {func_name}(t1, t2, ..) instead of '
f'{func_name}([t1, t2], ..)?')
raise TypeError(msg)
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")
def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'):
"""
Stack Tables vertically (by rows)
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same column names (though the order can vary). If
``join_type`` is 'inner' then the intersection of common columns will
be the output. A value of 'outer' means the output will have the union of
all columns, with array values being masked where no common values are
available.
Parameters
----------
arrays : list of Tables
Tables to stack by rows (vertically)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
# Start by assuming an outer match where all names go to output
names = set(itertools.chain(*[arr.colnames for arr in arrays]))
col_name_map = get_col_name_map(arrays, names)
# If join_type is 'exact' then the output must have exactly the same
# number of columns as each input array
if join_type == 'exact':
for names in col_name_map.values():
if any(x is None for x in names):
raise TableMergeError('Inconsistent columns in input arrays '
"(use 'inner' or 'outer' join_type to "
"allow non-matching columns)")
join_type = 'outer'
# For an inner join, keep only columns where all input arrays have that column
if join_type == 'inner':
col_name_map = OrderedDict((name, in_names) for name, in_names in col_name_map.items()
if all(x is not None for x in in_names))
if len(col_name_map) == 0:
raise TableMergeError('Input arrays have no columns in common')
lens = [len(arr) for arr in arrays]
n_rows = sum(lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('vstack unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
try:
col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name)
except metadata.MergeConflictError as err:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(out_name, err._incompat_types)) from err
idx0 = 0
for name, array in zip(in_names, arrays):
idx1 = idx0 + len(array)
if name in array.colnames:
col[idx0:idx1] = array[name]
else:
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
try:
col[idx0:idx1] = col.info.mask_val
except Exception as err:
raise NotImplementedError(
"vstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__)) from err
idx0 = idx1
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
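# A minimal sketch (hypothetical data) of the stacking semantics documented
# above, via the public ``vstack`` wrapper around _vstack:
#
#   from astropy.table import Table, vstack
#   t1 = Table({'a': [1, 2], 'b': [3.0, 4.0]})
#   t2 = Table({'a': [5]})
#   out = vstack([t2, t1])               # join_type='outer' is the default
#   # out['a'] == [5, 1, 2] and out['b'] is masked in the first row
#   # vstack([t1, t2], join_type='exact') raises TableMergeError here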
def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}',
table_names=None, col_name_map=None):
"""
Stack tables horizontally (by columns)
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' means
the output will have the union of all rows, with array values being
masked where no common values are available.
Parameters
----------
arrays : list of Tables
Tables to stack by columns (horizontally)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
if table_names is None:
table_names = [f'{ii + 1}' for ii in range(len(arrays))]
if len(arrays) != len(table_names):
raise ValueError('Number of arrays must match number of table_names')
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)
# If join_type is 'exact' then all input arrays must have the same length
arr_lens = [len(arr) for arr in arrays]
if join_type == 'exact':
if len(set(arr_lens)) > 1:
raise TableMergeError("Inconsistent number of rows in input arrays "
"(use 'inner' or 'outer' join_type to allow "
"non-matching rows)")
join_type = 'outer'
# For an inner join, keep only the common rows
if join_type == 'inner':
min_arr_len = min(arr_lens)
if len(set(arr_lens)) > 1:
arrays = [arr[:min_arr_len] for arr in arrays]
arr_lens = [min_arr_len for arr in arrays]
# If there are any output rows where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
n_rows = max(arr_lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
for name, array, arr_len in zip(in_names, arrays, arr_lens):
if name is None:
continue
if n_rows > arr_len:
indices = np.arange(n_rows)
indices[arr_len:] = 0
col = array[name][indices]
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
try:
col[arr_len:] = col.info.mask_val
except Exception as err:
raise NotImplementedError(
"hstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__)) from err
else:
col = array[name][:n_rows]
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
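# A minimal sketch (hypothetical data) of horizontal stacking, via the
# public ``hstack`` wrapper around _hstack:
#
#   from astropy.table import Table, hstack
#   t1 = Table({'a': [1, 2, 3]})
#   t2 = Table({'b': [10.0, 20.0]})
#   out = hstack([t1, t2])               # join_type='outer' is the default
#   # out has 3 rows and out['b'][2] is masked
#   # join_type='inner' would instead keep only the 2 common rows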
|
96256403fa381e0dcf27947de86da85a4bd9ba0b09218f5e935f8f308e8c00bd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from importlib import import_module
import re
from copy import deepcopy
from collections import OrderedDict
import numpy as np
from astropy.utils.data_info import MixinInfo
from .column import Column, MaskedColumn
from .table import Table, QTable, has_info_class
from astropy.units.quantity import QuantityInfo
# TODO: some of this might be better done programmatically, through
# code like
# __construct_mixin_classes += tuple(
# f'astropy.coordinates.representation.{cls.__name__}'
# for cls in (list(coorep.REPRESENTATION_CLASSES.values())
# + list(coorep.DIFFERENTIAL_CLASSES.values()))
# if cls.__name__ in coorep.__all__)
# However, to avoid very hard to track import issues, the definition
# should then be done at the point where it is actually needed,
# using local imports. See also
# https://github.com/astropy/astropy/pull/10210#discussion_r419087286
__construct_mixin_classes = (
'astropy.time.core.Time',
'astropy.time.core.TimeDelta',
'astropy.units.quantity.Quantity',
'astropy.units.function.logarithmic.Magnitude',
'astropy.units.function.logarithmic.Decibel',
'astropy.units.function.logarithmic.Dex',
'astropy.coordinates.angles.Latitude',
'astropy.coordinates.angles.Longitude',
'astropy.coordinates.angles.Angle',
'astropy.coordinates.distances.Distance',
'astropy.coordinates.earth.EarthLocation',
'astropy.coordinates.sky_coordinate.SkyCoord',
'astropy.table.ndarray_mixin.NdarrayMixin',
'astropy.table.table_helpers.ArrayWrapper',
'astropy.table.column.MaskedColumn',
'astropy.coordinates.representation.CartesianRepresentation',
'astropy.coordinates.representation.UnitSphericalRepresentation',
'astropy.coordinates.representation.RadialRepresentation',
'astropy.coordinates.representation.SphericalRepresentation',
'astropy.coordinates.representation.PhysicsSphericalRepresentation',
'astropy.coordinates.representation.CylindricalRepresentation',
'astropy.coordinates.representation.CartesianDifferential',
'astropy.coordinates.representation.UnitSphericalDifferential',
'astropy.coordinates.representation.SphericalDifferential',
'astropy.coordinates.representation.UnitSphericalCosLatDifferential',
'astropy.coordinates.representation.SphericalCosLatDifferential',
'astropy.coordinates.representation.RadialDifferential',
'astropy.coordinates.representation.PhysicsSphericalDifferential',
'astropy.coordinates.representation.CylindricalDifferential',
'astropy.utils.masked.core.MaskedNDArray',
)
class SerializedColumn(dict):
"""
Subclass of dict that is used in the representation to contain the name
(and possibly other info) for a mixin attribute (either primary data or an
array-like attribute) that is serialized as a column in the table.
Normally contains the single key ``name`` with the name of the column in the
table.
"""
pass
def _represent_mixin_as_column(col, name, new_cols, mixin_cols,
exclude_classes=()):
"""Carry out processing needed to serialize ``col`` in an output table
consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This
relies on the object to determine whether any transformation is required and may
depend on the ``serialize_method`` and ``serialize_context`` context
variables. For instance a ``MaskedColumn`` may be stored directly to
FITS, but can also be serialized as separate data and mask columns.
This function builds up a list of plain columns in the ``new_cols`` arg (which
is passed as a persistent list). This includes both plain columns from the
original table and plain columns that represent data from serialized columns
(e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column).
For serialized columns the ``mixin_cols`` dict is updated with required
attributes and information to subsequently reconstruct the table.
Table mixin columns are always serialized and get represented by one
or more data columns. In earlier versions of the code *only* mixin
columns were serialized, hence the use within this code of "mixin"
to imply serialization. Starting with version 3.1, the non-mixin
``MaskedColumn`` can also be serialized.
"""
obj_attrs = col.info._represent_as_dict()
# If serialization is not required (see function docstring above)
# or explicitly specified as excluded, then treat as a normal column.
if not obj_attrs or col.__class__ in exclude_classes:
new_cols.append(col)
return
# Subtlety here is handling mixin info attributes. The basic list of such
# attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'.
# - name: handled directly [DON'T store]
# - unit: DON'T store if this is a parent attribute
# - dtype: captured in plain Column if relevant [DON'T store]
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial in (('unit', lambda x: x is not None and x != ''),
('format', lambda x: x is not None),
('description', lambda x: x is not None),
('meta', lambda x: x)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
# Find column attributes that have the same length as the column itself.
# These will be stored in the table as new columns (aka "data attributes").
# Examples include SkyCoord.ra (what is typically considered the data and is
always an array) and SkyCoord.obstime (which can be a scalar or an
array).
data_attrs = [key for key, value in obj_attrs.items() if
getattr(value, 'shape', ())[:1] == col.shape[:1]]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
# New column name combines the old name and attribute
(e.g. skycoord.ra, skycoord.dec), unless it is the primary data
# attribute for the column (e.g. value for Quantity or data for
# MaskedColumn). For primary data, we attempt to store any info on
# the format, etc., on the column, but not for ancillary data (e.g.,
# no sense to use a float format for a mask).
is_primary = data_attr == col.info._represent_as_dict_primary_data
if is_primary:
new_name = name
new_info = info
else:
new_name = name + '.' + data_attr
new_info = {}
if not has_info_class(data, MixinInfo):
col_cls = MaskedColumn if (hasattr(data, 'mask')
and np.any(data.mask)) else Column
new_cols.append(col_cls(data, name=new_name, **new_info))
obj_attrs[data_attr] = SerializedColumn({'name': new_name})
if is_primary:
# Don't store info in the __serialized_columns__ dict for this column
# since this is redundant with info stored on the new column.
info = {}
else:
# recurse. This will define obj_attrs[new_name].
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name))
# Strip out from info any attributes defined by the parent,
# and store whatever remains.
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs['__info__'] = info
# Store the fully qualified class name
obj_attrs.setdefault('__class__',
col.__module__ + '.' + col.__class__.__name__)
mixin_cols[name] = obj_attrs
def represent_mixins_as_columns(tbl, exclude_classes=()):
"""Represent input Table ``tbl`` using only `~astropy.table.Column`
or `~astropy.table.MaskedColumn` objects.
This function represents any mixin columns like `~astropy.time.Time` in
``tbl`` as one or more plain `~astropy.table.Column` objects and returns
a new Table. A single mixin column may be split into multiple column
components as needed for fully representing the column. This includes the
possibility of recursive splitting, as shown in the example below. The
new column names are formed as ``<column_name>.<component>``, e.g.
``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``.
In addition to splitting columns, this function updates the table ``meta``
dictionary to include a dict named ``__serialized_columns__`` which provides
additional information needed to construct the original mixin columns from
the split columns.
This function is used by astropy I/O when writing tables to the ECSV, FITS,
and HDF5 formats.
Note that if the table does not include any mixin columns then the original
table is returned with no update to ``meta``.
Parameters
----------
tbl : `~astropy.table.Table` or subclass
Table to represent mixins as Columns
exclude_classes : tuple of class
Exclude any mixin columns which are instances of any classes in the tuple
Returns
-------
tbl : `~astropy.table.Table`
New Table with updated columns, or else the original input ``tbl``
Examples
--------
>>> from astropy.table import Table, represent_mixins_as_columns
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord
>>> x = [100.0, 200.0]
>>> obstime = Time([1999.0, 2000.0], format='jyear')
>>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime)
>>> tbl = Table([sc, x], names=['sc', 'x'])
>>> represent_mixins_as_columns(tbl)
<Table length=2>
sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x
deg deg
float64 float64 float64 float64 float64
------- ------- -------------- -------------- -------
1.0 3.0 2451180.0 -0.25 100.0
2.0 4.0 2451545.0 0.0 200.0
"""
# Dict of metadata for serializing each column, keyed by column name.
# Gets filled in place by _represent_mixin_as_column().
mixin_cols = {}
# List of columns for the output table. For plain Column objects
# this will just be the original column object.
new_cols = []
# Go through table columns and represent each column as one or more
# plain Column objects (in new_cols) + metadata (in mixin_cols).
for col in tbl.itercols():
_represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols,
exclude_classes=exclude_classes)
# If no metadata was created then just return the original table.
if mixin_cols:
meta = deepcopy(tbl.meta)
meta['__serialized_columns__'] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
else:
out = tbl
for col in out.itercols():
if not isinstance(col, Column) and col.__class__ not in exclude_classes:
# This catches columns for which info has not been set up right and
# therefore were not converted. See the corresponding test in
# test_mixin.py for an example.
raise TypeError(
'failed to represent column '
f'{col.info.name!r} ({col.__class__.__name__}) as one '
'or more Column subclasses. This looks like a mixin class '
'that does not have the correct _represent_as_dict() method '
'in the class `info` attribute.')
return out
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
cls_full_name = obj_attrs.pop('__class__')
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
if cls_full_name not in __construct_mixin_classes:
raise ValueError(f'unsupported class for construct {cls_full_name}')
mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups()
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
class _TableLite(OrderedDict):
"""
Minimal table-like object for _construct_mixin_from_columns. This allows
manipulating the object like a Table but without the actual overhead
for a full Table.
More pressingly, there is an issue when constructing a MaskedColumn, where
the encoded Column components (data, mask) are turned into a MaskedColumn.
When this happens in a real table, all other columns are immediately
masked and a warning is issued. This is not desirable.
"""
def add_column(self, col, index=0):
colnames = self.colnames
self[col.info.name] = col
for ii, name in enumerate(colnames):
if ii >= index:
self.move_to_end(name)
@property
def colnames(self):
return list(self.keys())
def itercols(self):
return self.values()
def _construct_mixin_from_columns(new_name, obj_attrs, out):
data_attrs_map = {}
for name, val in obj_attrs.items():
if isinstance(val, SerializedColumn):
if 'name' in val:
data_attrs_map[val['name']] = name
else:
out_name = f'{new_name}.{name}'
_construct_mixin_from_columns(out_name, val, out)
data_attrs_map[out_name] = name
for name in data_attrs_map.values():
del obj_attrs[name]
# Get the index where to add new column
idx = min(out.colnames.index(name) for name in data_attrs_map)
# Name is the column name in the table (e.g. "coord.ra") and
# data_attr is the object attribute name (e.g. "ra"). A different
# example would be a formatted time object that would have (e.g.)
# "time_col" and "value", respectively.
for name, data_attr in data_attrs_map.items():
obj_attrs[data_attr] = out[name]
del out[name]
info = obj_attrs.pop('__info__', {})
if len(data_attrs_map) == 1:
# col is the first and only serialized column; in that case, use info
# stored on the column. First step is to get that first column which
# has been moved from `out` to `obj_attrs` above.
data_attr = next(iter(data_attrs_map.values()))
col = obj_attrs[data_attr]
# Now copy the relevant attributes
for attr, nontrivial in (('unit', lambda x: x not in (None, '')),
('format', lambda x: x is not None),
('description', lambda x: x is not None),
('meta', lambda x: x)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
info['name'] = new_name
col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info)
out.add_column(col, index=idx)
def _construct_mixins_from_columns(tbl):
if '__serialized_columns__' not in tbl.meta:
return tbl
meta = tbl.meta.copy()
mixin_cols = meta.pop('__serialized_columns__')
out = _TableLite(tbl.columns)
for new_name, obj_attrs in mixin_cols.items():
_construct_mixin_from_columns(new_name, obj_attrs, out)
# If no quantity subclasses are in the output then output as Table.
# For instance ascii.read(file, format='ecsv') doesn't specify an
# output class and should return the minimal table class that
# represents the table file.
has_quantities = any(isinstance(col.info, QuantityInfo)
for col in out.itercols())
out_cls = QTable if has_quantities else Table
return out_cls(list(out.values()), names=out.colnames, copy=False, meta=meta)
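# Round-trip sketch (hypothetical data): represent_mixins_as_columns() splits
# mixin columns into plain columns plus ``__serialized_columns__`` metadata,
# and _construct_mixins_from_columns() rebuilds the original mixins:
#
#   import astropy.units as u
#   from astropy.table import QTable
#   t = QTable({'q': [1.0, 2.0] * u.m})
#   plain = represent_mixins_as_columns(t)        # only plain Column objects
#   round_trip = _construct_mixins_from_columns(plain)
#   # round_trip['q'] is a Quantity again, so round_trip is a QTable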
|
3803d4f018c9f128ab44a84800013570dd6f86430357447cbe49546064f5766e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The SCEngine class uses the ``sortedcontainers`` package to implement an
Index engine for Tables.
"""
from collections import OrderedDict
from itertools import starmap
from astropy.utils.compat.optional_deps import HAS_SORTEDCONTAINERS
if HAS_SORTEDCONTAINERS:
from sortedcontainers import SortedList
class Node:
__slots__ = ('key', 'value')
def __init__(self, key, value):
self.key = key
self.value = value
def __lt__(self, other):
if other.__class__ is Node:
return (self.key, self.value) < (other.key, other.value)
return self.key < other
def __le__(self, other):
if other.__class__ is Node:
return (self.key, self.value) <= (other.key, other.value)
return self.key <= other
def __eq__(self, other):
if other.__class__ is Node:
return (self.key, self.value) == (other.key, other.value)
return self.key == other
def __ne__(self, other):
if other.__class__ is Node:
return (self.key, self.value) != (other.key, other.value)
return self.key != other
def __gt__(self, other):
if other.__class__ is Node:
return (self.key, self.value) > (other.key, other.value)
return self.key > other
def __ge__(self, other):
if other.__class__ is Node:
return (self.key, self.value) >= (other.key, other.value)
return self.key >= other
__hash__ = None
def __repr__(self):
return f'Node({self.key!r}, {self.value!r})'
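# Nodes order primarily by key and secondarily by row value, and also compare
# against bare keys, which is what makes SortedList.irange(key, key) work
# below. A sketch:
#
#   Node((1,), 5) < Node((2,), 0)   # True: keys compared first
#   Node((1,), 5) == (1,)           # True: comparison against a bare key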
class SCEngine:
'''
Fast tree-based implementation for indexing, using the
``sortedcontainers`` package.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool
Whether the values of the index must be unique.
Defaults to False.
'''
def __init__(self, data, row_index, unique=False):
node_keys = map(tuple, data)
self._nodes = SortedList(starmap(Node, zip(node_keys, row_index)))
self._unique = unique
def add(self, key, value):
'''
Add a key, value pair.
'''
if self._unique and (key in self._nodes):
message = f'duplicate {key!r} in unique index'
raise ValueError(message)
self._nodes.add(Node(key, value))
def find(self, key):
'''
Find rows corresponding to the given key.
'''
return [node.value for node in self._nodes.irange(key, key)]
def remove(self, key, data=None):
'''
Remove data from the given key.
'''
if data is not None:
item = Node(key, data)
try:
self._nodes.remove(item)
except ValueError:
return False
return True
items = list(self._nodes.irange(key, key))
for item in items:
self._nodes.remove(item)
return bool(items)
def shift_left(self, row):
'''
Decrement rows larger than the given row.
'''
for node in self._nodes:
if node.value > row:
node.value -= 1
def shift_right(self, row):
'''
Increment rows greater than or equal to the given row.
'''
for node in self._nodes:
if node.value >= row:
node.value += 1
def items(self):
'''
Return a list of key, data tuples.
'''
result = OrderedDict()
for node in self._nodes:
if node.key in result:
result[node.key].append(node.value)
else:
result[node.key] = [node.value]
return result.items()
def sort(self):
'''
Make row order align with key order.
'''
for index, node in enumerate(self._nodes):
node.value = index
def sorted_data(self):
'''
Return a list of rows in order sorted by key.
'''
return [node.value for node in self._nodes]
def range(self, lower, upper, bounds=(True, True)):
'''
Return row values in the given range.
'''
iterator = self._nodes.irange(lower, upper, bounds)
return [node.value for node in iterator]
def replace_rows(self, row_map):
'''
Replace rows with the values in row_map.
'''
nodes = [node for node in self._nodes if node.value in row_map]
for node in nodes:
node.value = row_map[node.value]
self._nodes.clear()
self._nodes.update(nodes)
def __repr__(self):
if len(self._nodes) > 6:
nodes = list(self._nodes[:3]) + ['...'] + list(self._nodes[-3:])
else:
nodes = self._nodes
nodes_str = ', '.join(str(node) for node in nodes)
return f'<{self.__class__.__name__} nodes={nodes_str}>'
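# A minimal usage sketch (hypothetical data; requires the optional
# ``sortedcontainers`` dependency), via the public Table.add_index API:
#
#   from astropy.table import Table
#   t = Table({'a': [2, 1, 3]})
#   t.add_index('a', engine=SCEngine)
#   t.loc[2]                 # row where a == 2, found via this engine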
|
063b408848582eabfe45f09a87c6613ed8319d53254aabea0b6810bce4266a4c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from copy import deepcopy
import numpy as np
from .bst import MinValue, MaxValue
from .sorted_array import SortedArray
class QueryError(ValueError):
'''
Indicates that a given index cannot handle the supplied query.
'''
pass
class Index:
'''
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
and SCEngine) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __init__(self, columns, engine=None, unique=False):
# Local imports to avoid import problems.
from .table import Table, Column
from astropy.time import Time
if columns is not None:
columns = list(columns)
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(col.jd, format='jd', scale=col.scale)
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
'''
Number of rows in index.
'''
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
'''
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
'''
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
'''
Recreate the index based on data in self.columns.
'''
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
'''
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
'''
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError(f"Column does not belong to index: {col_name}")
def insert_row(self, pos, vals, columns):
'''
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
'''
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[self.col_position(col.info.name)] = vals[i]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
'''
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
'''
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError("Expected int, array of ints, or slice but "
"got {} in remove_rows".format(row_specifier))
def remove_rows(self, row_specifier):
'''
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
'''
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
'''
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
'''
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple([col[row] for col in self.columns]), row):
raise ValueError(f"Could not remove row {row} from index")
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
'''
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
'''
return self.data.find(key)
def same_prefix(self, key):
'''
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
'''
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
'''
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
# (x, y) search corresponds to ((x, max), (y, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
'''
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
'''
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
'''
row_map = dict((row, i) for i, row in enumerate(col_slice))
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __repr__(self):
col_names = tuple(col.info.name for col in self.columns)
return f'<{self.__class__.__name__} columns={col_names} data={self.data}>'
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
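# A minimal usage sketch (hypothetical data): Index objects are normally
# created through the public Table.add_index API rather than directly:
#
#   from astropy.table import Table
#   t = Table({'a': [3, 1, 2]})
#   t.add_index('a')
#   t.indices['a'].sorted_data()   # -> [1, 2, 0], rows sorted by 'a'
#   t.loc[2]                       # row where a == 2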
class SlicedIndex:
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : tuple, slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
elif isinstance(index_slice, slice): # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
else:
raise TypeError('index_slice must be tuple or slice')
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def get_index_or_copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.get_index_or_copy().insert_row(self.orig_coords(pos), vals, columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.get_index_or_copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.get_index_or_copy().sort()
def __repr__(self):
slice_str = '' if self.original else f' slice={self.start}:{self.stop}:{self.step}'
return (f'<{self.__class__.__name__} original={self.original}{slice_str}'
f' index={self.index}>')
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
index = Index([col_slice], engine=self.data.__class__)
return self.__class__(index, slice(0, 0, None), original=True)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
index = Index(new_cols, engine=self.data.__class__)
return self.__class__(index, slice(0, 0, None), original=True)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy=None, names=None):
"""
Return the index in ``table`` corresponding to some subset of its columns,
or None if no such index exists. The subset may be specified either as a
table containing those columns (``table_copy``) or as a list or tuple of
column names (``names``).
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
if names is not None and table_copy is not None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is None and table_copy is None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is not None:
names = set(names)
else:
names = set(table_copy.colnames)
if not names <= set(table.colnames):
raise ValueError(f'{names} is not a subset of table columns')
for name in names:
for index in table[name].info.indices:
if set([col.info.name for col in index.columns]) == names:
return index
return None
def get_index_by_names(table, names):
'''
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
names : tuple, list
Column names
'''
names = list(names)
for index in table.indices:
index_names = [col.info.name for col in index.columns]
if index_names == names:
return index
return None
class _IndexModeContext:
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif self.mode == 'copy_on_getitem':
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'discard_on_copy':
self.table._copy_indices = True
elif self.mode == 'copy_on_getitem':
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = f'_{cls.__name__}WithIndexCopy'
new_cls = type(str(clsname), (cls,), {'__getitem__': __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
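# A usage sketch (hypothetical data) of the modes above, via the public
# Table.index_mode() context manager which wraps this class:
#
#   from astropy.table import Table
#   t = Table({'a': [1, 2, 3]})
#   t.add_index('a')
#   with t.index_mode('freeze'):
#       t['a'][0] = 10       # index not updated inside the block
#   # on exit, the index refreshes itself from the column values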
class TableIndices(list):
'''
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
'''
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
'''
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
'''
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError(f"No index found for {item}")
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
Retrieve the Table row indices corresponding to a value slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError(f'No matches found for key {key}')
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {item}')
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
Assign Table rows by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
value : New values of the row elements.
Can be a list of tuples/lists to update the row.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {key}')
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError(f'Right side should contain {len(rows)} values')
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
Retrieve Table row indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {item}')
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
'''
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
'''
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError(f'Invalid index for iloc: {item}')
return table_slice
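# A retrieval sketch (hypothetical data) contrasting .loc and .iloc:
#
#   from astropy.table import Table
#   t = Table({'a': [20, 10, 30]})
#   t.add_index('a')
#   t.loc[10]        # row whose 'a' value is 10 (lookup by value)
#   t.iloc[0]        # first row in index-sorted order, i.e. a == 10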
|
2a7110f80c98f5adf82ba5a6956cf2039621dea290b84866126f7edc9f35c40e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
def _searchsorted(array, val, side='left'):
'''
Call np.searchsorted or use a custom binary
search if necessary.
'''
if hasattr(array, 'searchsorted'):
return array.searchsorted(val, side=side)
# Python binary search
begin = 0
end = len(array)
while begin < end:
mid = (begin + end) // 2
if val > array[mid]:
begin = mid + 1
elif val < array[mid]:
end = mid
elif side == 'right':
begin = mid + 1
else:
end = mid
return begin
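# For example (a sketch), with array == [1, 2, 2, 3]:
#   _searchsorted(array, 2, side='left')   # -> 1, first position >= 2
#   _searchsorted(array, 2, side='right')  # -> 3, first position > 2
# matching np.searchsorted semantics for objects that lack their own
# searchsorted() method.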
class SortedArray:
'''
Implements a sorted array container using
a list of numpy arrays.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool
Whether the values of the index must be unique.
Defaults to False.
'''
def __init__(self, data, row_index, unique=False):
self.data = data
self.row_index = row_index
self.num_cols = len(getattr(data, 'colnames', []))
self.unique = unique
@property
def cols(self):
return list(self.data.columns.values())
def add(self, key, row):
'''
Add a new entry to the sorted array.
Parameters
----------
key : tuple
Column values at the given row
row : int
Row number
'''
pos = self.find_pos(key, row) # first >= key
if self.unique and 0 <= pos < len(self.row_index) and \
all(self.data[pos][i] == key[i] for i in range(len(key))):
# already exists
raise ValueError(f'Cannot add duplicate value "{key}" in a unique index')
self.data.insert_row(pos, key)
self.row_index = self.row_index.insert(pos, row)
def _get_key_slice(self, i, begin, end):
'''
Retrieve the ith slice of the sorted array
from begin to end.
'''
if i < self.num_cols:
return self.cols[i][begin:end]
else:
return self.row_index[begin:end]
def find_pos(self, key, data, exact=False):
'''
Return the position of the first entry in the sorted data that is
greater than or equal to the given (key, data) pair.
Parameters
----------
key : tuple
Column key
data : int
Row number
exact : bool
If True, return the index of the given key in data
or -1 if the key is not present.
'''
begin = 0
end = len(self.row_index)
num_cols = self.num_cols
if not self.unique:
# consider the row value as well
key = key + (data,)
num_cols += 1
# search through keys in lexicographic order
for i in range(num_cols):
key_slice = self._get_key_slice(i, begin, end)
t = _searchsorted(key_slice, key[i])
# t is the smallest index >= key[i]
if exact and (t == len(key_slice) or key_slice[t] != key[i]):
# no match
return -1
elif t == len(key_slice) or (t == 0 and len(key_slice) > 0
and key[i] < key_slice[0]):
# too small or too large
return begin + t
end = begin + _searchsorted(key_slice, key[i], side='right')
begin += t
if begin >= len(self.row_index): # greater than all keys
return begin
return begin
def find(self, key):
'''
Find all rows matching the given key.
Parameters
----------
key : tuple
Column values
Returns
-------
matching_rows : list
List of rows matching the input key
'''
begin = 0
end = len(self.row_index)
# search through keys in lexicographic order
for i in range(self.num_cols):
key_slice = self._get_key_slice(i, begin, end)
t = _searchsorted(key_slice, key[i])
# t is the smallest index >= key[i]
if t == len(key_slice) or key_slice[t] != key[i]:
# no match
return []
elif t == 0 and len(key_slice) > 0 and key[i] < key_slice[0]:
# too small or too large
return []
end = begin + _searchsorted(key_slice, key[i], side='right')
begin += t
if begin >= len(self.row_index): # greater than all keys
return []
return self.row_index[begin:end]
def range(self, lower, upper, bounds):
'''
Find values in the given range.
Parameters
----------
lower : tuple
Lower search bound
upper : tuple
Upper search bound
bounds : (2,) tuple of bool
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument corresponds to an inclusive lower bound,
and the second argument to an inclusive upper bound.
'''
lower_pos = self.find_pos(lower, 0)
upper_pos = self.find_pos(upper, 0)
if lower_pos == len(self.row_index):
return []
lower_bound = tuple([col[lower_pos] for col in self.cols])
if not bounds[0] and lower_bound == lower:
lower_pos += 1 # data[lower_pos] > lower
# data[lower_pos] >= lower
# data[upper_pos] >= upper
if upper_pos < len(self.row_index):
upper_bound = tuple([col[upper_pos] for col in self.cols])
if not bounds[1] and upper_bound == upper:
upper_pos -= 1 # data[upper_pos] < upper
elif upper_bound > upper:
upper_pos -= 1 # data[upper_pos] <= upper
return self.row_index[lower_pos:upper_pos + 1]
def remove(self, key, data):
'''
Remove the given entry from the sorted array.
Parameters
----------
key : tuple
Column values
data : int
Row number
Returns
-------
successful : bool
Whether the entry was successfully removed
'''
pos = self.find_pos(key, data, exact=True)
if pos == -1: # key not found
return False
self.data.remove_row(pos)
keep_mask = np.ones(len(self.row_index), dtype=bool)
keep_mask[pos] = False
self.row_index = self.row_index[keep_mask]
return True
def shift_left(self, row):
'''
Decrement all row numbers greater than the input row.
Parameters
----------
row : int
Input row number
'''
self.row_index[self.row_index > row] -= 1
def shift_right(self, row):
'''
Increment all row numbers greater than or equal to the input row.
Parameters
----------
row : int
Input row number
'''
self.row_index[self.row_index >= row] += 1
def replace_rows(self, row_map):
'''
Replace all rows with the values they map to in the
given dictionary. Any rows not present as keys in
the dictionary will have their entries deleted.
Parameters
----------
row_map : dict
Mapping of row numbers to new row numbers
'''
num_rows = len(row_map)
keep_rows = np.zeros(len(self.row_index), dtype=bool)
tagged = 0
for i, row in enumerate(self.row_index):
if row in row_map:
keep_rows[i] = True
tagged += 1
if tagged == num_rows:
break
self.data = self.data[keep_rows]
self.row_index = np.array(
[row_map[x] for x in self.row_index[keep_rows]])
def items(self):
'''
Retrieve all array items as a list of pairs of the form
[(key, [row 1, row 2, ...]), ...]
'''
array = []
last_key = None
for i, key in enumerate(zip(*self.data.columns.values())):
row = self.row_index[i]
if key == last_key:
array[-1][1].append(row)
else:
last_key = key
array.append((key, [row]))
return array
def sort(self):
'''
Make row order align with key order.
'''
self.row_index = np.arange(len(self.row_index))
def sorted_data(self):
'''
Return rows in sorted order.
'''
return self.row_index
def __getitem__(self, item):
'''
Return a sliced reference to this sorted array.
Parameters
----------
item : slice
Slice to use for referencing
'''
return SortedArray(self.data[item], self.row_index[item])
def __repr__(self):
t = self.data.copy()
t['rows'] = self.row_index
return f'<{self.__class__.__name__} length={len(t)}>\n{t}'
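# Editor's illustrative sketch (hypothetical table, not part of astropy): a
# SortedArray pairs sorted key columns with the original row numbers, which
# is what find() and range() hand back.
def _demo_sorted_array():
    from astropy.table import Table, Column
    keys = Table([[1, 2, 3, 4]], names=('a',))       # columns sorted by key
    index = SortedArray(keys, Column([2, 0, 3, 1]))  # original row numbers
    hit = index.find((3,))                           # -> rows [3]
    window = index.range((2,), (3,), (True, True))   # inclusive -> rows [0, 3]
    return hit, window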
|
74d945b8e25af2d5a74510c2585b7ad8c67bb880a8881cf09cb305eec7e60ff8 | # -*- coding: utf-8 -*-
ascii_coded = ('ΓββββββββββββββββββββββΓββββββββββββββββββββββΓββββββββββββββββββββββΓββββββββββ'
'ββββββββββββΓββββββββββββββββββββββΓββββββββββββββββββββββΓβββββββββββββββββββββ'
'βΓββββββββββββββββββββββΓββββββββββββββββββββββΓββββββββββββββββββββββΓβββββββββ'
'βββββββββββββΓββββββββββββββββββββββΓ')
ascii_uncoded = ''.join([chr(ord(c) - 200) for c in ascii_coded])
url = 'https://media.giphy.com/media/e24Q8FKE2mxRS/giphy.gif'
message_coded = 'ΔΔ©ΔΆΔ¬Δ©Δ»Γ·ΔΔ©ΔͺΔ΄ΔΓ¨Δ±ΔΆΔΌΔΔΊΔ©Δ«ΔΌΔ±Δ·ΔΆ'
message_uncoded = ''.join([chr(ord(c) - 200) for c in message_coded])
try:
from IPython import display
html = display.Image(url=url)._repr_html_()
class HTMLWithBackup(display.HTML):
def __init__(self, data, backup_text):
super().__init__(data)
self.backup_text = backup_text
def __repr__(self):
if self.backup_text is None:
return super().__repr__()
else:
return self.backup_text
dhtml = HTMLWithBackup(html, ascii_uncoded)
display.display(dhtml)
except ImportError:
print(ascii_uncoded)
except (UnicodeEncodeError, SyntaxError):
pass
|
f1ee16e162b20815876d2bc1e600a2b4c85cb8b2b7dc527b91caf1979ac11af1 | """
High-level operations for numpy structured arrays.
Some code and inspiration taken from numpy.lib.recfunctions.join_by().
Redistribution license restrictions apply.
"""
import collections
from collections import OrderedDict, Counter
from collections.abc import Sequence
import numpy as np
__all__ = ['TableMergeError']
class TableMergeError(ValueError):
pass
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of structured ndarrays
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
    Returns a dict mapping each output column name to the corresponding input column
    names, of the form {out_name: [col_name_0, col_name_1, ...], ...}. For key columns
    all of the input names will be present, while for non-key columns the value will be
    [col_name_0, None, ...] or [None, col_name_1, ...] etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.dtype.names:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.dtype.names for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
    # Convert col_name_map to a regular OrderedDict keyed in output column order
    # (the values are the per-array input name lists built above)
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
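# Editor's illustrative sketch (hypothetical arrays, not part of astropy):
# with 'key' declared common, the conflicting 'x' columns are renamed using
# the default '{col_name}_{table_name}' pattern.
def _demo_col_name_map():
    a = np.empty(2, dtype=[('key', 'i8'), ('x', 'f8')])
    b = np.empty(2, dtype=[('key', 'i8'), ('x', 'f8')])
    # -> {'key': ['key', 'key'], 'x_1': ['x', None], 'x_2': [None, 'x']}
    return get_col_name_map([a, b], common_names=['key'])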
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(names[0], tme._incompat_types)) from tme
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in in_cols)
if len(uniq_shapes) != 1:
raise TableMergeError('Key columns have different shape')
shape = uniq_shapes.pop()
if out_name is not None:
out_name = str(out_name)
out_descrs.append((out_name, dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of structured ndarray columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = set(tuple(issubclass(col.dtype.type, np_type) for np_type in np_types)
for col in cols)
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [col.dtype.name for col in cols]
tme = TableMergeError(f'Columns have incompatible types {incompat_types}')
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=col.dtype) for col in cols]
# For string-type arrays need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for arr in arrs:
if arr.dtype.kind in ('S', 'U'):
arr[0] = '0' * arr.itemsize
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
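# Editor's illustrative sketch (hypothetical columns, not part of astropy):
# int and float columns promote to float64 (e.g. '<f8' on a little-endian
# platform), while mixing a number with a string column raises TableMergeError.
def _demo_common_dtype():
    promoted = common_dtype([np.array([1, 2]), np.array([2.5])])
    try:
        common_dtype([np.array([1, 2]), np.array(['a'])])
    except TableMergeError as exc:
        return promoted, exc._incompat_types   # e.g. ('<f8', ['int64', 'str32'])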
def _check_for_sequence_of_structured_arrays(arrays):
err = '`arrays` arg must be a sequence (e.g. list) of structured arrays'
if not isinstance(arrays, Sequence):
raise TypeError(err)
for array in arrays:
# Must be structured array
if not isinstance(array, np.ndarray) or array.dtype.names is None:
raise TypeError(err)
if len(arrays) == 0:
raise ValueError('`arrays` arg must include at least one array')
|
d95774d69e7b29e829461969e1ddbf88bb2a3356ea8c13e0513bf44d7728212f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for celestial coordinates
of astronomical objects. It also contains a framework for conversions
between coordinate systems.
"""
from .errors import *
from .angles import *
from .baseframe import *
from .attributes import *
from .distances import *
from .earth import *
from .transformations import *
from .builtin_frames import *
from .name_resolve import *
from .matching import *
from .representation import *
from .sky_coordinate import *
from .funcs import *
from .calculation import *
from .solar_system import *
from .spectral_quantity import *
from .spectral_coordinate import *
from .angle_utilities import *
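# Editor's illustrative sketch (hypothetical coordinates, not part of the
# package): the names star-imported above cooperate, e.g. a SkyCoord built
# in ICRS can be converted to another frame attribute-style.
def _demo_frame_conversion():
    from astropy import units as u
    c = SkyCoord(ra=10.5 * u.deg, dec=-12.3 * u.deg, frame='icrs')
    return c.galactic   # the same point on the sky expressed in Galactic l, b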
|
05acf53b2bd558e8e021442eb12f9e2b4b4d268be0eb466a999e35bd805044ad | import warnings
from textwrap import indent
import astropy.units as u
import numpy as np
from astropy.constants import c
from astropy.coordinates import (ICRS,
CartesianDifferential,
CartesianRepresentation, SkyCoord)
from astropy.coordinates.spectral_quantity import SpectralQuantity
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
frame_transform_graph)
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['SpectralCoord']
class NoVelocityWarning(AstropyUserWarning):
pass
class NoDistanceWarning(AstropyUserWarning):
pass
KMS = u.km / u.s
ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS)
# Default distance to use for target when none is provided
DEFAULT_DISTANCE = 1e6 * u.kpc
# We don't want to run doctests in the docstrings we inherit from Quantity
__doctest_skip__ = ['SpectralCoord.*']
def _apply_relativistic_doppler_shift(scoord, velocity):
"""
Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity`
that is Doppler shifted by this amount.
Note that the Doppler shift applied is the full relativistic one, so
`SpectralQuantity` currently expressed in velocity and not using the
relativistic convention will temporarily be converted to use the
relativistic convention while the shift is applied.
Positive velocities are assumed to redshift the spectral quantity,
while negative velocities blueshift the spectral quantity.
"""
# NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact
# since we can't guarantee that their metadata would be correct/consistent.
squantity = scoord.view(SpectralQuantity)
beta = velocity / c
doppler_factor = np.sqrt((1 + beta) / (1 - beta))
if squantity.unit.is_equivalent(u.m): # wavelength
return squantity * doppler_factor
elif (squantity.unit.is_equivalent(u.Hz) or
squantity.unit.is_equivalent(u.eV) or
squantity.unit.is_equivalent(1 / u.m)):
return squantity / doppler_factor
elif squantity.unit.is_equivalent(KMS): # velocity
return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit)
else: # pragma: no cover
raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. "
"This should not happen, so please report this in the "
"astropy issue tracker!")
def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False):
"""
Given an original coordinate object, update the differentials so that
the final coordinate is at the same location as the original coordinate
but co-moving with the velocity reference object.
    If ``preserve_observer_frame`` is set to True, the resulting object will be in
the frame of the original coordinate, otherwise it will be in the frame of
the velocity reference.
"""
if not velocity_reference.data.differentials:
raise ValueError("Reference frame has no velocities")
# If the reference has an obstime already defined, we should ignore
# it and stick with the original observer obstime.
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'):
velocity_reference = velocity_reference.replicate(obstime=original.obstime)
# We transform both coordinates to ICRS for simplicity and because we know
# it's a simple frame that is not time-dependent (it could be that both
# the original and velocity_reference frame are time-dependent)
original_icrs = original.transform_to(ICRS())
velocity_reference_icrs = velocity_reference.transform_to(ICRS())
differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation,
CartesianDifferential).differentials
data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation)
.with_differentials(differentials))
final_icrs = original_icrs.realize_frame(data_with_differentials)
if preserve_observer_frame:
final = final_icrs.transform_to(original)
else:
final = final_icrs.transform_to(velocity_reference)
return final.replicate(representation_type=CartesianRepresentation,
differential_type=CartesianDifferential)
def attach_zero_velocities(coord):
"""
Set the differentials to be stationary on a coordinate object.
"""
new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES)
return coord.realize_frame(new_data)
def _get_velocities(coord):
if 's' in coord.data.differentials:
return coord.velocity
else:
return ZERO_VELOCITIES
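# Editor's illustrative sketch (hypothetical coordinate, not part of astropy):
# a coordinate created without velocity data gets explicit zero differentials,
# so later velocity arithmetic is well defined.
def _demo_zero_velocities():
    coord = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.kpc)
    coord = attach_zero_velocities(coord)
    return _get_velocities(coord)   # -> CartesianDifferential of zeros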
class SpectralCoord(SpectralQuantity):
"""
A spectral coordinate with its corresponding unit.
.. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be
considered experimental at this time. Note that we do not fully
support cases where the observer and target are moving
relativistically relative to each other, so care should be taken
in those cases. It is possible that there will be API changes in
future versions of Astropy based on user feedback. If you have
specific ideas for how it might be improved, please let us know
on the `astropy-dev mailing list`_ or at
http://feedback.astropy.org.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer. If no velocities
are present on this object, the observer is assumed to be stationary
relative to the frame origin.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target. If no velocities
are present on this object, the target is assumed to be stationary
relative to the frame origin.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer. This
can only be specified if ``redshift`` is not specified.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
        This can only be specified if ``radial_velocity`` is not specified.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
"""
@u.quantity_input(radial_velocity=u.km/u.s)
def __new__(cls, value, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
**kwargs):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# There are two main modes of operation in this class. Either the
# observer and target are both defined, in which case the radial
# velocity and redshift are automatically computed from these, or
# only one of the observer and target are specified, along with a
# manually specified radial velocity or redshift. So if a target and
# observer are both specified, we can't also accept a radial velocity
# or redshift.
if target is not None and observer is not None:
if radial_velocity is not None or redshift is not None:
raise ValueError("Cannot specify radial velocity or redshift if both "
"target and observer are specified")
# We only deal with redshifts here and in the redshift property.
# Otherwise internally we always deal with velocities.
if redshift is not None:
if radial_velocity is not None:
raise ValueError("Cannot set both a radial velocity and redshift")
redshift = u.Quantity(redshift)
# For now, we can't specify redshift=u.one in quantity_input above
# and have it work with plain floats, but if that is fixed, for
# example as in https://github.com/astropy/astropy/pull/10232, we
# can remove the check here and add redshift=u.one to the decorator
if not redshift.unit.is_equivalent(u.one):
raise u.UnitsError('redshift should be dimensionless')
radial_velocity = redshift.to(u.km / u.s, u.doppler_redshift())
# If we're initializing from an existing SpectralCoord, keep any
# parameters that aren't being overridden
if observer is None:
observer = getattr(value, 'observer', None)
if target is None:
target = getattr(value, 'target', None)
# As mentioned above, we should only specify the radial velocity
# manually if either or both the observer and target are not
# specified.
if observer is None or target is None:
if radial_velocity is None:
radial_velocity = getattr(value, 'radial_velocity', None)
obj._radial_velocity = radial_velocity
obj._observer = cls._validate_coordinate(observer, label='observer')
obj._target = cls._validate_coordinate(target, label='target')
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._radial_velocity = getattr(obj, '_radial_velocity', None)
self._observer = getattr(obj, '_observer', None)
self._target = getattr(obj, '_target', None)
@staticmethod
def _validate_coordinate(coord, label=''):
"""
Checks the type of the frame and whether a velocity differential and a
distance has been defined on the frame object.
If no distance is defined, the target is assumed to be "really far
away", and the observer is assumed to be "in the solar system".
Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame`
The new frame to be used for target or observer.
label : str, optional
The name of the object being validated (e.g. 'target' or 'observer'),
which is then used in error messages.
"""
if coord is None:
return
if not issubclass(coord.__class__, BaseCoordinateFrame):
if isinstance(coord, SkyCoord):
coord = coord.frame
else:
raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance")
# If the distance is not well-defined, ensure that it works properly
# for generating differentials
# TODO: change this to not set the distance and yield a warning once
# there's a good way to address this in astropy.coordinates
# https://github.com/astropy/astropy/issues/10247
with np.errstate(all='ignore'):
distance = getattr(coord, 'distance', None)
if distance is not None and distance.unit.physical_type == 'dimensionless':
coord = SkyCoord(coord, distance=DEFAULT_DISTANCE)
warnings.warn(
"Distance on coordinate object is dimensionless, an "
f"arbitrary distance value of {DEFAULT_DISTANCE} will be set instead.",
NoDistanceWarning)
# If the observer frame does not contain information about the
# velocity of the system, assume that the velocity is zero in the
# system.
if 's' not in coord.data.differentials:
warnings.warn(
f"No velocity defined on frame, assuming {ZERO_VELOCITIES}.",
NoVelocityWarning)
coord = attach_zero_velocities(coord)
return coord
def replicate(self, value=None, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
doppler_convention=None, doppler_rest=None,
copy=False):
"""
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.
Note that no conversion is carried out by this method - this keeps
all the values and attributes the same, except for the ones explicitly
passed to this method which are changed.
If ``copy`` is set to `True` then a full copy of the internal arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
copy : bool, optional
If `True`, and ``value`` is not specified, the values are copied to
            the new `SpectralCoord` - otherwise a reference to the same values is used.
Returns
-------
sc : `SpectralCoord` object
Replica of this object
"""
if isinstance(value, u.Quantity):
if unit is not None:
raise ValueError("Cannot specify value as a Quantity and also specify unit")
else:
value, unit = value.value, value.unit
value = value if value is not None else self.value
unit = unit or self.unit
observer = self._validate_coordinate(observer) or self.observer
target = self._validate_coordinate(target) or self.target
doppler_convention = doppler_convention or self.doppler_convention
doppler_rest = doppler_rest or self.doppler_rest
        # If value is being taken from self and copy is True, make a copy so
        # that the replica does not share memory with the original
if copy:
value = value.copy()
# Only include radial_velocity if it is not auto-computed from the
# observer and target.
if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None:
radial_velocity = self.radial_velocity
with warnings.catch_warnings():
warnings.simplefilter('ignore', NoVelocityWarning)
return self.__class__(value=value, unit=unit,
observer=observer, target=target,
radial_velocity=radial_velocity, redshift=redshift,
doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False)
@property
def quantity(self):
"""
Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`.
Equivalent to ``self.view(u.Quantity)``.
Returns
-------
`~astropy.units.Quantity`
This object viewed as a `~astropy.units.Quantity`.
"""
return self.view(u.Quantity)
@property
def observer(self):
"""
The coordinates of the observer.
If set, and a target is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the observation.
"""
return self._observer
@observer.setter
def observer(self, value):
if self.observer is not None:
raise ValueError("observer has already been set")
self._observer = self._validate_coordinate(value, label='observer')
# Switch to auto-computing radial velocity
if self._target is not None:
self._radial_velocity = None
@property
def target(self):
"""
The coordinates of the target being observed.
If set, and an observer is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the target.
"""
return self._target
@target.setter
def target(self, value):
if self.target is not None:
raise ValueError("target has already been set")
self._target = self._validate_coordinate(value, label='target')
# Switch to auto-computing radial velocity
if self._observer is not None:
self._radial_velocity = None
@property
def radial_velocity(self):
"""
Radial velocity of target relative to the observer.
Returns
-------
`~astropy.units.Quantity` ['speed']
Radial velocity of target.
Notes
-----
This is different from the ``.radial_velocity`` property of a
coordinate frame in that this calculates the radial velocity with
respect to the *observer*, not the origin of the frame.
"""
if self._observer is None or self._target is None:
if self._radial_velocity is None:
return 0 * KMS
else:
return self._radial_velocity
else:
return self._calculate_radial_velocity(self._observer, self._target,
as_scalar=True)
@property
def redshift(self):
"""
Redshift of target relative to observer. Calculated from the radial
velocity.
Returns
-------
`astropy.units.Quantity`
Redshift of target.
"""
return self.radial_velocity.to(u.dimensionless_unscaled, u.doppler_redshift())
@staticmethod
def _calculate_radial_velocity(observer, target, as_scalar=False):
"""
Compute the line-of-sight velocity from the observer to the target.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the observer.
target : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the target.
as_scalar : bool
If `True`, the magnitude of the velocity vector will be returned,
otherwise the full vector will be returned.
Returns
-------
`~astropy.units.Quantity` ['speed']
The radial velocity of the target with respect to the observer.
"""
# Convert observer and target to ICRS to avoid finite differencing
# calculations that lack numerical precision.
observer_icrs = observer.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
d_vel = target_icrs.velocity - observer_icrs.velocity
vel_mag = pos_hat.dot(d_vel)
if as_scalar:
return vel_mag
else:
return vel_mag * pos_hat
@staticmethod
def _normalized_position_vector(observer, target):
"""
Calculate the normalized position vector between two frames.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame or coordinate.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The target frame or coordinate.
Returns
-------
pos_hat : `BaseRepresentation`
Position representation.
"""
d_pos = (target.cartesian.without_differentials() -
observer.cartesian.without_differentials())
dp_norm = d_pos.norm()
# Reset any that are 0 to 1 to avoid nans from 0/0
dp_norm[dp_norm == 0] = 1 * dp_norm.unit
pos_hat = d_pos / dp_norm
return pos_hat
@u.quantity_input(velocity=u.km/u.s)
def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False):
"""
A new `SpectralCoord` with the velocity of the observer altered,
but not the position.
If a coordinate frame is specified, the observer velocities will be
modified to be stationary in the specified frame. If a coordinate
instance is specified, optionally with non-zero velocities, the
observer velocities will be updated so that the observer is co-moving
with the specified coordinates.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame in which the observer will be stationary. This
can be the name of a frame (e.g. 'icrs'), a frame class, frame instance
with no data, or instance with data. This can optionally include
velocities.
velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional
If ``frame`` does not contain velocities, these can be specified as
a 3-element `~astropy.units.Quantity`. In the case where this is
also not specified, the velocities default to zero.
preserve_observer_frame : bool
If `True`, the final observer frame class will be the same as the
original one, and if `False` it will be the frame of the velocity
reference class.
Returns
-------
new_coord : `SpectralCoord`
The new coordinate object representing the spectral data
transformed based on the observer's new velocity frame.
"""
if self.observer is None or self.target is None:
raise ValueError("This method can only be used if both observer "
"and target are defined on the SpectralCoord.")
# Start off by extracting frame if a SkyCoord was passed in
if isinstance(frame, SkyCoord):
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
if not frame.has_data:
frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km))
if frame.data.differentials:
if velocity is not None:
raise ValueError('frame already has differentials, cannot also specify velocity')
# otherwise frame is ready to go
else:
if velocity is None:
differentials = ZERO_VELOCITIES
else:
differentials = CartesianDifferential(velocity)
frame = frame.realize_frame(frame.data.with_differentials(differentials))
if isinstance(frame, (type, str)):
if isinstance(frame, type):
frame_cls = frame
elif isinstance(frame, str):
frame_cls = frame_transform_graph.lookup_name(frame)
if velocity is None:
velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s
elif velocity.shape != (3,):
raise ValueError('velocity should be a Quantity vector with 3 elements')
frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m,
*velocity,
representation_type='cartesian',
differential_type='cartesian')
observer = update_differentials_to_match(self.observer, frame,
preserve_observer_frame=preserve_observer_frame)
# Calculate the initial and final los velocity
init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True)
# Apply transformation to data
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
new_coord = self.replicate(value=new_data, observer=observer)
return new_coord
def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
"""
Apply a velocity shift to this spectral coordinate.
The shift can be provided as a redshift (float value) or radial
velocity (`~astropy.units.Quantity` with physical type of 'speed').
Parameters
----------
target_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current target.
observer_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current observer.
Returns
-------
`SpectralCoord`
New spectral coordinate with the target/observer velocity changed
to incorporate the shift. This is always a new object even if
``target_shift`` and ``observer_shift`` are both `None`.
"""
if observer_shift is not None and (self.target is None or
self.observer is None):
raise ValueError("Both an observer and target must be defined "
"before applying a velocity shift.")
for arg in [x for x in [target_shift, observer_shift] if x is not None]:
if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)):
raise u.UnitsError("Argument must have unit physical type "
"'speed' for radial velocty or "
"'dimensionless' for redshift.")
        # A shift given as a plain float (or dimensionless quantity) is assumed
        # to be a redshift and is converted to a radial velocity below
if target_shift is None:
if self._observer is None or self._target is None:
return self.replicate()
target_shift = 0 * KMS
else:
target_shift = u.Quantity(target_shift)
if target_shift.unit.physical_type == 'dimensionless':
target_shift = target_shift.to(u.km / u.s, u.doppler_redshift())
if self._observer is None or self._target is None:
return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift),
radial_velocity=self.radial_velocity + target_shift)
if observer_shift is None:
observer_shift = 0 * KMS
else:
observer_shift = u.Quantity(observer_shift)
if observer_shift.unit.physical_type == 'dimensionless':
observer_shift = observer_shift.to(u.km / u.s, u.doppler_redshift())
target_icrs = self._target.transform_to(ICRS())
observer_icrs = self._observer.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat
observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat
target_velocity = CartesianDifferential(target_velocity.xyz)
observer_velocity = CartesianDifferential(observer_velocity.xyz)
new_target = (target_icrs
.realize_frame(target_icrs.cartesian.with_differentials(target_velocity))
.transform_to(self._target))
new_observer = (observer_icrs
.realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity))
.transform_to(self._observer))
init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True)
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data,
observer=new_observer,
target=new_target)
def to_rest(self):
"""
Transforms the spectral axis to the rest frame.
"""
if self.observer is not None and self.target is not None:
return self.with_observer_stationary_relative_to(self.target)
result = _apply_relativistic_doppler_shift(self, -self.radial_velocity)
return self.replicate(value=result, radial_velocity=0. * KMS, redshift=None)
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
try:
radial_velocity = self.radial_velocity
redshift = self.redshift
except ValueError:
radial_velocity = redshift = 'Undefined'
repr_items = [f'{prefixstr}']
if self.observer is not None:
observer_repr = indent(repr(self.observer), 14 * ' ').lstrip()
repr_items.append(f' observer: {observer_repr}')
if self.target is not None:
target_repr = indent(repr(self.target), 12 * ' ').lstrip()
repr_items.append(f' target: {target_repr}')
if (self._observer is not None and self._target is not None) or self._radial_velocity is not None:
if self.observer is not None and self.target is not None:
repr_items.append(' observer to target (computed from above):')
else:
repr_items.append(' observer to target:')
repr_items.append(f' radial_velocity={radial_velocity}')
repr_items.append(f' redshift={redshift}')
if self.doppler_rest is not None or self.doppler_convention is not None:
repr_items.append(f' doppler_rest={self.doppler_rest}')
repr_items.append(f' doppler_convention={self.doppler_convention}')
arrstr = np.array2string(self.view(np.ndarray), separator=', ',
prefix=' ')
if len(repr_items) == 1:
repr_items[0] += f'{arrstr}{self._unitstr:s}'
else:
repr_items[1] = ' (' + repr_items[1].lstrip()
repr_items[-1] += ')'
repr_items.append(f' {arrstr}{self._unitstr:s}')
return '\n'.join(repr_items) + '>'
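# Editor's illustrative sketch (hypothetical values, not part of astropy):
# tagging spectral values with a manually supplied radial velocity, then
# reading the equivalent redshift back out via u.doppler_redshift().
def _demo_spectral_coord():
    sc = SpectralCoord([654.2, 654.4] * u.nm, radial_velocity=12.5 * u.km / u.s)
    return sc.redshift   # ~4.2e-5 for 12.5 km/s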
|
a6f98ac67a7295edbaaf85d6df817f784391b384cc1cc60710fba44f515f1205 | import numpy as np
from astropy.units import si
from astropy.units import equivalencies as eq
from astropy.units import Unit
from astropy.units.quantity import SpecificTypeQuantity, Quantity
from astropy.units.decorators import quantity_input
__all__ = ['SpectralQuantity']
# We don't want to run doctests in the docstrings we inherit from Quantity
__doctest_skip__ = ['SpectralQuantity.*']
KMS = si.km / si.s
SPECTRAL_UNITS = (si.Hz, si.m, si.J, si.m ** -1, KMS)
DOPPLER_CONVENTIONS = {
'radio': eq.doppler_radio,
'optical': eq.doppler_optical,
'relativistic': eq.doppler_relativistic
}
class SpectralQuantity(SpecificTypeQuantity):
"""
One or more value(s) with spectral units.
The spectral units should be those for frequencies, wavelengths, energies,
wavenumbers, or velocities (interpreted as Doppler velocities relative to a
rest spectral value). The advantage of using this class over the regular
`~astropy.units.Quantity` class is that in `SpectralQuantity`, the
``u.spectral`` equivalency is enabled by default (allowing automatic
conversion between spectral units), and a preferred Doppler rest value and
convention can be stored for easy conversion to/from velocities.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralQuantity`
Spectral axis data values.
unit : unit-like
Unit for the given data.
    doppler_rest : `~astropy.units.Quantity`, optional
        The rest value to use for conversions from/to velocities.
doppler_convention : str, optional
The convention to use when converting the spectral data to/from
velocities.
"""
_equivalent_unit = SPECTRAL_UNITS
_include_easy_conversion_members = True
def __new__(cls, value, unit=None,
doppler_rest=None, doppler_convention=None,
**kwargs):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# If we're initializing from an existing SpectralQuantity, keep any
# parameters that aren't being overridden
if doppler_rest is None:
doppler_rest = getattr(value, 'doppler_rest', None)
if doppler_convention is None:
doppler_convention = getattr(value, 'doppler_convention', None)
obj._doppler_rest = doppler_rest
obj._doppler_convention = doppler_convention
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._doppler_rest = getattr(obj, '_doppler_rest', None)
self._doppler_convention = getattr(obj, '_doppler_convention', None)
def __quantity_subclass__(self, unit):
# Always default to just returning a Quantity, unless we explicitly
# choose to return a SpectralQuantity - even if the units match, we
# want to avoid doing things like adding two SpectralQuantity instances
# together and getting a SpectralQuantity back
if unit is self.unit:
return SpectralQuantity, True
else:
return Quantity, False
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# We always return Quantity except in a few specific cases
result = super().__array_ufunc__(function, method, *inputs, **kwargs)
if ((function is np.multiply
or function is np.true_divide and inputs[0] is self)
and result.unit == self.unit
or (function in (np.minimum, np.maximum, np.fmax, np.fmin)
and method in ('reduce', 'reduceat'))):
result = result.view(self.__class__)
result.__array_finalize__(self)
else:
if result is self:
raise TypeError(f"Cannot store the result of this operation in {self.__class__.__name__}")
if result.dtype.kind == 'b':
result = result.view(np.ndarray)
else:
result = result.view(Quantity)
return result
@property
def doppler_rest(self):
"""
The rest value of the spectrum used for transformations to/from
velocity space.
Returns
-------
        `~astropy.units.Quantity`
            Rest value as an astropy `~astropy.units.Quantity` object
            (e.g. a frequency or wavelength).
"""
return self._doppler_rest
@doppler_rest.setter
@quantity_input(value=SPECTRAL_UNITS)
def doppler_rest(self, value):
"""
New rest value needed for velocity-space conversions.
Parameters
----------
        value : `~astropy.units.Quantity`
            Rest value (e.g. a frequency or wavelength).
"""
if self._doppler_rest is not None:
raise AttributeError("doppler_rest has already been set, and cannot "
"be changed. Use the ``to`` method to convert "
"the spectral values(s) to use a different "
"rest value")
self._doppler_rest = value
@property
def doppler_convention(self):
"""
The defined convention for conversions to/from velocity space.
Returns
-------
str
One of 'optical', 'radio', or 'relativistic' representing the
equivalency used in the unit conversions.
"""
return self._doppler_convention
@doppler_convention.setter
def doppler_convention(self, value):
"""
New velocity convention used for velocity space conversions.
Parameters
----------
        value : str
            Velocity convention, one of 'optical', 'radio', or 'relativistic'.
Notes
-----
More information on the equations dictating the transformations can be
found in the astropy documentation [1]_.
References
----------
.. [1] Astropy documentation: https://docs.astropy.org/en/stable/units/equivalencies.html#spectral-doppler-equivalencies
"""
if self._doppler_convention is not None:
raise AttributeError("doppler_convention has already been set, and cannot "
"be changed. Use the ``to`` method to convert "
"the spectral values(s) to use a different "
"convention")
if value is not None and value not in DOPPLER_CONVENTIONS:
raise ValueError(f"doppler_convention should be one of {'/'.join(sorted(DOPPLER_CONVENTIONS))}")
self._doppler_convention = value
@quantity_input(doppler_rest=SPECTRAL_UNITS)
def to(self, unit,
equivalencies=[],
doppler_rest=None,
doppler_convention=None):
"""
Return a new `~astropy.coordinates.SpectralQuantity` object with the specified unit.
By default, the ``spectral`` equivalency will be enabled, as well as
one of the Doppler equivalencies if converting to/from velocities.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package, and should be a spectral unit.
equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
A list of equivalence pairs to try if the units are not
directly convertible (along with spectral).
See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, spectral equivalencies will be used.
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
        doppler_rest : `~astropy.units.Quantity`, optional
            The rest value used when converting to/from velocities. This will
            also be set as an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
doppler_convention : {'relativistic', 'optical', 'radio'}, optional
The Doppler convention used when converting to/from velocities.
            This will also be set as an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
Returns
-------
`SpectralQuantity`
New spectral coordinate object with data converted to the new unit.
"""
# Make sure units can be passed as strings
unit = Unit(unit)
# If equivalencies is explicitly set to None, we should just use the
# default Quantity.to with equivalencies also set to None
if equivalencies is None:
result = super().to(unit, equivalencies=None)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
# FIXME: need to consider case where doppler equivalency is passed in
        # equivalencies list, or if the u.spectral equivalency is already passed
if doppler_rest is None:
doppler_rest = self._doppler_rest
if doppler_convention is None:
doppler_convention = self._doppler_convention
elif doppler_convention not in DOPPLER_CONVENTIONS:
raise ValueError(f"doppler_convention should be one of {'/'.join(sorted(DOPPLER_CONVENTIONS))}")
if self.unit.is_equivalent(KMS) and unit.is_equivalent(KMS):
# Special case: if the current and final units are both velocity,
# and either the rest value or the convention are different, we
# need to convert back to frequency temporarily.
if doppler_convention is not None and self._doppler_convention is None:
raise ValueError("Original doppler_convention not set")
if doppler_rest is not None and self._doppler_rest is None:
raise ValueError("Original doppler_rest not set")
if doppler_rest is None and doppler_convention is None:
result = super().to(unit, equivalencies=equivalencies)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
elif (doppler_rest is None) is not (doppler_convention is None):
raise ValueError("Either both or neither doppler_rest and "
"doppler_convention should be defined for "
"velocity conversions")
vel_equiv1 = DOPPLER_CONVENTIONS[self._doppler_convention](self._doppler_rest)
freq = super().to(si.Hz, equivalencies=equivalencies + vel_equiv1)
vel_equiv2 = DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
result = freq.to(unit, equivalencies=equivalencies + vel_equiv2)
else:
additional_equivalencies = eq.spectral()
if self.unit.is_equivalent(KMS) or unit.is_equivalent(KMS):
if doppler_convention is None:
raise ValueError("doppler_convention not set, cannot convert to/from velocities")
if doppler_rest is None:
raise ValueError("doppler_rest not set, cannot convert to/from velocities")
additional_equivalencies = additional_equivalencies + DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
result = super().to(unit, equivalencies=equivalencies + additional_equivalencies)
# Since we have to explicitly specify when we want to keep this as a
# SpectralQuantity, we need to convert it back from a Quantity to
# a SpectralQuantity here. Note that we don't use __array_finalize__
# here since we might need to set the output doppler convention and
# rest based on the parameters passed to 'to'
result = result.view(self.__class__)
result.__array_finalize__(self)
result._doppler_convention = doppler_convention
result._doppler_rest = doppler_rest
return result
def to_value(self, unit=None, *args, **kwargs):
if unit is None:
return self.view(np.ndarray)
return self.to(unit, *args, **kwargs).value
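# Editor's illustrative sketch (hypothetical rest value, not part of astropy):
# converting a wavelength to a velocity needs both a Doppler convention and a
# rest value, supplied either to `to` directly or beforehand as attributes.
def _demo_to_velocity():
    sq = SpectralQuantity(656.5 * si.nm)
    # roughly +91 km/s: redward of the rest value maps to a positive velocity
    return sq.to(KMS, doppler_rest=656.3 * si.nm, doppler_convention='optical')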
|