"""An example config:: artifactor: log_dir: /home/test/workspace/cfme_tests/artiout per_run: test #test, run, None reuse_dir: True squash_exceptions: False threaded: False server_address: 127.0.0.1 server_port: 21212 server_enabled: True plugins: ``log_dir`` is the destination for all artifacts ``per_run`` denotes if the test artifacts should be group by run, test, or None ``reuse_dir`` if this is False and Artifactor comes across a dir that has already been used, it will die """ import atexit import os import subprocess from threading import RLock import diaper import pytest from artifactor import ArtifactorClient from cfme.fixtures.pytest_store import store from cfme.fixtures.pytest_store import write_line from cfme.markers.polarion import extract_polarion_ids from cfme.utils.appliance import find_appliance from cfme.utils.blockers import Blocker from cfme.utils.blockers import BZ from cfme.utils.conf import credentials from cfme.utils.conf import env from cfme.utils.log import logger from cfme.utils.net import net_check from cfme.utils.net import random_port from cfme.utils.wait import wait_for UNDER_TEST = False # set to true for artifactor using tests # Create a list of all our passwords for use with the sanitize request later in this module # Filter out all Nones as it will mess the output up. words = [word for word in {v.get('password') for v in credentials.values()} if word is not None] def get_test_idents(item): try: return item.location[2], item.location[0] except AttributeError: try: return item.fspath.strpath, None except AttributeError: return (None, None) def get_name(obj): return (getattr(obj, '_param_name', None) or getattr(obj, 'name', None) or str(obj)) class DummyClient(object): def fire_hook(self, *args, **kwargs): return def terminate(self): return def task_status(self): return def __bool__(self): # DummyClient is always False, # so it's easy to see if we have an artiactor client return False def get_client(art_config, pytest_config): if art_config and not UNDER_TEST: port = getattr(pytest_config.option, 'artifactor_port', None) or \ art_config.get('server_port') or random_port() pytest_config.option.artifactor_port = port art_config['server_port'] = port return ArtifactorClient( art_config['server_address'], art_config['server_port']) else: return DummyClient() def spawn_server(config, art_client): if store.slave_manager or UNDER_TEST: return None import subprocess cmd = ['miq-artifactor-server', '--port', str(art_client.port)] if config.getvalue('run_id'): cmd.append('--run-id') cmd.append(str(config.getvalue('run_id'))) proc = subprocess.Popen(cmd) return proc session_ver = None session_build = None session_stream = None session_fw_version = None def pytest_addoption(parser): parser.addoption("--run-id", action="store", default=None, help="A run id to assist in logging") @pytest.hookimpl(tryfirst=True) def pytest_configure(config): if config.getoption('--help'): return art_client = get_client( art_config=env.get('artifactor', {}), pytest_config=config) # just in case if not store.slave_manager: with diaper: atexit.register(shutdown, config) if art_client: config._art_proc = spawn_server(config, art_client) wait_for( net_check, func_args=[art_client.port, '127.0.0.1'], func_kwargs={'force': True}, num_sec=10, message="wait for artifactor to start") art_client.ready = True else: config._art_proc = None from cfme.utils.log import artifactor_handler artifactor_handler.artifactor = art_client if store.slave_manager: artifactor_handler.slaveid = store.slaveid 
config._art_client = art_client def fire_art_hook(config, hook, **hook_args): client = getattr(config, '_art_client', None) if client is None: assert UNDER_TEST, 'missing artifactor is only valid for inprocess tests' else: return client.fire_hook(hook, **hook_args) def fire_art_test_hook(node, hook, **hook_args): name, location = get_test_idents(node) return fire_art_hook( node.config, hook, test_name=name, test_location=location, **hook_args) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_protocol(item): global session_ver global session_build global session_stream appliance = find_appliance(item) if not session_ver: session_ver = str(appliance.version) session_build = appliance.build session_stream = appliance.version.stream() if str(session_ver) not in session_build: session_build = "{}-{}".format(str(session_ver), session_build) session_fw_version = None try: proc = subprocess.Popen(['git', 'describe', '--tags'], stdout=subprocess.PIPE) proc.wait() session_fw_version = proc.stdout.read().strip() except Exception: pass # already set session_fw_version to None fire_art_hook( item.config, 'session_info', version=session_ver, build=session_build, stream=session_stream, fw_version=session_fw_version ) tier = item.get_closest_marker('tier') if tier: tier = tier.args[0] requirement = item.get_closest_marker('requirement') if requirement: requirement = requirement.args[0] param_dict = {} try: params = item.callspec.params param_dict = {p: get_name(v) for p, v in params.items()} except Exception: pass # already set param_dict ip = appliance.hostname # This pre_start_test hook is needed so that filedump is able to make get the test # object set up before the logger starts logging. As the logger fires a nested hook # to the filedumper, and we can't specify order inriggerlib. 
meta = item.get_closest_marker('meta') if meta and 'blockers' in meta.kwargs: blocker_spec = meta.kwargs['blockers'] blockers = [] for blocker in blocker_spec: if isinstance(blocker, int): blockers.append(BZ(blocker).url) else: blockers.append(Blocker.parse(blocker).url) else: blockers = [] fire_art_test_hook( item, 'pre_start_test', slaveid=store.slaveid, ip=ip) fire_art_test_hook( item, 'start_test', slaveid=store.slaveid, ip=ip, tier=tier, requirement=requirement, param_dict=param_dict, issues=blockers) yield def pytest_runtest_teardown(item, nextitem): name, location = get_test_idents(item) app = find_appliance(item) ip = app.hostname fire_art_test_hook( item, 'finish_test', slaveid=store.slaveid, ip=ip, wait_for_task=True) fire_art_test_hook(item, 'sanitize', words=words) jenkins_data = { 'build_url': os.environ.get('BUILD_URL'), 'build_number': os.environ.get('BUILD_NUMBER'), 'git_commit': os.environ.get('GIT_COMMIT'), 'job_name': os.environ.get('JOB_NAME') } param_dict = None try: caps = app.browser.widgetastic.selenium.capabilities param_dict = { 'browserName': caps.get('browserName', 'Unknown'), 'browserPlatform': caps.get('platformName', caps.get('platform', 'Unknown')), 'browserVersion': caps.get('browserVersion', caps.get('version', 'Unknown')) } except Exception: logger.exception("Couldn't grab browser env_vars") pass # already set param_dict fire_art_test_hook( item, 'ostriz_send', env_params=param_dict, slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item), jenkins=jenkins_data) def pytest_runtest_logreport(report): if store.slave_manager: return # each node does its own reporting config = store.config # tech debt name, location = get_test_idents(report) xfail = hasattr(report, 'wasxfail') if hasattr(report, 'skipped'): if report.skipped: fire_art_hook( config, 'filedump', test_location=location, test_name=name, description="Short traceback", contents=report.longreprtext, file_type="short_tb", group_id="skipped") fire_art_hook( config, 'report_test', test_location=location, test_name=name, test_xfail=xfail, test_when=report.when, test_outcome=report.outcome, test_phase_duration=report.duration) fire_art_hook(config, 'build_report') @pytest.hookimpl(hookwrapper=True) def pytest_unconfigure(config): yield shutdown(config) lock = RLock() def shutdown(config): app = find_appliance(config, require=False) if app is not None: with lock: proc = config._art_proc if proc and proc.returncode is None: if not store.slave_manager: write_line('collecting artifacts') fire_art_hook(config, 'finish_session') if not store.slave_manager: config._art_client.terminate() proc.wait()
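For orientation, a minimal sketch of how a fixture elsewhere in the suite could push an artifact through this plugin. The 'filedump' hook and its keyword arguments are taken from pytest_runtest_logreport above; the fixture itself, its name, and the "txt" file type are hypothetical.

import pytest

from cfme.fixtures.artifactor_plugin import fire_art_test_hook


@pytest.fixture
def dump_notes(request):
    """Hypothetical fixture: attach a text artifact to the current test."""
    def _dump(text):
        # fire_art_test_hook derives test_name/test_location from the node
        # and forwards the call to the artifactor server (or to DummyClient
        # when no server is configured).
        fire_art_test_hook(
            request.node, 'filedump',
            description="Test notes",
            contents=text,
            file_type="txt",
            group_id="notes")
    return _dump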
import operator
from collections import namedtuple

import fauxfactory
import pytest

from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.containers.provider import ContainersProvider
from cfme.infrastructure.config_management import ConfigManager
from cfme.infrastructure.config_management import ConfigSystem
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE_PER_CATEGORY
from cfme.physical.provider import PhysicalProvider
from cfme.services.myservice import MyService
from cfme.services.workloads import TemplatesImages
from cfme.services.workloads import VmsInstances
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ

SearchParam = namedtuple(
    "SearchParam",
    ["collection", "destination", "entity", "filter", "my_filters"])

pytestmark = [
    pytest.mark.uncollectif(
        lambda param, appliance:
        (param.collection in [ConfigManager, 'ansible_tower_providers'] or
         param.filter == 'Job Template (Ansible Tower) : Name') or
        (appliance.version >= '5.11' and param.entity == 'network_load_balancers'),
        reason='load balancers are no longer supported in 5.11 -> BZ 1672949'),
    pytest.mark.meta(automates=[BZ(1402392)])  # should be only on test_filter_crud
]


def _navigation(param, appliance):
    if isinstance(param.collection, str):
        view = navigate_to(getattr(appliance.collections, param.collection),
                           param.destination)
    else:
        view = navigate_to(param.collection, param.destination)
    return view


def _filter_displayed(filters, filter):
    if filters.is_displayed:
        assert filter, "Filter wasn't created!"
    else:
        pytest.fail("Filter wasn't created or filters tree is not displayed!")


def _select_filter(filters, filter_name, param):
    if param.my_filters:
        if isinstance(param.my_filters, tuple):
            filters.tree.click_path(param.my_filters[1], "My Filters", filter_name)
        else:
            filters.tree.click_path("My Filters", filter_name)
    else:
        filters.navigation.select(filter_name)


def _can_open_advanced_search(param, appliance):
    """
    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        caseimportance: critical
        initialEstimate: 1/10h
    """
    view = _navigation(param, appliance)
    assert view.search.is_advanced_search_possible, (
        f"Advanced search not displayed for {param.entity} "
        f"on {param.destination.lower()}")
    view.search.open_advanced_search()
    assert view.search.is_advanced_search_opened, (
        f"Advanced search failed to open for {param.entity} "
        f"on {param.destination.lower()}")
    view.search.close_advanced_search()
    assert not view.search.is_advanced_search_opened, (
        f"Advanced search failed to close for {param.entity} "
        f"on {param.destination.lower()}")


def _filter_crud(param, appliance):
    """
    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        caseimportance: high
        initialEstimate: 1/10h
    """
    filter_name = fauxfactory.gen_string('alphanumeric', 10)
    filter_value = fauxfactory.gen_string('alphanumeric', 10)
    filter_value_updated = fauxfactory.gen_string('alphanumeric', 10)
    view = _navigation(param, appliance)
    # create
    if ':' not in param.filter:
        # to test the "Count of" field, values don't contain ':'
        filter_value = fauxfactory.gen_numeric_string(3)
        filter_value_updated = fauxfactory.gen_numeric_string(3)
        view.search.save_filter(
            "fill_count({}, =, {})".format(param.filter, filter_value),
            filter_name)
    else:
        view.search.save_filter(
            "fill_field({}, =, {})".format(param.filter, filter_value),
            filter_name)
    view.search.close_advanced_search()
    view.flash.assert_no_error()
    # read
    if param.my_filters:
        if isinstance(param.my_filters, tuple):
            filters = operator.attrgetter(param.my_filters[0])(view)
            _filter_displayed(filters, filters.tree.has_path(
                param.my_filters[1], "My Filters", filter_name))
        else:
            filters = operator.attrgetter(param.my_filters)(view)
            _filter_displayed(filters, filters.tree.has_path(
                "My Filters", filter_name))
    else:
        filters = view.my_filters
        _filter_displayed(filters, filters.navigation.has_item(filter_name))
    # update
    _select_filter(filters, filter_name, param)
    view.search.open_advanced_search()
    view.search.advanced_search_form.search_exp_editor.select_first_expression()
    if ':' not in param.filter:  # to test the "Count of" field
        view.search.advanced_search_form.search_exp_editor.fill_count(
            count=param.filter, key='=', value=filter_value_updated)
    else:
        view.search.advanced_search_form.search_exp_editor.fill_field(
            field=param.filter, key='=', value=filter_value_updated)
    # save expression
    view.search.advanced_search_form.save_filter_button.click()
    # save filter
    view.search.advanced_search_form.save_filter_button.click()
    view.search.close_advanced_search()
    _select_filter(filters, filter_name, param)
    # read after update
    view.search.open_advanced_search()
    exp_text = view.search.advanced_search_form.search_exp_editor.expression_text
    assert filter_value_updated in exp_text, "Filter wasn't changed!"
    # delete
    view.search.delete_filter()
    view.search.close_advanced_search()
    if param.my_filters:
        if filters.is_displayed:
            if isinstance(param.my_filters, tuple):
                assert not filters.tree.has_path(
                    param.my_filters[1], "My Filters", filter_name), \
                    "Filter wasn't deleted!"
            else:
                assert not filters.tree.has_path(
                    "My Filters", filter_name), "Filter wasn't deleted!"
    else:
        if view.my_filters.is_displayed:
            assert not view.my_filters.navigation.has_item(filter_name), \
                "Filter wasn't deleted!"


_tests = [_can_open_advanced_search, _filter_crud]


def methodized(metafunc):
    """Transform a function into a method by adding a self argument.

    Works just for specific functions in this file; it would be nice to
    generalize. TODO: generalize for more tests with possibly different
    arguments.
    """
    def func(self, param, appliance):
        return metafunc(param, appliance)
    func.__doc__ = metafunc.__doc__
    return func


def inject_tests(metaclass):
    """Attach tests to the decorated class.

    Uses ``_tests``, the list of test functions above.
    """
    for test in _tests:
        method = methodized(test)
        setattr(metaclass, f"test{test.__name__}", method)
    return metaclass


def base_pytestmarks(param_values, setup_prov=False):
    return [
        test_requirements.filtering,
        pytest.mark.parametrize(
            'param', param_values,
            ids=['{}-{}'.format(param.entity, param.destination.lower())
                 for param in param_values],
            scope="class"
        )] + ([pytest.mark.usefixtures("setup_provider")] if setup_prov else [])


@inject_tests
@pytest.mark.provider([CloudProvider], selector=ONE_PER_CATEGORY)
class TestCloud(object):
    params_values = [
        SearchParam('cloud_providers', 'All', 'cloudprovider',
                    'Cloud Provider : Name', None),
        SearchParam('cloud_av_zones', 'All', 'availabilityzone',
                    'Availability Zone : Name', None),
        SearchParam('cloud_host_aggregates', 'All', 'hostaggregate',
                    'Host Aggregate : Name', None),
        SearchParam('cloud_tenants', 'All', 'tenant', 'Cloud Tenant : Name', None),
        SearchParam('cloud_flavors', 'All', 'flavor', 'Flavor : Name', None),
        SearchParam('cloud_instances', 'All', 'instances', 'Instance : Name',
                    ('sidebar.instances', "All Instances")),
        SearchParam('cloud_images', 'All', 'images', 'Image : Name',
                    ('sidebar.images', "All Images")),
        SearchParam('cloud_stacks', 'All', 'orchestration_stacks',
                    'Orchestration Stack : Name', None),
        SearchParam('cloud_keypairs', 'All', 'key_pairs', 'Key Pair : Name', None),
    ]
    pytestmark = base_pytestmarks(params_values, True)


@inject_tests
@pytest.mark.provider([CloudProvider], selector=ONE_PER_CATEGORY)
class TestNetwork(object):
    params_values = [
        SearchParam('network_providers', 'All', 'network_managers',
                    'Network Manager : Name', None),
        SearchParam('network_providers', 'All', 'network_managers',
                    'Network Manager : Name', None),
        SearchParam('cloud_networks', 'All', 'network_networks',
                    'Cloud Network : Name', None),
        SearchParam('network_subnets', 'All', 'network_subnets',
                    'Cloud Subnet : Name', None),
        SearchParam('network_routers', 'All', 'network_routers',
                    'Network Router : Name', None),
        SearchParam('network_security_groups', 'All', 'network_security_groups',
                    'Security Group : Name', None),
        SearchParam('network_floating_ips', 'All', 'network_floating_ips',
                    'Floating IP : Address', None),
        SearchParam('network_ports', 'All', 'network_ports',
                    'Network Port : Name', None),
        SearchParam('balancers', 'All', 'network_load_balancers',
                    'Load Balancer : Name', None),
    ]
    pytestmark = base_pytestmarks(params_values, True)


@inject_tests
@pytest.mark.provider([InfraProvider], selector=ONE_PER_CATEGORY)
class TestInfra(object):
    params_values = [
        SearchParam('infra_providers', 'All', 'infraproviders',
                    'Infrastructure Provider : Name', None),
        SearchParam('clusters', 'All', 'clusters',
                    'Cluster / Deployment Role : Name', None),
        SearchParam('hosts', 'All', 'hosts', 'Host / Node : Name', None),
        SearchParam('hosts', 'All', 'hosts', 'Host / Node.VMs', None),
        SearchParam('infra_vms', 'VMsOnly', 'vms', 'Virtual Machine : Name',
                    ('sidebar.vms', "All VMs")),
        SearchParam('infra_templates', 'TemplatesOnly', 'templates',
                    'Template : Name', ('sidebar.templates', "All Templates")),
        SearchParam('resource_pools', 'All', 'resource_pools',
                    'Resource Pool : Name', None),
        SearchParam('datastores', 'All', 'datastores', 'Datastore : Name',
                    ('sidebar.datastores', "All Datastores")),
        SearchParam(VmsInstances, 'All', 'workloads_vms', 'VM and Instance : Name',
                    ('vms', "All VMs & Instances")),
        SearchParam(TemplatesImages, 'All', 'workloads_templates',
                    'VM Template and Image : Name',
                    ('templates', "All Templates & Images")),
    ]
    pytestmark = base_pytestmarks(params_values, True)


@inject_tests
@pytest.mark.provider([PhysicalProvider], selector=ONE_PER_CATEGORY)
class TestPhysical(object):
    params_values = [
        SearchParam('physical_providers', 'All', 'physical_providers',
                    'Physical Infrastructure Provider : Name', None),
        SearchParam('physical_servers', 'All', 'physical_servers',
                    'Physical Server : Name', None),
    ]
    pytestmark = base_pytestmarks(params_values, True)


@inject_tests
@pytest.mark.provider([ContainersProvider], selector=ONE_PER_CATEGORY)
class TestContainers(object):
    params_values = [
        SearchParam('containers_providers', 'All', 'container_providers',
                    'Containers Provider : Name', None),
        SearchParam('container_projects', 'All', 'container_projects',
                    'Container Project : Name', None),
        SearchParam('container_routes', 'All', 'container_routes',
                    'Container Route : Name', None),
        SearchParam('container_services', 'All', 'container_services',
                    'Container Service : Name', None),
        SearchParam('container_replicators', 'All', 'container_replicators',
                    'Container Replicator : Name', None),
        SearchParam('container_pods', 'All', 'container_pods',
                    'Container Pod : Name', None),
        SearchParam('containers', 'All', 'containers', 'Container : Name', None),
        SearchParam('container_nodes', 'All', 'container_nodes',
                    'Container Node : Name', None),
        SearchParam('container_volumes', 'All', 'container_volumes',
                    'Persistent Volume : Name', None),
        SearchParam('container_builds', 'All', 'container_builds',
                    'Container Build : Name', None),
        SearchParam('container_image_registries', 'All', 'image_registries',
                    'Container Image Registry : Name', None),
        SearchParam('container_images', 'All', 'container_images',
                    'Container Image : Name', None),
        SearchParam('container_templates', 'All', 'container_templates',
                    'Container Template : Name', None),
    ]
    pytestmark = base_pytestmarks(params_values, True)


@inject_tests
class TestAnsibleTower(object):
    params_values = [
        SearchParam('ansible_tower_providers', 'All',
                    'ansible_tower_explorer_provider',
                    'Automation Manager (Ansible Tower) : Name',
                    ('sidebar.providers', 'All Ansible Tower Providers')),
        SearchParam('ansible_tower_systems', 'All',
                    'ansible_tower_explorer_system',
                    'Configured System (Ansible Tower) : Hostname',
                    ('sidebar.configured_systems',
                     'All Ansible Tower Configured Systems')),
        SearchParam('ansible_tower_job_templates', 'All',
                    'ansible_tower_explorer_job_templates',
                    'Job Template (Ansible Tower) : Name',
                    ('sidebar.job_templates', 'All Ansible Tower Job Templates')),
        SearchParam('ansible_tower_jobs', 'All', 'ansible_tower_jobs',
                    'Ansible Tower Job : Name', None),
    ]
    pytestmark = base_pytestmarks(params_values)


@inject_tests
class TestStorage(object):
    params_values = [
        SearchParam('volumes', 'All', 'block_store_volumes',
                    'Cloud Volume : Name', None),
        SearchParam('volume_snapshots', 'All', 'block_store_snapshots',
                    'Cloud Volume Snapshot : Name', None),
        SearchParam('volume_backups', 'All', 'block_store_backups',
                    'Cloud Volume Backup : Name', None),
        SearchParam('object_store_containers', 'All', 'object_store_containers',
                    'Cloud Object Store Container : Name', None),
        SearchParam('object_store_objects', 'All', 'object_store_objects',
                    'Cloud Object Store Object : Name', None),
    ]
    pytestmark = base_pytestmarks(params_values)


@inject_tests
class TestConfigManagement(object):
    params_values = [
        SearchParam(ConfigManager, 'All', 'configuration_management',
                    'Configuration Manager : Name',
                    ('sidebar.providers',
                     "All Configuration Management Providers")),
        SearchParam(ConfigSystem, 'All', 'configuration_management_systems',
                    'Configured System (Red Hat Satellite) : Hostname',
                    ('sidebar.configured_systems', "All Configured Systems")),
    ]
    pytestmark = base_pytestmarks(params_values)


@inject_tests
@pytest.mark.meta(blockers=[BZ(1733489)])
class TestServices(object):
    params_values = [
        SearchParam(MyService, 'All', 'myservices', 'Service : Name', 'myservice'),
    ]
    pytestmark = base_pytestmarks(params_values)
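Following the inject_tests pattern above, covering one more collection takes a single parametrized class. The sketch below uses a made-up collection name and filter string purely for illustration; they are not real entities from the suite.

# Hypothetical example: attaching the shared tests to one more collection.
@inject_tests
class TestExampleEntity(object):
    params_values = [
        SearchParam('example_collection', 'All', 'example_entity',
                    'Example Entity : Name', None),
    ]
    pytestmark = base_pytestmarks(params_values)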
repo_name: izapolsk/integration_tests
test_path: cfme/tests/webui/test_advanced_search.py
code_path: cfme/fixtures/artifactor_plugin.py
""" Platform for the Aladdin Connect cover component. For more details about this platform, please refer to the documentation https://home-assistant.io/components/cover.aladdin_connect/ """ import logging import voluptuous as vol from homeassistant.components.cover import (CoverDevice, PLATFORM_SCHEMA, SUPPORT_OPEN, SUPPORT_CLOSE) from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD, STATE_CLOSED, STATE_OPENING, STATE_CLOSING, STATE_OPEN) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['aladdin_connect==0.3'] _LOGGER = logging.getLogger(__name__) NOTIFICATION_ID = 'aladdin_notification' NOTIFICATION_TITLE = 'Aladdin Connect Cover Setup' STATES_MAP = { 'open': STATE_OPEN, 'opening': STATE_OPENING, 'closed': STATE_CLOSED, 'closing': STATE_CLOSING } SUPPORTED_FEATURES = SUPPORT_OPEN | SUPPORT_CLOSE PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Aladdin Connect platform.""" from aladdin_connect import AladdinConnectClient username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) acc = AladdinConnectClient(username, password) try: if not acc.login(): raise ValueError("Username or Password is incorrect") add_entities(AladdinDevice(acc, door) for door in acc.get_doors()) except (TypeError, KeyError, NameError, ValueError) as ex: _LOGGER.error("%s", ex) hass.components.persistent_notification.create( 'Error: {}<br />' 'You will need to restart hass after fixing.' ''.format(ex), title=NOTIFICATION_TITLE, notification_id=NOTIFICATION_ID) class AladdinDevice(CoverDevice): """Representation of Aladdin Connect cover.""" def __init__(self, acc, device): """Initialize the cover.""" self._acc = acc self._device_id = device['device_id'] self._number = device['door_number'] self._name = device['name'] self._status = STATES_MAP.get(device['status']) @property def device_class(self): """Define this cover as a garage door.""" return 'garage' @property def supported_features(self): """Flag supported features.""" return SUPPORTED_FEATURES @property def unique_id(self): """Return a unique ID.""" return '{}-{}'.format(self._device_id, self._number) @property def name(self): """Return the name of the garage door.""" return self._name @property def is_opening(self): """Return if the cover is opening or not.""" return self._status == STATE_OPENING @property def is_closing(self): """Return if the cover is closing or not.""" return self._status == STATE_CLOSING @property def is_closed(self): """Return None if status is unknown, True if closed, else False.""" if self._status is None: return None return self._status == STATE_CLOSED def close_cover(self, **kwargs): """Issue close command to cover.""" self._acc.close_door(self._device_id, self._number) def open_cover(self, **kwargs): """Issue open command to cover.""" self._acc.open_door(self._device_id, self._number) def update(self): """Update status of cover.""" acc_status = self._acc.get_door_status(self._device_id, self._number) self._status = STATES_MAP.get(acc_status)
"""The tests the cover command line platform.""" import os import tempfile from unittest import mock import pytest from homeassistant.components.cover import DOMAIN import homeassistant.components.cover.command_line as cmd_rs from homeassistant.const import ( ATTR_ENTITY_ID, SERVICE_CLOSE_COVER, SERVICE_OPEN_COVER, SERVICE_STOP_COVER) from homeassistant.setup import async_setup_component @pytest.fixture def rs(hass): """Return CommandCover instance.""" return cmd_rs.CommandCover(hass, 'foo', 'command_open', 'command_close', 'command_stop', 'command_state', None) def test_should_poll_new(rs): """Test the setting of polling.""" assert rs.should_poll is True rs._command_state = None assert rs.should_poll is False def test_query_state_value(rs): """Test with state value.""" with mock.patch('subprocess.check_output') as mock_run: mock_run.return_value = b' foo bar ' result = rs._query_state_value('runme') assert 'foo bar' == result assert mock_run.call_count == 1 assert mock_run.call_args == mock.call('runme', shell=True) async def test_state_value(hass): """Test with state value.""" with tempfile.TemporaryDirectory() as tempdirname: path = os.path.join(tempdirname, 'cover_status') test_cover = { 'command_state': 'cat {}'.format(path), 'command_open': 'echo 1 > {}'.format(path), 'command_close': 'echo 1 > {}'.format(path), 'command_stop': 'echo 0 > {}'.format(path), 'value_template': '{{ value }}' } assert await async_setup_component(hass, DOMAIN, { 'cover': { 'platform': 'command_line', 'covers': { 'test': test_cover } } }) is True assert 'unknown' == hass.states.get('cover.test').state await hass.services.async_call( DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: 'cover.test'}, blocking=True) assert 'open' == hass.states.get('cover.test').state await hass.services.async_call( DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: 'cover.test'}, blocking=True) assert 'open' == hass.states.get('cover.test').state await hass.services.async_call( DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: 'cover.test'}, blocking=True) assert 'closed' == hass.states.get('cover.test').state
repo_name: PetePriority/home-assistant
test_path: tests/components/cover/test_command_line.py
code_path: homeassistant/components/cover/aladdin_connect.py
""" Special models useful for complex compound models where control is needed over which outputs from a source model are mapped to which inputs of a target model. """ from .core import Model from ..extern.six.moves import range __all__ = ['Mapping', 'Identity'] class Mapping(Model): """ Allows inputs to be reordered, duplicated or dropped. Parameters ---------- mapping : tuple A tuple of integers representing indices of the inputs to this model to return and in what order to return them. See :ref:`compound-model-mappings` for more details. n_inputs : int Number of inputs; if `None` (default) then ``max(mapping) + 1`` is used (i.e. the highest input index used in the mapping). name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Raises ------ TypeError Raised when number of inputs is less that ``max(mapping)``. Examples -------- >>> from astropy.modeling.models import Polynomial2D, Shift, Mapping >>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3) >>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1) >>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2) >>> model(1, 2) # doctest: +FLOAT_CMP (17.0, 14.2) """ def __init__(self, mapping, n_inputs=None, name=None, meta=None): if n_inputs is None: self._inputs = tuple('x' + str(idx) for idx in range(max(mapping) + 1)) else: self._inputs = tuple('x' + str(idx) for idx in range(n_inputs)) self._outputs = tuple('x' + str(idx) for idx in range(len(mapping))) self._mapping = mapping super(Mapping, self).__init__(name=name, meta=meta) @property def inputs(self): """ The name(s) of the input variable(s) on which a model is evaluated. """ return self._inputs @property def outputs(self): """The name(s) of the output(s) of the model.""" return self._outputs @property def mapping(self): """Integers representing indices of the inputs.""" return self._mapping def __repr__(self): if self.name is None: return '<Mapping({0})>'.format(self.mapping) else: return '<Mapping({0}, name={1})>'.format(self.mapping, self.name) def evaluate(self, *args): if len(args) != self.n_inputs: name = self.name if self.name is not None else "Mapping" raise TypeError('{0} expects {1} inputs; got {2}'.format( name, self.n_inputs, len(args))) result = tuple(args[idx] for idx in self._mapping) if self.n_outputs == 1: return result[0] return result @property def inverse(self): """ A `Mapping` representing the inverse of the current mapping. Raises ------ `NotImplementedError` An inverse does no exist on mappings that drop some of its inputs (there is then no way to reconstruct the inputs that were dropped). """ try: mapping = tuple(self.mapping.index(idx) for idx in range(self.n_inputs)) except ValueError: raise NotImplementedError( "Mappings such as {0} that drop one or more of their inputs " "are not invertible at this time.".format(self.mapping)) inv = self.__class__(mapping) inv._inputs = self._outputs inv._outputs = self._inputs return inv class Identity(Mapping): """ Returns inputs unchanged. This class is useful in compound models when some of the inputs must be passed unchanged to the next model. Parameters ---------- n_inputs : int Specifies the number of inputs this identity model accepts. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). 
meta : dict-like Free-form metadata to associate with this model. Examples -------- Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs:: >>> from astropy.modeling.models import (Polynomial1D, Shift, Scale, ... Identity) >>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2) >>> model(1,1) # doctest: +FLOAT_CMP (2.4, 2.0) >>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP (1.0, 1.0) """ def __init__(self, n_inputs, name=None, meta=None): mapping = tuple(range(n_inputs)) super(Identity, self).__init__(mapping, name=name, meta=meta) def __repr__(self): if self.name is None: return '<Identity({0})>'.format(self.n_inputs) else: return '<Identity({0}, name={1})>'.format(self.n_inputs, self.name) @property def inverse(self): """ The inverse transformation. In this case of `Identity`, ``self.inverse is self``. """ return self
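A short usage sketch of the Mapping semantics documented above: inputs can be reordered or duplicated, and dropping an input makes the mapping non-invertible. The values are illustrative.

from astropy.modeling.models import Mapping

swap = Mapping((1, 0))            # reorder: swap the two inputs
assert swap(1.0, 2.0) == (2.0, 1.0)

drop = Mapping((0,), n_inputs=2)  # keep x0, drop x1
assert drop(1.0, 2.0) == 1.0

try:
    drop.inverse                  # dropped inputs cannot be reconstructed
except NotImplementedError:
    pass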
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import itertools

import numpy as np
from numpy.testing import utils

from . import irafutil
from .. import models, fitting
from ..core import Model, FittableModel
from ..parameters import Parameter, InputParameterError
from ...utils.data import get_pkg_data_filename
from ...tests.helper import pytest


def setter1(val):
    return val


def setter2(val, model):
    model.do_something(val)
    return val * model.p


class SetterModel(FittableModel):
    inputs = ('x', 'y')
    outputs = ('z',)

    xc = Parameter(default=1, setter=setter1)
    yc = Parameter(default=1, setter=setter2)

    def __init__(self, xc, yc, p):
        self.p = p  # p is a value intended to be used by the setter
        super(SetterModel, self).__init__()
        self.xc = xc
        self.yc = yc

    def evaluate(self, x, y, xc, yc):
        return ((x - xc)**2 + (y - yc)**2)

    def do_something(self, v):
        pass


class TParModel(Model):
    """A toy model to test parameters machinery."""

    coeff = Parameter()
    e = Parameter()

    def __init__(self, coeff, e, **kwargs):
        super(TParModel, self).__init__(coeff=coeff, e=e, **kwargs)

    @staticmethod
    def evaluate(coeff, e):
        pass


class MockModel(FittableModel):
    alpha = Parameter(name='alpha', default=42)

    @staticmethod
    def evaluate(*args):
        pass


def test_parameter_properties():
    """Test if getting / setting of Parameter properties works."""
    m = MockModel()
    p = m.alpha

    assert p.name == 'alpha'

    # Parameter names are immutable
    with pytest.raises(AttributeError):
        p.name = 'beta'

    assert p.fixed is False
    p.fixed = True
    assert p.fixed is True

    assert p.tied is False
    p.tied = lambda _: 0
    p.tied = False
    assert p.tied is False

    assert p.min is None
    p.min = 42
    assert p.min == 42
    p.min = None
    assert p.min is None

    assert p.max is None
    # TODO: shouldn't setting a max < min give an error?
    p.max = 41
    assert p.max == 41


def test_parameter_operators():
    """Test if the parameter arithmetic operators work."""
    m = MockModel()
    par = m.alpha
    num = 42.
    val = 3

    assert par - val == num - val
    assert val - par == val - num
    assert par / val == num / val
    assert val / par == val / num
    assert par ** val == num ** val
    assert val ** par == val ** num
    assert par < 45
    assert par > 41
    assert par <= par
    assert par >= par
    assert par == par
    assert -par == -num
    assert abs(par) == abs(num)


class TestParameters(object):

    def setup_class(self):
        """
        Unit tests for parameters.

        Read an iraf database file created by onedspec.identify.  Use the
        information to create a 1D Chebyshev model and perform the same fit.

        Also create a Gaussian model.
        """
        test_file = get_pkg_data_filename('data/idcompspec.fits')
        f = open(test_file)
        lines = f.read()
        reclist = lines.split("begin")
        f.close()
        record = irafutil.IdentifyRecord(reclist[1])
        self.icoeff = record.coeff
        order = int(record.fields['order'])
        self.model = models.Chebyshev1D(order - 1)
        self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
        self.linear_fitter = fitting.LinearLSQFitter()
        self.x = record.x
        self.y = record.z
        self.yy = np.array([record.z, record.z])

    def test_set_slice(self):
        """
        Tests updating the parameters attribute with a slice.

        This is what fitters internally do.
        """
        self.model.parameters[:] = np.array([3, 4, 5, 6, 7])
        assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()

    def test_set_parameters_as_list(self):
        """Tests updating parameters using a list."""
        self.model.parameters = [30, 40, 50, 60, 70]
        assert (self.model.parameters == [30., 40., 50., 60, 70]).all()

    def test_set_parameters_as_array(self):
        """Tests updating parameters using an array."""
        self.model.parameters = np.array([3, 4, 5, 6, 7])
        assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()

    def test_set_as_tuple(self):
        """Tests updating parameters using a tuple."""
        self.model.parameters = (1, 2, 3, 4, 5)
        assert (self.model.parameters == [1, 2, 3, 4, 5]).all()

    def test_set_model_attr_seq(self):
        """
        Tests updating the parameters attribute when a model's parameter
        (in this case coeff) is updated.
        """
        self.model.parameters = [0, 0., 0., 0, 0]
        self.model.c0 = 7
        assert (self.model.parameters == [7, 0., 0., 0, 0]).all()

    def test_set_model_attr_num(self):
        """Update the parameter list when a model's parameter is updated."""
        self.gmodel.amplitude = 7
        assert (self.gmodel.parameters == [7, 3, 4]).all()

    def test_set_item(self):
        """Update the parameters using indexing."""
        self.model.parameters = [1, 2, 3, 4, 5]
        self.model.parameters[0] = 10.
        assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
        assert self.model.c0 == 10

    def test_wrong_size1(self):
        """
        Tests raising an error when attempting to reset the parameters
        using a list of a different size.
        """
        with pytest.raises(InputParameterError):
            self.model.parameters = [1, 2, 3]

    def test_wrong_size2(self):
        """
        Tests raising an exception when attempting to update a model's
        parameter (in this case coeff) with a sequence of the wrong size.
        """
        with pytest.raises(InputParameterError):
            self.model.c0 = [1, 2, 3]

    def test_wrong_shape(self):
        """
        Tests raising an exception when attempting to update a model's
        parameter and the new value has the wrong shape.
        """
        with pytest.raises(InputParameterError):
            self.gmodel.amplitude = [1, 2]

    def test_par_against_iraf(self):
        """
        Test that the fitter modifies model.parameters.

        Uses an iraf example.
        """
        new_model = self.linear_fitter(self.model, self.x, self.y)
        print(self.y, self.x)
        utils.assert_allclose(
            new_model.parameters,
            np.array([4826.1066602783685, 952.8943813407858,
                      12.641236013982386, -1.7910672553339604,
                      0.90252884366711317]),
            rtol=10 ** (-2))

    def testPolynomial1D(self):
        d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
        p1 = models.Polynomial1D(3, **d)
        utils.assert_equal(p1.parameters, [11, 12, 13, 14])

    def test_poly1d_multiple_sets(self):
        p1 = models.Polynomial1D(3, n_models=3)
        utils.assert_equal(p1.parameters,
                           [0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        utils.assert_array_equal(p1.c0, [0, 0, 0])
        p1.c0 = [10, 10, 10]
        utils.assert_equal(p1.parameters,
                           [10.0, 10.0, 10.0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    def test_par_slicing(self):
        """Test assigning to a parameter slice."""
        p1 = models.Polynomial1D(3, n_models=3)
        p1.c0[:2] = [10, 10]
        utils.assert_equal(p1.parameters,
                           [10.0, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    def test_poly2d(self):
        p2 = models.Polynomial2D(degree=3)
        p2.c0_0 = 5
        utils.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    def test_poly2d_multiple_sets(self):
        kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
              'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
        p2 = models.Polynomial2D(2, **kw)
        utils.assert_equal(p2.parameters,
                           [2, 3, 1, 2, 4, 5, 1, 1, 2, 2, 5, 5])

    def test_shift_model_parameters1d(self):
        sh1 = models.Shift(2)
        sh1.offset = 3
        assert sh1.offset == 3
        assert sh1.offset.value == 3

    def test_scale_model_parametersnd(self):
        sc1 = models.Scale([2, 2])
        sc1.factor = [3, 3]
        assert np.all(sc1.factor == [3, 3])
        utils.assert_array_equal(sc1.factor.value, [3, 3])

    def test_parameters_wrong_shape(self):
        sh1 = models.Shift(2)
        with pytest.raises(InputParameterError):
            sh1.offset = [3, 3]


class TestMultipleParameterSets(object):

    def setup_class(self):
        self.x1 = np.arange(1, 10, .1)
        self.y, self.x = np.mgrid[:10, :7]
        self.x11 = np.array([self.x1, self.x1]).T
        self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2],
                                        stddev=[.4, .7], n_models=2)

    def test_change_par(self):
        """
        Test that a change to one parameter as a set propagates to
        param_sets.
        """
        self.gmodel.amplitude = [1, 10]
        utils.assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[1., 10],
                      [3.5, 5.2],
                      [0.4, 0.7]]))
        np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])

    def test_change_par2(self):
        """
        Test that a change to one single parameter in a set propagates to
        param_sets.
        """
        self.gmodel.amplitude[0] = 11
        utils.assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[11., 10],
                      [3.5, 5.2],
                      [0.4, 0.7]]))
        np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])

    def test_change_parameters(self):
        self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
        utils.assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])
        utils.assert_almost_equal(self.gmodel.mean.value, [9., 5.2])


class TestParameterInitialization(object):
    """
    This suite of tests checks most if not all cases of instantiating a model
    with parameters of different shapes/sizes and with different numbers of
    parameter sets.
    """

    def test_single_model_scalar_parameters(self):
        t = TParModel(10, 1)
        assert len(t) == 1
        assert t.model_set_axis is False
        assert np.all(t.param_sets == [[10], [1]])
        assert np.all(t.parameters == [10, 1])
        assert t.coeff.shape == ()
        assert t.e.shape == ()

    def test_single_model_scalar_and_array_parameters(self):
        t = TParModel(10, [1, 2])
        assert len(t) == 1
        assert t.model_set_axis is False
        assert np.issubdtype(t.param_sets.dtype, object)
        assert len(t.param_sets) == 2
        assert np.all(t.param_sets[0] == [10])
        assert np.all(t.param_sets[1] == [[1, 2]])
        assert np.all(t.parameters == [10, 1, 2])
        assert t.coeff.shape == ()
        assert t.e.shape == (2,)

    def test_single_model_1d_array_parameters(self):
        t = TParModel([10, 20], [1, 2])
        assert len(t) == 1
        assert t.model_set_axis is False
        assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
        assert np.all(t.parameters == [10, 20, 1, 2])
        assert t.coeff.shape == (2,)
        assert t.e.shape == (2,)

    def test_single_model_1d_array_different_length_parameters(self):
        with pytest.raises(InputParameterError):
            # Not broadcastable
            TParModel([1, 2], [3, 4, 5])

    def test_single_model_2d_array_parameters(self):
        t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
        assert len(t) == 1
        assert t.model_set_axis is False
        assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],
                                       [[[1, 2], [3, 4]]]])
        assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
        assert t.coeff.shape == (2, 2)
        assert t.e.shape == (2, 2)

    def test_single_model_2d_non_square_parameters(self):
        coeff = np.array([[10, 20], [30, 40], [50, 60]])
        e = np.array([[1, 2], [3, 4], [5, 6]])

        t = TParModel(coeff, e)
        assert len(t) == 1
        assert t.model_set_axis is False
        assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],
                                       [[[1, 2], [3, 4], [5, 6]]]])
        assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,
                                       1, 2, 3, 4, 5, 6])
        assert t.coeff.shape == (3, 2)
        assert t.e.shape == (3, 2)

        t2 = TParModel(coeff.T, e.T)
        assert len(t2) == 1
        assert t2.model_set_axis is False
        assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],
                                        [[[1, 3, 5], [2, 4, 6]]]])
        assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,
                                        1, 3, 5, 2, 4, 6])
        assert t2.coeff.shape == (2, 3)
        assert t2.e.shape == (2, 3)

        # Not broadcastable
        with pytest.raises(InputParameterError):
            TParModel(coeff, e.T)

        with pytest.raises(InputParameterError):
            TParModel(coeff.T, e)

    def test_single_model_2d_broadcastable_parameters(self):
        t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
        assert len(t) == 1
        assert t.model_set_axis is False
        assert len(t.param_sets) == 2
        assert np.issubdtype(t.param_sets.dtype, object)
        assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])
        assert np.all(t.param_sets[1] == [[1, 2, 3]])
        assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])

    @pytest.mark.parametrize(('p1', 'p2'), [
        (1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),
        ([1, 2], [3, 4, 5])])
    def test_two_model_incorrect_scalar_parameters(self, p1, p2):
        with pytest.raises(InputParameterError):
            TParModel(p1, p2, n_models=2)

    @pytest.mark.parametrize('kwargs', [
        {'n_models': 2}, {'model_set_axis': 0},
        {'n_models': 2, 'model_set_axis': 0}])
    def test_two_model_scalar_parameters(self, kwargs):
        t = TParModel([10, 20], [1, 2], **kwargs)
        assert len(t) == 2
        assert t.model_set_axis == 0
        assert np.all(t.param_sets == [[10, 20], [1, 2]])
        assert np.all(t.parameters == [10, 20, 1, 2])
        assert t.coeff.shape == ()
        assert t.e.shape == ()

    @pytest.mark.parametrize('kwargs', [
        {'n_models': 2}, {'model_set_axis': 0},
        {'n_models': 2, 'model_set_axis': 0}])
    def test_two_model_scalar_and_array_parameters(self, kwargs):
        t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
        assert len(t) == 2
        assert t.model_set_axis == 0
        assert len(t.param_sets) == 2
        assert np.issubdtype(t.param_sets.dtype, object)
        assert np.all(t.param_sets[0] == [[10], [20]])
        assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
        assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
        assert t.coeff.shape == ()
        assert t.e.shape == (2,)

    def test_two_model_1d_array_parameters(self):
        t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
        assert len(t) == 2
        assert t.model_set_axis == 0
        assert np.all(t.param_sets == [[[10, 20], [30, 40]],
                                       [[1, 2], [3, 4]]])
        assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
        assert t.coeff.shape == (2,)
        assert t.e.shape == (2,)

        t2 = TParModel([[10, 20, 30], [40, 50, 60]],
                       [[1, 2, 3], [4, 5, 6]], n_models=2)
        assert len(t2) == 2
        assert t2.model_set_axis == 0
        assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],
                                        [[1, 2, 3], [4, 5, 6]]])
        assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,
                                        1, 2, 3, 4, 5, 6])
        assert t2.coeff.shape == (3,)
        assert t2.e.shape == (3,)

    def test_two_model_mixed_dimension_array_parameters(self):
        with pytest.raises(InputParameterError):
            # Can't broadcast different array shapes
            TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                      [[9, 10, 11], [12, 13, 14]], n_models=2)

        t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
                      [[1, 2], [3, 4]], n_models=2)
        assert len(t) == 2
        assert t.model_set_axis == 0
        assert len(t.param_sets) == 2
        assert np.issubdtype(t.param_sets.dtype, object)
        assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],
                                          [[50, 60], [70, 80]]])
        assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
        assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
                                       1, 2, 3, 4])
        assert t.coeff.shape == (2, 2)
        assert t.e.shape == (2,)

    def test_two_model_2d_array_parameters(self):
        t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
                      [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)
        assert len(t) == 2
        assert t.model_set_axis == 0
        assert np.all(t.param_sets == [[[[10, 20], [30, 40]],
                                        [[50, 60], [70, 80]]],
                                       [[[1, 2], [3, 4]],
                                        [[5, 6], [7, 8]]]])
        assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
                                       1, 2, 3, 4, 5, 6, 7, 8])
        assert t.coeff.shape == (2, 2)
        assert t.e.shape == (2, 2)

    def test_two_model_nonzero_model_set_axis(self):
        # An example where the model set axis is the *last* axis of the
        # parameter arrays
        coeff = np.array([[[10, 20], [30, 40]], [[50, 60], [70, 80]]])
        coeff = np.rollaxis(coeff, 0, 3)
        e = np.array([[1, 2], [3, 4]])
        e = np.rollaxis(e, 0, 2)
        t = TParModel(coeff, e, model_set_axis=-1)
        assert len(t) == 2
        assert t.model_set_axis == -1
        assert len(t.param_sets) == 2
        assert np.issubdtype(t.param_sets.dtype, object)
        assert np.all(t.param_sets[0] == [[[10, 50], [20, 60]],
                                          [[30, 70], [40, 80]]])
        assert np.all(t.param_sets[1] == [[[1, 3], [2, 4]]])
        assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 40, 80,
                                       1, 3, 2, 4])
        assert t.coeff.shape == (2, 2)
        assert t.e.shape == (2,)

    def test_wrong_number_of_params(self):
        with pytest.raises(InputParameterError):
            TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
        with pytest.raises(InputParameterError):
            TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)

    def test_wrong_number_of_params2(self):
        with pytest.raises(InputParameterError):
            TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
        with pytest.raises(InputParameterError):
            TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)

    def test_array_parameter1(self):
        with pytest.raises(InputParameterError):
            TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)

    def test_array_parameter2(self):
        with pytest.raises(InputParameterError):
            TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),
                      model_set_axis=0)

    def test_array_parameter4(self):
        """
        Test multiple parameter model with array-valued parameters of the
        same size as the number of parameter sets.
        """
        t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
        assert len(t4) == 1
        assert t4.coeff.shape == (2, 2)
        assert t4.e.shape == (2,)
        assert np.issubdtype(t4.param_sets.dtype, object)
        assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
        assert np.all(t4.param_sets[1] == [5, 6])


def test_non_broadcasting_parameters():
    """
    Tests that in a model with 3 parameters that do not all mutually
    broadcast, this is determined correctly regardless of what order the
    parameters are in.
    """
    a = 3
    b = np.array([[1, 2, 3], [4, 5, 6]])
    c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])

    class TestModel(Model):
        p1 = Parameter()
        p2 = Parameter()
        p3 = Parameter()

        def evaluate(self, *args):
            return

    # a broadcasts with both b and c, but b does not broadcast with c
    for args in itertools.permutations((a, b, c)):
        with pytest.raises(InputParameterError):
            TestModel(*args)


def test_setter():
    pars = np.random.rand(20).reshape((10, 2))

    model = SetterModel(-1, 3, np.pi)

    for x, y in pars:
        model.x = x
        model.y = y
        utils.assert_almost_equal(model(x, y),
                                  (x + 1)**2 + (y - np.pi * 3)**2)
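For context on the model-set behavior these tests exercise, a small sketch with a stock astropy model; the parameter values are arbitrary.

import numpy as np
from astropy.modeling import models

g = models.Gaussian1D(amplitude=[1, 2], mean=[0, 1], stddev=[1, 1], n_models=2)
assert len(g) == 2
assert g.model_set_axis == 0
# .parameters flattens parameter by parameter across the model sets
assert np.all(g.parameters == [1, 2, 0, 1, 1, 1])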
repo_name: tbabej/astropy
test_path: astropy/modeling/tests/test_parameters.py
code_path: astropy/modeling/mappings.py
"""Tests for hermite_e module. """ from __future__ import division, absolute_import, print_function from functools import reduce import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, ) He0 = np.array([1]) He1 = np.array([0, 1]) He2 = np.array([-1, 0, 1]) He3 = np.array([0, -3, 0, 1]) He4 = np.array([3, 0, -6, 0, 1]) He5 = np.array([0, 15, 0, -10, 0, 1]) He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] def trim(x): return herme.hermetrim(x, tol=1e-6) class TestConstants(object): def test_hermedomain(self): assert_equal(herme.hermedomain, [-1, 1]) def test_hermezero(self): assert_equal(herme.hermezero, [0]) def test_hermeone(self): assert_equal(herme.hermeone, [1]) def test_hermex(self): assert_equal(herme.hermex, [0, 1]) class TestArithmetic(object): x = np.linspace(-3, 3, 100) def test_hermeadd(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = herme.hermeadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermesub(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = herme.hermesub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermemulx(self): assert_equal(herme.hermemulx([0]), [0]) assert_equal(herme.hermemulx([1]), [0, 1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [i, 0, 1] assert_equal(herme.hermemulx(ser), tgt) def test_hermemul(self): # check values of result for i in range(5): pol1 = [0]*i + [1] val1 = herme.hermeval(self.x, pol1) for j in range(5): msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = herme.hermeval(self.x, pol2) pol3 = herme.hermemul(pol1, pol2) val3 = herme.hermeval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) assert_almost_equal(val3, val1*val2, err_msg=msg) def test_hermediv(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = herme.hermeadd(ci, cj) quo, rem = herme.hermediv(tgt, ci) res = herme.hermeadd(herme.hermemul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermepow(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) c = np.arange(i + 1) tgt = reduce(herme.hermemul, [c]*j, np.array([1])) res = herme.hermepow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([4., 2., 3.]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 y = polyval(x, [1., 2., 3.]) def test_hermeval(self): #check empty input assert_equal(herme.hermeval([], [1]).size, 0) #check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Helist] for i in range(10): msg = "At i=%d" % i tgt = y[i] res = herme.hermeval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) #check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(herme.hermeval(x, [1]).shape, dims) assert_equal(herme.hermeval(x, [1, 0]).shape, dims) assert_equal(herme.hermeval(x, 
[1, 0, 0]).shape, dims)

    def test_hermeval2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test exceptions
        assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)

        #test values
        tgt = y1*y2
        res = herme.hermeval2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = herme.hermeval2d(z, z, self.c2d)
        assert_(res.shape == (2, 3))

    def test_hermeval3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test exceptions
        assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)

        #test values
        tgt = y1*y2*y3
        res = herme.hermeval3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = herme.hermeval3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3))

    def test_hermegrid2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test values
        tgt = np.einsum('i,j->ij', y1, y2)
        res = herme.hermegrid2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = herme.hermegrid2d(z, z, self.c2d)
        assert_(res.shape == (2, 3)*2)

    def test_hermegrid3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test values
        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
        res = herme.hermegrid3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = herme.hermegrid3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3)*3)


class TestIntegral(object):

    def test_hermeint(self):
        # check exceptions
        assert_raises(TypeError, herme.hermeint, [0], .5)
        assert_raises(ValueError, herme.hermeint, [0], -1)
        assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
        assert_raises(ValueError, herme.hermeint, [0], lbnd=[0])
        assert_raises(ValueError, herme.hermeint, [0], scl=[0])
        assert_raises(TypeError, herme.hermeint, [0], axis=.5)

        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0]*(i - 2) + [1]
            res = herme.hermeint([0], m=i, k=k)
            assert_almost_equal(res, [0, 1])

        # check single integration with integration constant
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [1/scl]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i])
            res = herme.herme2poly(hermeint)
            assert_almost_equal(trim(res), trim(tgt))

        # check single integration with integration constant and lbnd
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
            assert_almost_equal(herme.hermeval(-1, hermeint), i)

        # check single integration with integration constant and scaling
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [2/scl]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
            res = herme.herme2poly(hermeint)
            assert_almost_equal(trim(res), trim(tgt))

        # check multiple integrations with default k
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herme.hermeint(tgt, m=1)
                res = herme.hermeint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))

        # check multiple integrations with defined k
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herme.hermeint(tgt, m=1, k=[k])
                res = herme.hermeint(pol, m=j, k=list(range(j)))
                assert_almost_equal(trim(res), trim(tgt))

        # check multiple integrations with lbnd
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
                res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))

        # check multiple integrations with scaling
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
                res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
                assert_almost_equal(trim(res), trim(tgt))

    def test_hermeint_axis(self):
        # check that axis keyword works
        c2d = np.random.random((3, 4))

        tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
        res = herme.hermeint(c2d, axis=0)
        assert_almost_equal(res, tgt)

        tgt = np.vstack([herme.hermeint(c) for c in c2d])
        res = herme.hermeint(c2d, axis=1)
        assert_almost_equal(res, tgt)

        tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
        res = herme.hermeint(c2d, k=3, axis=1)
        assert_almost_equal(res, tgt)


class TestDerivative(object):

    def test_hermeder(self):
        # check exceptions
        assert_raises(TypeError, herme.hermeder, [0], .5)
        assert_raises(ValueError, herme.hermeder, [0], -1)

        # check that zeroth derivative does nothing
        for i in range(5):
            tgt = [0]*i + [1]
            res = herme.hermeder(tgt, m=0)
            assert_equal(trim(res), trim(tgt))

        # check that derivation is the inverse of integration
        for i in range(5):
            for j in range(2, 5):
                tgt = [0]*i + [1]
                res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
                assert_almost_equal(trim(res), trim(tgt))

        # check derivation with scaling
        for i in range(5):
            for j in range(2, 5):
                tgt = [0]*i + [1]
                res = herme.hermeder(
                    herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
                assert_almost_equal(trim(res), trim(tgt))

    def test_hermeder_axis(self):
        # check that axis keyword works
        c2d = np.random.random((3, 4))

        tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
        res = herme.hermeder(c2d, axis=0)
        assert_almost_equal(res, tgt)

        tgt = np.vstack([herme.hermeder(c) for c in c2d])
        res = herme.hermeder(c2d, axis=1)
        assert_almost_equal(res, tgt)


class TestVander(object):
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1

    def test_hermevander(self):
        # check for 1d x
        x = np.arange(3)
        v = herme.hermevander(x, 3)
        assert_(v.shape == (3, 4))
        for i in range(4):
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], herme.hermeval(x, coef))

        # check for 2d x
        x = np.array([[1, 2], [3, 4], [5, 6]])
        v = herme.hermevander(x, 3)
        assert_(v.shape == (3, 2, 4))
        for i in range(4):
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], herme.hermeval(x, coef))

    def test_hermevander2d(self):
        # also tests hermeval2d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3))
        van = herme.hermevander2d(x1, x2, [1, 2])
        tgt = herme.hermeval2d(x1, x2, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)

        # check shape
        van = herme.hermevander2d([x1], [x2], [1, 2])
        assert_(van.shape == (1, 5, 6))

    def test_hermevander3d(self):
        # also tests hermeval3d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3, 4))
        van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
        tgt = herme.hermeval3d(x1, x2, x3, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)

        # check shape
        van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
        assert_(van.shape == (1, 5, 24))


class TestFitting(object):

    def test_hermefit(self):
        def f(x):
            return x*(x - 1)*(x - 2)

        def f2(x):
            return x**4 + x**2 + 1

        # Test exceptions
        assert_raises(ValueError, herme.hermefit, [1], [1], -1)
        assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
        assert_raises(TypeError, herme.hermefit, [], [1], 0)
        assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
        assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
        assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
        assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1])
        assert_raises(ValueError, herme.hermefit, [1], [1], [-1,])
        assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6])
        assert_raises(TypeError, herme.hermefit, [1], [1], [])

        # Test fit
        x = np.linspace(0, 2)
        y = f(x)
        #
        coef3 = herme.hermefit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(herme.hermeval(x, coef3), y)
        coef3 = herme.hermefit(x, y, [0, 1, 2, 3])
        assert_equal(len(coef3), 4)
        assert_almost_equal(herme.hermeval(x, coef3), y)
        #
        coef4 = herme.hermefit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(herme.hermeval(x, coef4), y)
        coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4])
        assert_equal(len(coef4), 5)
        assert_almost_equal(herme.hermeval(x, coef4), y)
        # check things still work if deg is not in strict increasing
        coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0])
        assert_equal(len(coef4), 5)
        assert_almost_equal(herme.hermeval(x, coef4), y)
        #
        coef2d = herme.hermefit(x, np.array([y, y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3])
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        # test weighting
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        y[0::2] = 0
        wcoef3 = herme.hermefit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex values x points whose square
        # is zero when summed.
        x = [1, 1j, -1, -1j]
        assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
        assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1])
        # test fitting only even Legendre polynomials
        x = np.linspace(-1, 1)
        y = f2(x)
        coef1 = herme.hermefit(x, y, 4)
        assert_almost_equal(herme.hermeval(x, coef1), y)
        coef2 = herme.hermefit(x, y, [0, 2, 4])
        assert_almost_equal(herme.hermeval(x, coef2), y)
        assert_almost_equal(coef1, coef2)


class TestCompanion(object):

    def test_raises(self):
        assert_raises(ValueError, herme.hermecompanion, [])
        assert_raises(ValueError, herme.hermecompanion, [1])

    def test_dimensions(self):
        for i in range(1, 5):
            coef = [0]*i + [1]
            assert_(herme.hermecompanion(coef).shape == (i, i))

    def test_linear_root(self):
        assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)


class TestGauss(object):

    def test_100(self):
        x, w = herme.hermegauss(100)

        # test orthogonality. Note that the results need to be normalized,
        # otherwise the huge values that can arise from fast growing
        # functions like Laguerre can be very confusing.
        v = herme.hermevander(x, 99)
        vv = np.dot(v.T * w, v)
        vd = 1/np.sqrt(vv.diagonal())
        vv = vd[:, None] * vv * vd
        assert_almost_equal(vv, np.eye(100))

        # check that the integral of 1 is correct
        tgt = np.sqrt(2*np.pi)
        assert_almost_equal(w.sum(), tgt)


class TestMisc(object):

    def test_hermefromroots(self):
        res = herme.hermefromroots([])
        assert_almost_equal(trim(res), [1])
        for i in range(1, 5):
            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
            pol = herme.hermefromroots(roots)
            res = herme.hermeval(roots, pol)
            tgt = 0
            assert_(len(pol) == i + 1)
            assert_almost_equal(herme.herme2poly(pol)[-1], 1)
            assert_almost_equal(res, tgt)

    def test_hermeroots(self):
        assert_almost_equal(herme.hermeroots([1]), [])
        assert_almost_equal(herme.hermeroots([1, 1]), [-1])
        for i in range(2, 5):
            tgt = np.linspace(-1, 1, i)
            res = herme.hermeroots(herme.hermefromroots(tgt))
            assert_almost_equal(trim(res), trim(tgt))

    def test_hermetrim(self):
        coef = [2, -1, 1, 0]

        # Test exceptions
        assert_raises(ValueError, herme.hermetrim, coef, -1)

        # Test results
        assert_equal(herme.hermetrim(coef), coef[:-1])
        assert_equal(herme.hermetrim(coef, 1), coef[:-3])
        assert_equal(herme.hermetrim(coef, 2), [0])

    def test_hermeline(self):
        assert_equal(herme.hermeline(3, 4), [3, 4])

    def test_herme2poly(self):
        for i in range(10):
            assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i])

    def test_poly2herme(self):
        for i in range(10):
            assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1])

    def test_weight(self):
        x = np.linspace(-5, 5, 11)
        tgt = np.exp(-.5*x**2)
        res = herme.hermeweight(x)
        assert_almost_equal(res, tgt)
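# A minimal usage sketch (illustrative, not a file from this repository) of
# the numpy.polynomial.hermite_e API exercised by the tests above; the values
# mirror what TestFitting and TestGauss assert.
import numpy as np
from numpy.polynomial import hermite_e as herme

x = np.linspace(0, 2)
y = x*(x - 1)*(x - 2)
coef = herme.hermefit(x, y, 3)          # degree-3 least-squares fit
assert np.allclose(herme.hermeval(x, coef), y)

xg, wg = herme.hermegauss(100)          # Gauss-HermiteE nodes and weights
assert np.isclose(wg.sum(), np.sqrt(2*np.pi))  # integral of exp(-x**2/2)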
from __future__ import division, absolute_import, print_function

import sys
import itertools
import pytest

import numpy as np
from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
from numpy.core import _umath_tests
from numpy.lib.stride_tricks import as_strided
from numpy.compat import long
from numpy.testing import (
    assert_, assert_raises, assert_equal, assert_array_equal
    )

if sys.version_info[0] >= 3:
    xrange = range


ndims = 2
size = 10
shape = tuple([size] * ndims)

MAY_SHARE_BOUNDS = 0
MAY_SHARE_EXACT = -1


def _indices_for_nelems(nelems):
    """Returns slices of length nelems, from start onwards, in direction sign."""
    if nelems == 0:
        return [size // 2]  # int index

    res = []
    for step in (1, 2):
        for sign in (-1, 1):
            start = size // 2 - nelems * step * sign // 2
            stop = start + nelems * step * sign
            res.append(slice(start, stop, step * sign))

    return res


def _indices_for_axis():
    """Returns (src, dst) pairs of indices."""
    res = []
    for nelems in (0, 2, 3):
        ind = _indices_for_nelems(nelems)

        # no itertools.product available in Py2.4
        res.extend([(a, b) for a in ind for b in ind])  # all assignments of size "nelems"

    return res


def _indices(ndims):
    """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
    ind = _indices_for_axis()

    # no itertools.product available in Py2.4
    res = [[]]
    for i in range(ndims):
        newres = []
        for elem in ind:
            for others in res:
                newres.append([elem] + others)
        res = newres

    return res


def _check_assignment(srcidx, dstidx):
    """Check assignment arr[dstidx] = arr[srcidx] works."""
    arr = np.arange(np.product(shape)).reshape(shape)

    cpy = arr.copy()

    cpy[dstidx] = arr[srcidx]
    arr[dstidx] = arr[srcidx]

    assert_(np.all(arr == cpy),
            'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))


def test_overlapping_assignments():
    # Test automatically generated assignments which overlap in memory.
    inds = _indices(ndims)

    for ind in inds:
        srcidx = tuple([a[0] for a in ind])
        dstidx = tuple([a[1] for a in ind])

        _check_assignment(srcidx, dstidx)


@pytest.mark.slow
def test_diophantine_fuzz():
    # Fuzz test the diophantine solver
    rng = np.random.RandomState(1234)

    max_int = np.iinfo(np.intp).max

    for ndim in range(10):
        feasible_count = 0
        infeasible_count = 0

        min_count = 500//(ndim + 1)

        while min(feasible_count, infeasible_count) < min_count:
            # Ensure big and small integer problems
            A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
            U_max = rng.randint(0, 11, dtype=np.intp)**6

            A_max = min(max_int, A_max)
            U_max = min(max_int-1, U_max)

            A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))
                      for j in range(ndim))
            U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))
                      for j in range(ndim))

            b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
            b = rng.randint(-1, b_ub+2, dtype=np.intp)

            if ndim == 0 and feasible_count < min_count:
                b = 0

            X = solve_diophantine(A, U, b)

            if X is None:
                # Check the simplified decision problem agrees
                X_simplified = solve_diophantine(A, U, b, simplify=1)
                assert_(X_simplified is None, (A, U, b, X_simplified))

                # Check no solution exists (provided the problem is
                # small enough so that brute force checking doesn't
                # take too long)
                try:
                    ranges = tuple(xrange(0, a*ub+1, a) for a, ub in zip(A, U))
                except OverflowError:
                    # xrange on 32-bit Python 2 may overflow
                    continue

                size = 1
                for r in ranges:
                    size *= len(r)
                if size < 100000:
                    assert_(not any(sum(w) == b for w in itertools.product(*ranges)))
                    infeasible_count += 1
            else:
                # Check the simplified decision problem agrees
                X_simplified = solve_diophantine(A, U, b, simplify=1)
                assert_(X_simplified is not None, (A, U, b, X_simplified))

                # Check validity
                assert_(sum(a*x for a, x in zip(A, X)) == b)
                assert_(all(0 <= x <= ub for x, ub in zip(X, U)))
                feasible_count += 1


def test_diophantine_overflow():
    # Smoke test integer overflow detection
    max_intp = np.iinfo(np.intp).max
    max_int64 = np.iinfo(np.int64).max

    if max_int64 <= max_intp:
        # Check that the algorithm works internally in 128-bit;
        # solving this problem requires large intermediate numbers
        A = (max_int64//2, max_int64//2 - 10)
        U = (max_int64//2, max_int64//2 - 10)
        b = 2*(max_int64//2) - 10

        assert_equal(solve_diophantine(A, U, b), (1, 1))


def check_may_share_memory_exact(a, b):
    got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)

    assert_equal(np.may_share_memory(a, b),
                 np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))

    a.fill(0)
    b.fill(0)
    a.fill(1)
    exact = b.any()

    err_msg = ""
    if got != exact:
        err_msg = "    " + "\n    ".join([
            "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],),
            "shape_a = %r" % (a.shape,),
            "shape_b = %r" % (b.shape,),
            "strides_a = %r" % (a.strides,),
            "strides_b = %r" % (b.strides,),
            "size_a = %r" % (a.size,),
            "size_b = %r" % (b.size,)
        ])

    assert_equal(got, exact, err_msg=err_msg)


def test_may_share_memory_manual():
    # Manual test cases for may_share_memory

    # Base arrays
    xs0 = [
        np.zeros([13, 21, 23, 22], dtype=np.int8),
        np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:]
    ]

    # Generate all negative stride combinations
    xs = []
    for x in xs0:
        for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)):
            xp = x[ss]
            xs.append(xp)

    for x in xs:
        # The default is a simple extent check
        assert_(np.may_share_memory(x[:,0,:], x[:,1,:]))
        assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None))

        # Exact checks
        check_may_share_memory_exact(x[:,0,:], x[:,1,:])
        check_may_share_memory_exact(x[:,::7], x[:,3::3])

        try:
            xp = x.ravel()
            if xp.flags.owndata:
                continue
            xp = xp.view(np.int16)
        except ValueError:
            continue

        # 0-size arrays cannot overlap
        check_may_share_memory_exact(x.ravel()[6:6],
                                     xp.reshape(13, 21, 23, 11)[:,::7])

        # Test itemsize is dealt with
        check_may_share_memory_exact(x[:,::7],
                                     xp.reshape(13, 21, 23, 11))

        check_may_share_memory_exact(x[:,::7],
                                     xp.reshape(13, 21, 23, 11)[:,3::3])

        check_may_share_memory_exact(x.ravel()[6:7],
                                     xp.reshape(13, 21, 23, 11)[:,::7])

    # Check unit size
    x = np.zeros([1], dtype=np.int8)
    check_may_share_memory_exact(x, x)
    check_may_share_memory_exact(x, x.copy())


def iter_random_view_pairs(x, same_steps=True, equal_size=False):
    rng = np.random.RandomState(1234)

    if equal_size and same_steps:
        raise ValueError()

    def random_slice(n, step):
        start = rng.randint(0, n+1, dtype=np.intp)
        stop = rng.randint(start, n+1, dtype=np.intp)
        if rng.randint(0, 2, dtype=np.intp) == 0:
            stop, start = start, stop
            step *= -1
        return slice(start, stop, step)

    def random_slice_fixed_size(n, step, size):
        start = rng.randint(0, n+1 - size*step)
        stop = start + (size-1)*step + 1
        if rng.randint(0, 2) == 0:
            stop, start = start-1, stop-1
            if stop < 0:
                stop = None
            step *= -1
        return slice(start, stop, step)

    # First a few regular views
    yield x, x
    for j in range(1, 7, 3):
        yield x[j:], x[:-j]
        yield x[...,j:], x[...,:-j]

    # An array with zero stride internal overlap
    strides = list(x.strides)
    strides[0] = 0
    xp = as_strided(x, shape=x.shape, strides=strides)
    yield x, xp
    yield xp, xp

    # An array with non-zero stride internal overlap
    strides = list(x.strides)
    if strides[0] > 1:
        strides[0] = 1
    xp = as_strided(x, shape=x.shape, strides=strides)
    yield x, xp
    yield xp, xp

    # Then discontiguous views
    while True:
        steps = tuple(rng.randint(1, 11, dtype=np.intp)
                      if rng.randint(0, 5, dtype=np.intp) == 0 else 1
                      for j in range(x.ndim))
        s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))

        t1 = np.arange(x.ndim)
        rng.shuffle(t1)

        if equal_size:
            t2 = t1
        else:
            t2 = np.arange(x.ndim)
            rng.shuffle(t2)

        a = x[s1]

        if equal_size:
            if a.size == 0:
                continue

            steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
                           if rng.randint(0, 5) == 0 else 1
                           for p, s, pa in zip(x.shape, s1, a.shape))
            s2 = tuple(random_slice_fixed_size(p, s, pa)
                       for p, s, pa in zip(x.shape, steps2, a.shape))
        elif same_steps:
            steps2 = steps
        else:
            steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
                           if rng.randint(0, 5, dtype=np.intp) == 0 else 1
                           for j in range(x.ndim))

        if not equal_size:
            s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))

        a = a.transpose(t1)
        b = x[s2].transpose(t2)

        yield a, b


def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
    # Check that overlap problems with common strides are solved with
    # little work.

    x = np.zeros([17,34,71,97], dtype=np.int16)

    feasible = 0
    infeasible = 0

    pair_iter = iter_random_view_pairs(x, same_steps)

    while min(feasible, infeasible) < min_count:
        a, b = next(pair_iter)

        bounds_overlap = np.may_share_memory(a, b)
        may_share_answer = np.may_share_memory(a, b)
        easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
        exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)

        if easy_answer != exact_answer:
            # assert_equal is slow...
            assert_equal(easy_answer, exact_answer)

        if may_share_answer != bounds_overlap:
            assert_equal(may_share_answer, bounds_overlap)

        if bounds_overlap:
            if exact_answer:
                feasible += 1
            else:
                infeasible += 1


@pytest.mark.slow
def test_may_share_memory_easy_fuzz():
    # Check that overlap problems with common strides are always
    # solved with little work.
    check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1,
                                     same_steps=True,
                                     min_count=2000)


@pytest.mark.slow
def test_may_share_memory_harder_fuzz():
    # Overlap problems with not necessarily common strides take more
    # work.
    #
    # The work bound below can't be reduced much. Harder problems can
    # also exist but not be detected here, as the set of problems
    # comes from RNG.
    check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2,
                                     same_steps=False,
                                     min_count=2000)


def test_shares_memory_api():
    x = np.zeros([4, 5, 6], dtype=np.int8)

    assert_equal(np.shares_memory(x, x), True)
    assert_equal(np.shares_memory(x, x.copy()), False)

    a = x[:,::2,::3]
    b = x[:,::3,::2]
    assert_equal(np.shares_memory(a, b), True)
    assert_equal(np.shares_memory(a, b, max_work=None), True)
    assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
    assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1))


def test_may_share_memory_bad_max_work():
    x = np.zeros([1])
    assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
    assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)


def test_internal_overlap_diophantine():
    def check(A, U, exists=None):
        X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)

        if exists is None:
            exists = (X is not None)

        if X is not None:
            assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))
            assert_(all(0 <= x <= u for x, u in zip(X, U)))
            assert_(any(x != u//2 for x, u in zip(X, U)))

        if exists:
            assert_(X is not None, repr(X))
        else:
            assert_(X is None, repr(X))

    # Smoke tests
    check((3, 2), (2*2, 3*2), exists=True)
    check((3*2, 2), (15*2, (3-1)*2), exists=False)


def test_internal_overlap_slices():
    # Slicing an array never generates internal overlap
    x = np.zeros([17,34,71,97], dtype=np.int16)

    rng = np.random.RandomState(1234)

    def random_slice(n, step):
        start = rng.randint(0, n+1, dtype=np.intp)
        stop = rng.randint(start, n+1, dtype=np.intp)
        if rng.randint(0, 2, dtype=np.intp) == 0:
            stop, start = start, stop
            step *= -1
        return slice(start, stop, step)

    cases = 0
    min_count = 5000

    while cases < min_count:
        steps = tuple(rng.randint(1, 11, dtype=np.intp)
                      if rng.randint(0, 5, dtype=np.intp) == 0 else 1
                      for j in range(x.ndim))
        t1 = np.arange(x.ndim)
        rng.shuffle(t1)

        s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
        a = x[s1].transpose(t1)

        assert_(not internal_overlap(a))
        cases += 1


def check_internal_overlap(a, manual_expected=None):
    got = internal_overlap(a)

    # Brute-force check
    m = set()
    ranges = tuple(xrange(n) for n in a.shape)
    for v in itertools.product(*ranges):
        offset = sum(s*w for s, w in zip(a.strides, v))
        if offset in m:
            expected = True
            break
        else:
            m.add(offset)
    else:
        expected = False

    # Compare
    if got != expected:
        assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))
    if manual_expected is not None and expected != manual_expected:
        assert_equal(expected, manual_expected)
    return got


def test_internal_overlap_manual():
    # Stride tricks can construct arrays with internal overlap

    # We don't care about memory bounds, the array is not
    # read/write accessed
    x = np.arange(1).astype(np.int8)

    # Check low-dimensional special cases

    check_internal_overlap(x, False)  # 1-dim
    check_internal_overlap(x.reshape([]), False)  # 0-dim

    a = as_strided(x, strides=(3, 4), shape=(4, 4))
    check_internal_overlap(a, False)

    a = as_strided(x, strides=(3, 4), shape=(5, 4))
    check_internal_overlap(a, True)

    a = as_strided(x, strides=(0,), shape=(0,))
    check_internal_overlap(a, False)

    a = as_strided(x, strides=(0,), shape=(1,))
    check_internal_overlap(a, False)

    a = as_strided(x, strides=(0,), shape=(2,))
    check_internal_overlap(a, True)

    a = as_strided(x, strides=(0, -9993), shape=(87, 22))
    check_internal_overlap(a, True)

    a = as_strided(x, strides=(0, -9993), shape=(1, 22))
    check_internal_overlap(a, False)

    a = as_strided(x, strides=(0, -9993), shape=(0, 22))
    check_internal_overlap(a, False)


def test_internal_overlap_fuzz():
    # Fuzz check; the brute-force check is fairly slow
    x = np.arange(1).astype(np.int8)

    overlap = 0
    no_overlap = 0
    min_count = 100

    rng = np.random.RandomState(1234)

    while min(overlap, no_overlap) < min_count:
        ndim = rng.randint(1, 4, dtype=np.intp)

        strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
                        for j in range(ndim))
        shape = tuple(rng.randint(1, 30, dtype=np.intp)
                      for j in range(ndim))

        a = as_strided(x, strides=strides, shape=shape)
        result = check_internal_overlap(a)

        if result:
            overlap += 1
        else:
            no_overlap += 1


def test_non_ndarray_inputs():
    # Regression check for gh-5604

    class MyArray(object):
        def __init__(self, data):
            self.data = data

        @property
        def __array_interface__(self):
            return self.data.__array_interface__

    class MyArray2(object):
        def __init__(self, data):
            self.data = data

        def __array__(self):
            return self.data

    for cls in [MyArray, MyArray2]:
        x = np.arange(5)

        assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
        assert_(not np.shares_memory(cls(x[::2]), x[1::2]))

        assert_(np.shares_memory(cls(x[1::3]), x[::2]))
        assert_(np.may_share_memory(cls(x[1::3]), x[::2]))


def view_element_first_byte(x):
    """Construct an array viewing the first byte of each element of `x`"""
    from numpy.lib.stride_tricks import DummyArray
    interface = dict(x.__array_interface__)
    interface['typestr'] = '|b1'
    interface['descr'] = [('', '|b1')]
    return np.asarray(DummyArray(interface, x))


def assert_copy_equivalent(operation, args, out, **kwargs):
    """
    Check that operation(*args, out=out) produces results
    equivalent to out[...] = operation(*args, out=out.copy())
    """

    kwargs['out'] = out
    kwargs2 = dict(kwargs)
    kwargs2['out'] = out.copy()

    out_orig = out.copy()
    out[...] = operation(*args, **kwargs2)
    expected = out.copy()
    out[...] = out_orig

    got = operation(*args, **kwargs).copy()

    if (got != expected).any():
        assert_equal(got, expected)


class TestUFunc(object):
    """
    Test ufunc call memory overlap handling
    """

    def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
                         count=5000):
        shapes = [7, 13, 8, 21, 29, 32]

        rng = np.random.RandomState(1234)

        for ndim in range(1, 6):
            x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)

            it = iter_random_view_pairs(x, same_steps=False, equal_size=True)

            min_count = count // (ndim + 1)**2

            overlapping = 0
            while overlapping < min_count:
                a, b = next(it)

                a_orig = a.copy()
                b_orig = b.copy()

                if get_out_axis_size is None:
                    assert_copy_equivalent(operation, [a], out=b)

                    if np.shares_memory(a, b):
                        overlapping += 1
                else:
                    for axis in itertools.chain(range(ndim), [None]):
                        a[...] = a_orig
                        b[...] = b_orig

                        # Determine size for reduction axis (None if scalar)
                        outsize, scalarize = get_out_axis_size(a, b, axis)
                        if outsize == 'skip':
                            continue

                        # Slice b to get an output array of the correct size
                        sl = [slice(None)] * ndim
                        if axis is None:
                            if outsize is None:
                                sl = [slice(0, 1)] + [0]*(ndim - 1)
                            else:
                                sl = [slice(0, outsize)] + [0]*(ndim - 1)
                        else:
                            if outsize is None:
                                k = b.shape[axis]//2
                                if ndim == 1:
                                    sl[axis] = slice(k, k + 1)
                                else:
                                    sl[axis] = k
                            else:
                                assert b.shape[axis] >= outsize
                                sl[axis] = slice(0, outsize)
                        b_out = b[tuple(sl)]

                        if scalarize:
                            b_out = b_out.reshape([])

                        if np.shares_memory(a, b_out):
                            overlapping += 1

                        # Check result
                        assert_copy_equivalent(operation, [a], out=b_out, axis=axis)

    @pytest.mark.slow
    def test_unary_ufunc_call_fuzz(self):
        self.check_unary_fuzz(np.invert, None, np.int16)

    def test_binary_ufunc_accumulate_fuzz(self):
        def get_out_axis_size(a, b, axis):
            if axis is None:
                if a.ndim == 1:
                    return a.size, False
                else:
                    return 'skip', False  # accumulate doesn't support this
            else:
                return a.shape[axis], False

        self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
                              dtype=np.int16, count=500)

    def test_binary_ufunc_reduce_fuzz(self):
        def get_out_axis_size(a, b, axis):
            return None, (axis is None or a.ndim == 1)

        self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
                              dtype=np.int16, count=500)

    def test_binary_ufunc_reduceat_fuzz(self):
        def get_out_axis_size(a, b, axis):
            if axis is None:
                if a.ndim == 1:
                    return a.size, False
                else:
                    return 'skip', False  # reduceat doesn't support this
            else:
                return a.shape[axis], False

        def do_reduceat(a, out, axis):
            if axis is None:
                size = len(a)
                step = size//len(out)
            else:
                size = a.shape[axis]
                step = a.shape[axis] // out.shape[axis]
            idx = np.arange(0, size, step)
            return np.add.reduceat(a, idx, out=out, axis=axis)

        self.check_unary_fuzz(do_reduceat, get_out_axis_size,
                              dtype=np.int16, count=500)

    def test_binary_ufunc_reduceat_manual(self):
        def check(ufunc, a, ind, out):
            c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
            c2 = ufunc.reduceat(a, ind, out=out)
            assert_array_equal(c1, c2)

        # Exactly same input/output arrays
        a = np.arange(10000, dtype=np.int16)
        check(np.add, a, a[::-1].copy(), a)

        # Overlap with index
        a = np.arange(10000, dtype=np.int16)
        check(np.add, a, a[::-1], a)

    def test_unary_gufunc_fuzz(self):
        shapes = [7, 13, 8, 21, 29, 32]
        gufunc = _umath_tests.euclidean_pdist

        rng = np.random.RandomState(1234)

        for ndim in range(2, 6):
            x = rng.rand(*shapes[:ndim])

            it = iter_random_view_pairs(x, same_steps=False, equal_size=True)

            min_count = 500 // (ndim + 1)**2

            overlapping = 0
            while overlapping < min_count:
                a, b = next(it)

                if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
                    continue

                # Ensure the shapes are so that euclidean_pdist is happy
                if b.shape[-1] > b.shape[-2]:
                    b = b[...,0,:]
                else:
                    b = b[...,:,0]

                n = a.shape[-2]
                p = n * (n - 1) // 2
                if p <= b.shape[-1] and p > 0:
                    b = b[...,:p]
                else:
                    n = max(2, int(np.sqrt(b.shape[-1]))//2)
                    p = n * (n - 1) // 2
                    a = a[...,:n,:]
                    b = b[...,:p]

                # Call
                if np.shares_memory(a, b):
                    overlapping += 1

                with np.errstate(over='ignore', invalid='ignore'):
                    assert_copy_equivalent(gufunc, [a], out=b)

    def test_ufunc_at_manual(self):
        def check(ufunc, a, ind, b=None):
            a0 = a.copy()

            if b is None:
                ufunc.at(a0, ind.copy())
                c1 = a0.copy()

                ufunc.at(a, ind)
                c2 = a.copy()
            else:
                ufunc.at(a0, ind.copy(), b.copy())
                c1 = a0.copy()

                ufunc.at(a, ind, b)
                c2 = a.copy()

            assert_array_equal(c1, c2)

        # Overlap with index
        a = np.arange(10000, dtype=np.int16)
        check(np.invert, a[::-1], a)

        # Overlap with second data array
        a = np.arange(100, dtype=np.int16)
        ind = np.arange(0, 100, 2, dtype=np.int16)
        check(np.add, a, ind, a[25:75])

    def test_unary_ufunc_1d_manual(self):
        # Exercise branches in PyArray_EQUIVALENTLY_ITERABLE

        def check(a, b):
            a_orig = a.copy()
            b_orig = b.copy()

            b0 = b.copy()
            c1 = ufunc(a, out=b0)
            c2 = ufunc(a, out=b)
            assert_array_equal(c1, c2)

            # Trigger "fancy ufunc loop" code path
            mask = view_element_first_byte(b).view(np.bool_)

            a[...] = a_orig
            b[...] = b_orig
            c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()

            a[...] = a_orig
            b[...] = b_orig
            c2 = ufunc(a, out=b, where=mask.copy()).copy()

            # Also, mask overlapping with output
            a[...] = a_orig
            b[...] = b_orig
            c3 = ufunc(a, out=b, where=mask).copy()

            assert_array_equal(c1, c2)
            assert_array_equal(c1, c3)

        dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
                  np.float64, np.complex64, np.complex128]
        dtypes = [np.dtype(x) for x in dtypes]

        for dtype in dtypes:
            if np.issubdtype(dtype, np.integer):
                ufunc = np.invert
            else:
                ufunc = np.reciprocal

            n = 1000
            k = 10
            indices = [
                np.index_exp[:n],
                np.index_exp[k:k+n],
                np.index_exp[n-1::-1],
                np.index_exp[k+n-1:k-1:-1],
                np.index_exp[:2*n:2],
                np.index_exp[k:k+2*n:2],
                np.index_exp[2*n-1::-2],
                np.index_exp[k+2*n-1:k-1:-2],
            ]

            for xi, yi in itertools.product(indices, indices):
                v = np.arange(1, 1 + n*2 + k, dtype=dtype)
                x = v[xi]
                y = v[yi]

                with np.errstate(all='ignore'):
                    check(x, y)

                    # Scalar cases
                    check(x[:1], y)
                    check(x[-1:], y)
                    check(x[:1].reshape([]), y)
                    check(x[-1:].reshape([]), y)

    def test_unary_ufunc_where_same(self):
        # Check behavior at wheremask overlap
        ufunc = np.invert

        def check(a, out, mask):
            c1 = ufunc(a, out=out.copy(), where=mask.copy())
            c2 = ufunc(a, out=out, where=mask)
            assert_array_equal(c1, c2)

        # Check behavior with same input and output arrays
        x = np.arange(100).astype(np.bool_)
        check(x, x, x)
        check(x, x.copy(), x)
        check(x, x, x.copy())

    @pytest.mark.slow
    def test_binary_ufunc_1d_manual(self):
        ufunc = np.add

        def check(a, b, c):
            c0 = c.copy()
            c1 = ufunc(a, b, out=c0)
            c2 = ufunc(a, b, out=c)
            assert_array_equal(c1, c2)

        for dtype in [np.int8, np.int16, np.int32, np.int64,
                      np.float32, np.float64, np.complex64, np.complex128]:
            # Check different data dependency orders

            n = 1000
            k = 10

            indices = []
            for p in [1, 2]:
                indices.extend([
                    np.index_exp[:p*n:p],
                    np.index_exp[k:k+p*n:p],
                    np.index_exp[p*n-1::-p],
                    np.index_exp[k+p*n-1:k-1:-p],
                ])

            for x, y, z in itertools.product(indices, indices, indices):
                v = np.arange(6*n).astype(dtype)
                x = v[x]
                y = v[y]
                z = v[z]

                check(x, y, z)

                # Scalar cases
                check(x[:1], y, z)
                check(x[-1:], y, z)
                check(x[:1].reshape([]), y, z)
                check(x[-1:].reshape([]), y, z)
                check(x, y[:1], z)
                check(x, y[-1:], z)
                check(x, y[:1].reshape([]), z)
                check(x, y[-1:].reshape([]), z)

    def test_inplace_op_simple_manual(self):
        rng = np.random.RandomState(1234)
        x = rng.rand(200, 200)  # bigger than bufsize

        x += x.T
        assert_array_equal(x - x.T, 0)
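# A small illustrative snippet (not a file from this repository) of the
# may_share_memory/shares_memory semantics checked above: may_share_memory
# does a cheap bounds check, shares_memory solves the exact overlap problem
# (a diophantine problem), bounded by max_work. Values mirror
# test_shares_memory_api.
import numpy as np

x = np.zeros([4, 5, 6], dtype=np.int8)
a = x[:, ::2, ::3]
b = x[:, ::3, ::2]
assert np.may_share_memory(a, b)   # bounds overlap
assert np.shares_memory(a, b)      # exact check finds a real overlap
try:
    np.shares_memory(a, b, max_work=1)  # too small a work budget
except np.TooHardError:
    pass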
ahaldane/numpy
numpy/core/tests/test_mem_overlap.py
numpy/polynomial/tests/test_hermite_e.py
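# Illustrative (not a file from this repository): how the tests above use
# numpy.lib.stride_tricks.as_strided to build arrays whose elements alias
# the same memory -- the "internal overlap" the test module detects.
import numpy as np
from numpy.lib.stride_tricks import as_strided

x = np.arange(1).astype(np.int8)
a = as_strided(x, strides=(0,), shape=(2,))  # both elements alias x[0]
x[0] = 7
assert a[0] == a[1] == 7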
# http://www.absoft.com/literature/osxuserguide.pdf
# http://www.absoft.com/documentation.html
# Notes:
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
#   generated extension modules (works for f2py v2.45.241_1936 and up)
from __future__ import division, absolute_import, print_function

import os

from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
from numpy.distutils.misc_util import cyg2win32

compilers = ['AbsoftFCompiler']

class AbsoftFCompiler(FCompiler):

    compiler_type = 'absoft'
    description = 'Absoft Corp Fortran Compiler'
    #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
    version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
                      r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
    # on windows: f90 -V -c dummy.f
    # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006  13:05:16

    # samt5735(8)$ f90 -V -c dummy.f
    # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
    # Note that fink installs g77 as f77, so need to use f90 for detection.

    executables = {
        'version_cmd'  : None,          # set by update_executables
        'compiler_f77' : ["f77"],
        'compiler_fix' : ["f90"],
        'compiler_f90' : ["f90"],
        'linker_so'    : ["<F90>"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    if os.name=='nt':
        library_switch = '/out:'      #No space after /out:!

    module_dir_switch = None
    module_include_switch = '-p'

    def update_executables(self):
        f = cyg2win32(dummy_fortran_file())
        self.executables['version_cmd'] = ['<F90>', '-V', '-c',
                                           f+'.f', '-o', f+'.o']

    def get_flags_linker_so(self):
        if os.name=='nt':
            opt = ['/dll']
        # The "-K shared" switches are being left in for pre-9.0 versions
        # of Absoft though I don't think versions earlier than 9 can
        # actually be used to build shared libraries.  In fact, version
        # 8 of Absoft doesn't recognize "-K shared" and will fail.
        elif self.get_version() >= '9.0':
            opt = ['-shared']
        else:
            opt = ["-K", "shared"]
        return opt

    def library_dir_option(self, dir):
        if os.name=='nt':
            return ['-link', '/PATH:%s' % (dir)]
        return "-L" + dir

    def library_option(self, lib):
        if os.name=='nt':
            return '%s.lib' % (lib)
        return "-l" + lib

    def get_library_dirs(self):
        opt = FCompiler.get_library_dirs(self)
        d = os.environ.get('ABSOFT')
        if d:
            if self.get_version() >= '10.0':
                # use shared libraries, the static libraries were not compiled -fPIC
                prefix = 'sh'
            else:
                prefix = ''
            if cpu.is_64bit():
                suffix = '64'
            else:
                suffix = ''
            opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
        return opt

    def get_libraries(self):
        opt = FCompiler.get_libraries(self)
        if self.get_version() >= '11.0':
            opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
        elif self.get_version() >= '10.0':
            opt.extend(['af90math', 'afio', 'af77math', 'U77'])
        elif self.get_version() >= '8.0':
            opt.extend(['f90math', 'fio', 'f77math', 'U77'])
        else:
            opt.extend(['fio', 'f90math', 'fmath', 'U77'])
        if os.name =='nt':
            opt.append('COMDLG32')
        return opt

    def get_flags(self):
        opt = FCompiler.get_flags(self)
        if os.name != 'nt':
            opt.extend(['-s'])
            if self.get_version():
                if self.get_version()>='8.2':
                    opt.append('-fpic')
        return opt

    def get_flags_f77(self):
        opt = FCompiler.get_flags_f77(self)
        opt.extend(['-N22', '-N90', '-N110'])
        v = self.get_version()
        if os.name == 'nt':
            if v and v>='8.0':
                opt.extend(['-f', '-N15'])
        else:
            opt.append('-f')
            if v:
                if v<='4.6':
                    opt.append('-B108')
                else:
                    # Though -N15 is undocumented, it works with
                    # Absoft 8.0 on Linux
                    opt.append('-N15')
        return opt

    def get_flags_f90(self):
        opt = FCompiler.get_flags_f90(self)
        opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
                    "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
        if self.get_version():
            if self.get_version()>'4.6':
                opt.extend(["-YDEALLOC=ALL"])
        return opt

    def get_flags_fix(self):
        opt = FCompiler.get_flags_fix(self)
        opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
                    "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
        opt.extend(["-f", "fixed"])
        return opt

    def get_flags_opt(self):
        opt = ['-O']
        return opt

if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='absoft').get_version())
ahaldane/numpy
numpy/core/tests/test_mem_overlap.py
numpy/distutils/fcompiler/absoft.py
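# An illustrative sketch (not part of numpy.distutils) of the version-gated
# library selection in AbsoftFCompiler.get_libraries above. The original
# compares version strings directly; LooseVersion (as used by the IBM
# compiler class below) gives the numeric ordering one usually wants, where
# '10.1' sorts after '9.0'.
from distutils.version import LooseVersion

def absoft_style_libraries(version):
    v = LooseVersion(version)
    if v >= LooseVersion('11.0'):
        return ['af90math', 'afio', 'af77math', 'amisc']
    elif v >= LooseVersion('10.0'):
        return ['af90math', 'afio', 'af77math', 'U77']
    elif v >= LooseVersion('8.0'):
        return ['f90math', 'fio', 'f77math', 'U77']
    return ['fio', 'f90math', 'fmath', 'U77']

assert absoft_style_libraries('10.1') == ['af90math', 'afio', 'af77math', 'U77']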
from __future__ import division, absolute_import, print_function

import os
import re
import sys
import subprocess

from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils.misc_util import make_temp_file
from distutils import log

compilers = ['IBMFCompiler']

class IBMFCompiler(FCompiler):
    compiler_type = 'ibm'
    description = 'IBM XL Fortran Compiler'
    version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
    #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004

    executables = {
        'version_cmd'  : ["<F77>", "-qversion"],
        'compiler_f77' : ["xlf"],
        'compiler_fix' : ["xlf90", "-qfixed"],
        'compiler_f90' : ["xlf90"],
        'linker_so'    : ["xlf95"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    def get_version(self,*args,**kwds):
        version = FCompiler.get_version(self,*args,**kwds)

        if version is None and sys.platform.startswith('aix'):
            # use lslpp to find out xlf version
            lslpp = find_executable('lslpp')
            xlf = find_executable('xlf')
            if os.path.exists(xlf) and os.path.exists(lslpp):
                try:
                    o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
                except (OSError, subprocess.CalledProcessError):
                    pass
                else:
                    m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
                    if m:
                        version = m.group('version')

        xlf_dir = '/etc/opt/ibmcmp/xlf'
        if version is None and os.path.isdir(xlf_dir):
            # linux:
            # If the output of xlf does not contain version info
            # (that's the case with xlf 8.1, for instance) then
            # let's try another method:
            l = sorted(os.listdir(xlf_dir))
            l.reverse()
            l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
            if l:
                from distutils.version import LooseVersion
                self.version = version = LooseVersion(l[0])
        return version

    def get_flags(self):
        return ['-qextname']

    def get_flags_debug(self):
        return ['-g']

    def get_flags_linker_so(self):
        opt = []
        if sys.platform=='darwin':
            opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
        else:
            opt.append('-bshared')
        version = self.get_version(ok_status=[0, 40])
        if version is not None:
            if sys.platform.startswith('aix'):
                xlf_cfg = '/etc/xlf.cfg'
            else:
                xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
            fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
            log.info('Creating '+new_cfg)
            with open(xlf_cfg, 'r') as fi:
                crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
                for line in fi:
                    m = crt1_match(line)
                    if m:
                        fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
                    else:
                        fo.write(line)
            fo.close()
            opt.append('-F'+new_cfg)
        return opt

    def get_flags_opt(self):
        return ['-O3']

if __name__ == '__main__':
    from numpy.distutils import customized_fcompiler
    log.set_verbosity(2)
    print(customized_fcompiler(compiler='ibm').get_version())
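# A quick illustrative check (not a file from this repository) that
# IBMFCompiler's version_pattern extracts the version from the banner quoted
# in the comment above ("IBM XL Fortran Enterprise Edition V10.1 for AIX"):
import re

version_pattern = (r'(xlf\(1\)\s*|)IBM XL Fortran '
                   r'((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)'
                   r'(?P<version>[^\s*]*)')
m = re.search(version_pattern, "IBM XL Fortran Enterprise Edition V10.1 for AIX")
assert m and m.group('version') == '10.1'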
check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, same_steps=True, min_count=2000) @pytest.mark.slow def test_may_share_memory_harder_fuzz(): # Overlap problems with not necessarily common strides take more # work. # # The work bound below can't be reduced much. Harder problems can # also exist but not be detected here, as the set of problems # comes from RNG. check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2, same_steps=False, min_count=2000) def test_shares_memory_api(): x = np.zeros([4, 5, 6], dtype=np.int8) assert_equal(np.shares_memory(x, x), True) assert_equal(np.shares_memory(x, x.copy()), False) a = x[:,::2,::3] b = x[:,::3,::2] assert_equal(np.shares_memory(a, b), True) assert_equal(np.shares_memory(a, b, max_work=None), True) assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1) assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1)) def test_may_share_memory_bad_max_work(): x = np.zeros([1]) assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) def test_internal_overlap_diophantine(): def check(A, U, exists=None): X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) if exists is None: exists = (X is not None) if X is not None: assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U))) assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u//2 for x, u in zip(X, U))) if exists: assert_(X is not None, repr(X)) else: assert_(X is None, repr(X)) # Smoke tests check((3, 2), (2*2, 3*2), exists=True) check((3*2, 2), (15*2, (3-1)*2), exists=False) def test_internal_overlap_slices(): # Slicing an array never generates internal overlap x = np.zeros([17,34,71,97], dtype=np.int16) rng = np.random.RandomState(1234) def random_slice(n, step): start = rng.randint(0, n+1, dtype=np.intp) stop = rng.randint(start, n+1, dtype=np.intp) if rng.randint(0, 2, dtype=np.intp) == 0: stop, start = start, stop step *= -1 return slice(start, stop, step) cases = 0 min_count = 5000 while cases < min_count: steps = tuple(rng.randint(1, 11, dtype=np.intp) if rng.randint(0, 5, dtype=np.intp) == 0 else 1 for j in range(x.ndim)) t1 = np.arange(x.ndim) rng.shuffle(t1) s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) a = x[s1].transpose(t1) assert_(not internal_overlap(a)) cases += 1 def check_internal_overlap(a, manual_expected=None): got = internal_overlap(a) # Brute-force check m = set() ranges = tuple(xrange(n) for n in a.shape) for v in itertools.product(*ranges): offset = sum(s*w for s, w in zip(a.strides, v)) if offset in m: expected = True break else: m.add(offset) else: expected = False # Compare if got != expected: assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) if manual_expected is not None and expected != manual_expected: assert_equal(expected, manual_expected) return got def test_internal_overlap_manual(): # Stride tricks can construct arrays with internal overlap # We don't care about memory bounds, the array is not # read/write accessed x = np.arange(1).astype(np.int8) # Check low-dimensional special cases check_internal_overlap(x, False) # 1-dim check_internal_overlap(x.reshape([]), False) # 0-dim a = as_strided(x, strides=(3, 4), shape=(4, 4)) check_internal_overlap(a, False) a = as_strided(x, strides=(3, 4), shape=(5, 4)) check_internal_overlap(a, True) a = as_strided(x, strides=(0,), shape=(0,)) check_internal_overlap(a, False) a = as_strided(x, strides=(0,), shape=(1,)) 
check_internal_overlap(a, False) a = as_strided(x, strides=(0,), shape=(2,)) check_internal_overlap(a, True) a = as_strided(x, strides=(0, -9993), shape=(87, 22)) check_internal_overlap(a, True) a = as_strided(x, strides=(0, -9993), shape=(1, 22)) check_internal_overlap(a, False) a = as_strided(x, strides=(0, -9993), shape=(0, 22)) check_internal_overlap(a, False) def test_internal_overlap_fuzz(): # Fuzz check; the brute-force check is fairly slow x = np.arange(1).astype(np.int8) overlap = 0 no_overlap = 0 min_count = 100 rng = np.random.RandomState(1234) while min(overlap, no_overlap) < min_count: ndim = rng.randint(1, 4, dtype=np.intp) strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) for j in range(ndim)) shape = tuple(rng.randint(1, 30, dtype=np.intp) for j in range(ndim)) a = as_strided(x, strides=strides, shape=shape) result = check_internal_overlap(a) if result: overlap += 1 else: no_overlap += 1 def test_non_ndarray_inputs(): # Regression check for gh-5604 class MyArray(object): def __init__(self, data): self.data = data @property def __array_interface__(self): return self.data.__array_interface__ class MyArray2(object): def __init__(self, data): self.data = data def __array__(self): return self.data for cls in [MyArray, MyArray2]: x = np.arange(5) assert_(np.may_share_memory(cls(x[::2]), x[1::2])) assert_(not np.shares_memory(cls(x[::2]), x[1::2])) assert_(np.shares_memory(cls(x[1::3]), x[::2])) assert_(np.may_share_memory(cls(x[1::3]), x[::2])) def view_element_first_byte(x): """Construct an array viewing the first byte of each element of `x`""" from numpy.lib.stride_tricks import DummyArray interface = dict(x.__array_interface__) interface['typestr'] = '|b1' interface['descr'] = [('', '|b1')] return np.asarray(DummyArray(interface, x)) def assert_copy_equivalent(operation, args, out, **kwargs): """ Check that operation(*args, out=out) produces results equivalent to out[...] = operation(*args, out=out.copy()) """ kwargs['out'] = out kwargs2 = dict(kwargs) kwargs2['out'] = out.copy() out_orig = out.copy() out[...] = operation(*args, **kwargs2) expected = out.copy() out[...] = out_orig got = operation(*args, **kwargs).copy() if (got != expected).any(): assert_equal(got, expected) class TestUFunc(object): """ Test ufunc call memory overlap handling """ def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, count=5000): shapes = [7, 13, 8, 21, 29, 32] rng = np.random.RandomState(1234) for ndim in range(1, 6): x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) it = iter_random_view_pairs(x, same_steps=False, equal_size=True) min_count = count // (ndim + 1)**2 overlapping = 0 while overlapping < min_count: a, b = next(it) a_orig = a.copy() b_orig = b.copy() if get_out_axis_size is None: assert_copy_equivalent(operation, [a], out=b) if np.shares_memory(a, b): overlapping += 1 else: for axis in itertools.chain(range(ndim), [None]): a[...] = a_orig b[...] 
= b_orig # Determine size for reduction axis (None if scalar) outsize, scalarize = get_out_axis_size(a, b, axis) if outsize == 'skip': continue # Slice b to get an output array of the correct size sl = [slice(None)] * ndim if axis is None: if outsize is None: sl = [slice(0, 1)] + [0]*(ndim - 1) else: sl = [slice(0, outsize)] + [0]*(ndim - 1) else: if outsize is None: k = b.shape[axis]//2 if ndim == 1: sl[axis] = slice(k, k + 1) else: sl[axis] = k else: assert b.shape[axis] >= outsize sl[axis] = slice(0, outsize) b_out = b[tuple(sl)] if scalarize: b_out = b_out.reshape([]) if np.shares_memory(a, b_out): overlapping += 1 # Check result assert_copy_equivalent(operation, [a], out=b_out, axis=axis) @pytest.mark.slow def test_unary_ufunc_call_fuzz(self): self.check_unary_fuzz(np.invert, None, np.int16) def test_binary_ufunc_accumulate_fuzz(self): def get_out_axis_size(a, b, axis): if axis is None: if a.ndim == 1: return a.size, False else: return 'skip', False # accumulate doesn't support this else: return a.shape[axis], False self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, dtype=np.int16, count=500) def test_binary_ufunc_reduce_fuzz(self): def get_out_axis_size(a, b, axis): return None, (axis is None or a.ndim == 1) self.check_unary_fuzz(np.add.reduce, get_out_axis_size, dtype=np.int16, count=500) def test_binary_ufunc_reduceat_fuzz(self): def get_out_axis_size(a, b, axis): if axis is None: if a.ndim == 1: return a.size, False else: return 'skip', False # reduceat doesn't support this else: return a.shape[axis], False def do_reduceat(a, out, axis): if axis is None: size = len(a) step = size//len(out) else: size = a.shape[axis] step = a.shape[axis] // out.shape[axis] idx = np.arange(0, size, step) return np.add.reduceat(a, idx, out=out, axis=axis) self.check_unary_fuzz(do_reduceat, get_out_axis_size, dtype=np.int16, count=500) def test_binary_ufunc_reduceat_manual(self): def check(ufunc, a, ind, out): c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) c2 = ufunc.reduceat(a, ind, out=out) assert_array_equal(c1, c2) # Exactly same input/output arrays a = np.arange(10000, dtype=np.int16) check(np.add, a, a[::-1].copy(), a) # Overlap with index a = np.arange(10000, dtype=np.int16) check(np.add, a, a[::-1], a) def test_unary_gufunc_fuzz(self): shapes = [7, 13, 8, 21, 29, 32] gufunc = _umath_tests.euclidean_pdist rng = np.random.RandomState(1234) for ndim in range(2, 6): x = rng.rand(*shapes[:ndim]) it = iter_random_view_pairs(x, same_steps=False, equal_size=True) min_count = 500 // (ndim + 1)**2 overlapping = 0 while overlapping < min_count: a, b = next(it) if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: continue # Ensure the shapes are so that euclidean_pdist is happy if b.shape[-1] > b.shape[-2]: b = b[...,0,:] else: b = b[...,:,0] n = a.shape[-2] p = n * (n - 1) // 2 if p <= b.shape[-1] and p > 0: b = b[...,:p] else: n = max(2, int(np.sqrt(b.shape[-1]))//2) p = n * (n - 1) // 2 a = a[...,:n,:] b = b[...,:p] # Call if np.shares_memory(a, b): overlapping += 1 with np.errstate(over='ignore', invalid='ignore'): assert_copy_equivalent(gufunc, [a], out=b) def test_ufunc_at_manual(self): def check(ufunc, a, ind, b=None): a0 = a.copy() if b is None: ufunc.at(a0, ind.copy()) c1 = a0.copy() ufunc.at(a, ind) c2 = a.copy() else: ufunc.at(a0, ind.copy(), b.copy()) c1 = a0.copy() ufunc.at(a, ind, b) c2 = a.copy() assert_array_equal(c1, c2) # Overlap with index a = np.arange(10000, dtype=np.int16) check(np.invert, a[::-1], a) # Overlap with second data array a = 
np.arange(100, dtype=np.int16) ind = np.arange(0, 100, 2, dtype=np.int16) check(np.add, a, ind, a[25:75]) def test_unary_ufunc_1d_manual(self): # Exercise branches in PyArray_EQUIVALENTLY_ITERABLE def check(a, b): a_orig = a.copy() b_orig = b.copy() b0 = b.copy() c1 = ufunc(a, out=b0) c2 = ufunc(a, out=b) assert_array_equal(c1, c2) # Trigger "fancy ufunc loop" code path mask = view_element_first_byte(b).view(np.bool_) a[...] = a_orig b[...] = b_orig c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() a[...] = a_orig b[...] = b_orig c2 = ufunc(a, out=b, where=mask.copy()).copy() # Also, mask overlapping with output a[...] = a_orig b[...] = b_orig c3 = ufunc(a, out=b, where=mask).copy() assert_array_equal(c1, c2) assert_array_equal(c1, c3) dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, np.complex64, np.complex128] dtypes = [np.dtype(x) for x in dtypes] for dtype in dtypes: if np.issubdtype(dtype, np.integer): ufunc = np.invert else: ufunc = np.reciprocal n = 1000 k = 10 indices = [ np.index_exp[:n], np.index_exp[k:k+n], np.index_exp[n-1::-1], np.index_exp[k+n-1:k-1:-1], np.index_exp[:2*n:2], np.index_exp[k:k+2*n:2], np.index_exp[2*n-1::-2], np.index_exp[k+2*n-1:k-1:-2], ] for xi, yi in itertools.product(indices, indices): v = np.arange(1, 1 + n*2 + k, dtype=dtype) x = v[xi] y = v[yi] with np.errstate(all='ignore'): check(x, y) # Scalar cases check(x[:1], y) check(x[-1:], y) check(x[:1].reshape([]), y) check(x[-1:].reshape([]), y) def test_unary_ufunc_where_same(self): # Check behavior at wheremask overlap ufunc = np.invert def check(a, out, mask): c1 = ufunc(a, out=out.copy(), where=mask.copy()) c2 = ufunc(a, out=out, where=mask) assert_array_equal(c1, c2) # Check behavior with same input and output arrays x = np.arange(100).astype(np.bool_) check(x, x, x) check(x, x.copy(), x) check(x, x, x.copy()) @pytest.mark.slow def test_binary_ufunc_1d_manual(self): ufunc = np.add def check(a, b, c): c0 = c.copy() c1 = ufunc(a, b, out=c0) c2 = ufunc(a, b, out=c) assert_array_equal(c1, c2) for dtype in [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, np.complex64, np.complex128]: # Check different data dependency orders n = 1000 k = 10 indices = [] for p in [1, 2]: indices.extend([ np.index_exp[:p*n:p], np.index_exp[k:k+p*n:p], np.index_exp[p*n-1::-p], np.index_exp[k+p*n-1:k-1:-p], ]) for x, y, z in itertools.product(indices, indices, indices): v = np.arange(6*n).astype(dtype) x = v[x] y = v[y] z = v[z] check(x, y, z) # Scalar cases check(x[:1], y, z) check(x[-1:], y, z) check(x[:1].reshape([]), y, z) check(x[-1:].reshape([]), y, z) check(x, y[:1], z) check(x, y[-1:], z) check(x, y[:1].reshape([]), z) check(x, y[-1:].reshape([]), z) def test_inplace_op_simple_manual(self): rng = np.random.RandomState(1234) x = rng.rand(200, 200) # bigger than bufsize x += x.T assert_array_equal(x - x.T, 0)
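# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the test suite): the
# kind of internally overlapping array that ``iter_random_view_pairs`` and
# the ``internal_overlap`` checks above construct.  A zero stride makes every
# row alias the same storage, so a write through one row shows up in all.
import numpy as np
from numpy.lib.stride_tricks import as_strided

_base = np.arange(4)
_aliased = as_strided(_base, shape=(3, 4), strides=(0, _base.strides[0]))
_aliased[0, 0] = 99
print(_aliased[2, 0])                     # 99 -- all rows share memory
print(np.shares_memory(_aliased, _base))  # True
# ---------------------------------------------------------------------------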
# --- dataset row metadata ---
# repo:        ahaldane/numpy
# test file:   numpy/core/tests/test_mem_overlap.py
# source file: numpy/distutils/fcompiler/ibm.py
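# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): the public API that the overlap
# tests above exercise.  ``may_share_memory`` defaults to a cheap bounds
# check; ``shares_memory`` solves the exact problem and raises
# ``np.TooHardError`` when the ``max_work`` budget is too small.  Values
# mirror ``test_shares_memory_api``.
import numpy as np

_x = np.zeros([4, 5, 6], dtype=np.int8)
_a = _x[:, ::2, ::3]
_b = _x[:, ::3, ::2]
print(np.may_share_memory(_a, _b))  # True (bounds overlap)
print(np.shares_memory(_a, _b))     # True (exact check)
try:
    np.shares_memory(_a, _b, max_work=1)
except np.TooHardError:
    print("exact overlap problem exceeds max_work=1")
# ---------------------------------------------------------------------------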
import sys
import warnings
import functools
import operator

import numpy as np
from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
    assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
    HAS_REFCOUNT,
    )


class TestIndexing:
    def test_index_no_floats(self):
        a = np.array([[[5]]])

        assert_raises(IndexError, lambda: a[0.0])
        assert_raises(IndexError, lambda: a[0, 0.0])
        assert_raises(IndexError, lambda: a[0.0, 0])
        assert_raises(IndexError, lambda: a[0.0,:])
        assert_raises(IndexError, lambda: a[:, 0.0])
        assert_raises(IndexError, lambda: a[:, 0.0,:])
        assert_raises(IndexError, lambda: a[0.0,:,:])
        assert_raises(IndexError, lambda: a[0, 0, 0.0])
        assert_raises(IndexError, lambda: a[0.0, 0, 0])
        assert_raises(IndexError, lambda: a[0, 0.0, 0])
        assert_raises(IndexError, lambda: a[-1.4])
        assert_raises(IndexError, lambda: a[0, -1.4])
        assert_raises(IndexError, lambda: a[-1.4, 0])
        assert_raises(IndexError, lambda: a[-1.4,:])
        assert_raises(IndexError, lambda: a[:, -1.4])
        assert_raises(IndexError, lambda: a[:, -1.4,:])
        assert_raises(IndexError, lambda: a[-1.4,:,:])
        assert_raises(IndexError, lambda: a[0, 0, -1.4])
        assert_raises(IndexError, lambda: a[-1.4, 0, 0])
        assert_raises(IndexError, lambda: a[0, -1.4, 0])
        assert_raises(IndexError, lambda: a[0.0:, 0.0])
        assert_raises(IndexError, lambda: a[0.0:, 0.0,:])

    def test_slicing_no_floats(self):
        a = np.array([[5]])

        # start as float.
        assert_raises(TypeError, lambda: a[0.0:])
        assert_raises(TypeError, lambda: a[0:, 0.0:2])
        assert_raises(TypeError, lambda: a[0.0::2, :0])
        assert_raises(TypeError, lambda: a[0.0:1:2,:])
        assert_raises(TypeError, lambda: a[:, 0.0:])
        # stop as float.
        assert_raises(TypeError, lambda: a[:0.0])
        assert_raises(TypeError, lambda: a[:0, 1:2.0])
        assert_raises(TypeError, lambda: a[:0.0:2, :0])
        assert_raises(TypeError, lambda: a[:0.0,:])
        assert_raises(TypeError, lambda: a[:, 0:4.0:2])
        # step as float.
        assert_raises(TypeError, lambda: a[::1.0])
        assert_raises(TypeError, lambda: a[0:, :2:2.0])
        assert_raises(TypeError, lambda: a[1::4.0, :0])
        assert_raises(TypeError, lambda: a[::5.0,:])
        assert_raises(TypeError, lambda: a[:, 0:4:2.0])
        # mixed.
        assert_raises(TypeError, lambda: a[1.0:2:2.0])
        assert_raises(TypeError, lambda: a[1.0::2.0])
        assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
        assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
        assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
        assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
        # should still get the DeprecationWarning if step = 0.
        assert_raises(TypeError, lambda: a[::0.0])

    def test_index_no_array_to_index(self):
        # No non-scalar arrays.
        a = np.array([[[1]]])

        assert_raises(TypeError, lambda: a[a:a:a])

    def test_none_index(self):
        # `None` index adds newaxis
        a = np.array([1, 2, 3])
        assert_equal(a[None], a[np.newaxis])
        assert_equal(a[None].ndim, a.ndim + 1)

    def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))

    def test_void_scalar_empty_tuple(self):
        s = np.zeros((), dtype='V4')
        assert_equal(s[()].dtype, s.dtype)
        assert_equal(s[()], s)
        assert_equal(type(s[...]), np.ndarray)

    def test_same_kind_index_casting(self):
        # Indexes should be cast with same-kind and not safe, even if that
        # is somewhat unsafe. So test various different code paths.
index = np.arange(5) u_index = index.astype(np.uintp) arr = np.arange(10) assert_array_equal(arr[index], arr[u_index]) arr[u_index] = np.arange(5) assert_array_equal(arr, np.arange(10)) arr = np.arange(10).reshape(5, 2) assert_array_equal(arr[index], arr[u_index]) arr[u_index] = np.arange(5)[:,None] assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) arr = np.arange(25).reshape(5, 5) assert_array_equal(arr[u_index, u_index], arr[index, index]) def test_empty_fancy_index(self): # Empty list index creates an empty array # with the same dtype (but with weird shape) a = np.array([1, 2, 3]) assert_equal(a[[]], []) assert_equal(a[[]].dtype, a.dtype) b = np.array([], dtype=np.intp) assert_equal(a[[]], []) assert_equal(a[[]].dtype, a.dtype) b = np.array([]) assert_raises(IndexError, a.__getitem__, b) def test_ellipsis_index(self): a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) assert_(a[...] is not a) assert_equal(a[...], a) # `a[...]` was `a` in numpy <1.9. assert_(a[...].base is a) # Slicing with ellipsis can skip an # arbitrary number of dimensions assert_equal(a[0, ...], a[0]) assert_equal(a[0, ...], a[0,:]) assert_equal(a[..., 0], a[:, 0]) # Slicing with ellipsis always results # in an array, not a scalar assert_equal(a[0, ..., 1], np.array(2)) # Assignment with `(Ellipsis,)` on 0-d arrays b = np.array(1) b[(Ellipsis,)] = 2 assert_equal(b, 2) def test_single_int_index(self): # Single integer index selects one row a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) assert_equal(a[0], [1, 2, 3]) assert_equal(a[-1], [7, 8, 9]) # Index out of bounds produces IndexError assert_raises(IndexError, a.__getitem__, 1 << 30) # Index overflow produces IndexError assert_raises(IndexError, a.__getitem__, 1 << 64) def test_single_bool_index(self): # Single boolean index a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) assert_equal(a[np.array(True)], a[None]) assert_equal(a[np.array(False)], a[None][0:0]) def test_boolean_shape_mismatch(self): arr = np.ones((5, 4, 3)) index = np.array([True]) assert_raises(IndexError, arr.__getitem__, index) index = np.array([False] * 6) assert_raises(IndexError, arr.__getitem__, index) index = np.zeros((4, 4), dtype=bool) assert_raises(IndexError, arr.__getitem__, index) assert_raises(IndexError, arr.__getitem__, (slice(None), index)) def test_boolean_indexing_onedim(self): # Indexing a 2-dimensional array with # boolean array of length one a = np.array([[ 0., 0., 0.]]) b = np.array([ True], dtype=bool) assert_equal(a[b], a) # boolean assignment a[b] = 1. assert_equal(a, [[1., 1., 1.]]) def test_boolean_assignment_value_mismatch(self): # A boolean assignment should fail when the shape of the values # cannot be broadcast to the subscription. 
(see also gh-3458) a = np.arange(4) def f(a, v): a[a > -1] = v assert_raises(ValueError, f, a, []) assert_raises(ValueError, f, a, [1, 2, 3]) assert_raises(ValueError, f, a[:1], [1, 2, 3]) def test_boolean_assignment_needs_api(self): # See also gh-7666 # This caused a segfault on Python 2 due to the GIL not being # held when the iterator does not need it, but the transfer function # does arr = np.zeros(1000) indx = np.zeros(1000, dtype=bool) indx[:100] = True arr[indx] = np.ones(100, dtype=object) expected = np.zeros(1000) expected[:100] = 1 assert_array_equal(arr, expected) def test_boolean_indexing_twodim(self): # Indexing a 2-dimensional array with # 2-dimensional boolean array a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) b = np.array([[ True, False, True], [False, True, False], [ True, False, True]]) assert_equal(a[b], [1, 3, 5, 7, 9]) assert_equal(a[b[1]], [[4, 5, 6]]) assert_equal(a[b[0]], a[b[2]]) # boolean assignment a[b] = 0 assert_equal(a, [[0, 2, 0], [4, 0, 6], [0, 8, 0]]) def test_boolean_indexing_list(self): # Regression test for #13715. It's a use-after-free bug which the # test won't directly catch, but it will show up in valgrind. a = np.array([1, 2, 3]) b = [True, False, True] # Two variants of the test because the first takes a fast path assert_equal(a[b], [1, 3]) assert_equal(a[None, b], [[1, 3]]) def test_reverse_strides_and_subspace_bufferinit(self): # This tests that the strides are not reversed for simple and # subspace fancy indexing. a = np.ones(5) b = np.zeros(5, dtype=np.intp)[::-1] c = np.arange(5)[::-1] a[b] = c # If the strides are not reversed, the 0 in the arange comes last. assert_equal(a[0], 0) # This also tests that the subspace buffer is initialized: a = np.ones((5, 2)) c = np.arange(10).reshape(5, 2)[::-1] a[b, :] = c assert_equal(a[0], [0, 1]) def test_reversed_strides_result_allocation(self): # Test a bug when calculating the output strides for a result array # when the subspace size was 1 (and test other cases as well) a = np.arange(10)[:, None] i = np.arange(10)[::-1] assert_array_equal(a[i], a[i.copy('C')]) a = np.arange(20).reshape(-1, 2) def test_uncontiguous_subspace_assignment(self): # During development there was a bug activating a skip logic # based on ndim instead of size. a = np.full((3, 4, 2), -1) b = np.full((3, 4, 2), -1) a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() assert_equal(a, b) def test_too_many_fancy_indices_special_case(self): # Just documents behaviour, this is a small limitation. a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32) def test_scalar_array_bool(self): # NumPy bools can be used as boolean index (python ones as of yet not) a = np.array(1) assert_equal(a[np.bool_(True)], a[np.array(True)]) assert_equal(a[np.bool_(False)], a[np.array(False)]) # After deprecating bools as integers: #a = np.array([0,1,2]) #assert_equal(a[True, :], a[None, :]) #assert_equal(a[:, True], a[:, None]) # #assert_(not np.may_share_memory(a, a[True, :])) def test_everything_returns_views(self): # Before `...` would return a itself. 
a = np.arange(5) assert_(a is not a[()]) assert_(a is not a[...]) assert_(a is not a[:]) def test_broaderrors_indexing(self): a = np.zeros((5, 5)) assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) def test_trivial_fancy_out_of_bounds(self): a = np.zeros(5) ind = np.ones(20, dtype=np.intp) ind[-1] = 10 assert_raises(IndexError, a.__getitem__, ind) assert_raises(IndexError, a.__setitem__, ind, 0) ind = np.ones(20, dtype=np.intp) ind[0] = 11 assert_raises(IndexError, a.__getitem__, ind) assert_raises(IndexError, a.__setitem__, ind, 0) def test_trivial_fancy_not_possible(self): # Test that the fast path for trivial assignment is not incorrectly # used when the index is not contiguous or 1D, see also gh-11467. a = np.arange(6) idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0] assert_array_equal(a[idx], idx) # this case must not go into the fast path, note that idx is # a non-contiuguous none 1D array here. a[idx] = -1 res = np.arange(6) res[0] = -1 res[3] = -1 assert_array_equal(a, res) def test_nonbaseclass_values(self): class SubClass(np.ndarray): def __array_finalize__(self, old): # Have array finalize do funny things self.fill(99) a = np.zeros((5, 5)) s = a.copy().view(type=SubClass) s.fill(1) a[[0, 1, 2, 3, 4], :] = s assert_((a == 1).all()) # Subspace is last, so transposing might want to finalize a[:, [0, 1, 2, 3, 4]] = s assert_((a == 1).all()) a.fill(0) a[...] = s assert_((a == 1).all()) def test_subclass_writeable(self): d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], dtype=[('target', 'S20'), ('V_mag', '>f4')]) ind = np.array([False, True, True], dtype=bool) assert_(d[ind].flags.writeable) ind = np.array([0, 1]) assert_(d[ind].flags.writeable) assert_(d[...].flags.writeable) assert_(d[0].flags.writeable) def test_memory_order(self): # This is not necessary to preserve. Memory layouts for # more complex indices are not as simple. 
a = np.arange(10) b = np.arange(10).reshape(5,2).T assert_(a[b].flags.f_contiguous) # Takes a different implementation branch: a = a.reshape(-1, 1) assert_(a[b, 0].flags.f_contiguous) def test_scalar_return_type(self): # Full scalar indices should return scalars and object # arrays should not call PyArray_Return on their items class Zero: # The most basic valid indexing def __index__(self): return 0 z = Zero() class ArrayLike: # Simple array, should behave like the array def __array__(self): return np.array(0) a = np.zeros(()) assert_(isinstance(a[()], np.float_)) a = np.zeros(1) assert_(isinstance(a[z], np.float_)) a = np.zeros((1, 1)) assert_(isinstance(a[z, np.array(0)], np.float_)) assert_(isinstance(a[z, ArrayLike()], np.float_)) # And object arrays do not call it too often: b = np.array(0) a = np.array(0, dtype=object) a[()] = b assert_(isinstance(a[()], np.ndarray)) a = np.array([b, None]) assert_(isinstance(a[z], np.ndarray)) a = np.array([[b, None]]) assert_(isinstance(a[z, np.array(0)], np.ndarray)) assert_(isinstance(a[z, ArrayLike()], np.ndarray)) def test_small_regressions(self): # Reference count of intp for index checks a = np.array([0]) if HAS_REFCOUNT: refcount = sys.getrefcount(np.dtype(np.intp)) # item setting always checks indices in separate function: a[np.array([0], dtype=np.intp)] = 1 a[np.array([0], dtype=np.uint8)] = 1 assert_raises(IndexError, a.__setitem__, np.array([1], dtype=np.intp), 1) assert_raises(IndexError, a.__setitem__, np.array([1], dtype=np.uint8), 1) if HAS_REFCOUNT: assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) def test_unaligned(self): v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] d = v.view(np.dtype("S8")) # unaligned source x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] x = x.view(np.dtype("S8")) x[...] = np.array("b" * 8, dtype="S") b = np.arange(d.size) #trivial assert_equal(d[b], d) d[b] = x # nontrivial # unaligned index array b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] b = b.view(np.intp)[:d.size] b[...] = np.arange(d.size) assert_equal(d[b.astype(np.int16)], d) d[b.astype(np.int16)] = x # boolean d[b % 2 == 0] d[b % 2 == 0] = x[::2] def test_tuple_subclass(self): arr = np.ones((5, 5)) # A tuple subclass should also be an nd-index class TupleSubclass(tuple): pass index = ([1], [1]) index = TupleSubclass(index) assert_(arr[index].shape == (1,)) # Unlike the non nd-index: assert_(arr[index,].shape != (1,)) def test_broken_sequence_not_nd_index(self): # See gh-5063: # If we have an object which claims to be a sequence, but fails # on item getting, this should not be converted to an nd-index (tuple) # If this object happens to be a valid index otherwise, it should work # This object here is very dubious and probably bad though: class SequenceLike: def __index__(self): return 0 def __len__(self): return 1 def __getitem__(self, item): raise IndexError('Not possible') arr = np.arange(10) assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) # also test that field indexing does not segfault # for a similar reason, by indexing a structured array arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) def test_indexing_array_weird_strides(self): # See also gh-6221 # the shapes used here come from the issue and create the correct # size for the iterator buffering size. 
x = np.ones(10) x2 = np.ones((10, 2)) ind = np.arange(10)[:, None, None, None] ind = np.broadcast_to(ind, (10, 55, 4, 4)) # single advanced index case assert_array_equal(x[ind], x[ind.copy()]) # higher dimensional advanced index zind = np.zeros(4, dtype=np.intp) assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) def test_indexing_array_negative_strides(self): # From gh-8264, # core dumps if negative strides are used in iteration arro = np.zeros((4, 4)) arr = arro[::-1, ::-1] slices = (slice(None), [0, 1, 2, 3]) arr[slices] = 10 assert_array_equal(arr, 10.) class TestFieldIndexing: def test_scalar_return_type(self): # Field access on an array should return an array, even if it # is 0-d. a = np.zeros((), [('a','f8')]) assert_(isinstance(a['a'], np.ndarray)) assert_(isinstance(a[['a']], np.ndarray)) class TestBroadcastedAssignments: def assign(self, a, ind, val): a[ind] = val return a def test_prepending_ones(self): a = np.zeros((3, 2)) a[...] = np.ones((1, 3, 2)) # Fancy with subspace with and without transpose a[[0, 1, 2], :] = np.ones((1, 3, 2)) a[:, [0, 1]] = np.ones((1, 3, 2)) # Fancy without subspace (with broadcasting) a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) def test_prepend_not_one(self): assign = self.assign s_ = np.s_ a = np.zeros(5) # Too large and not only ones. assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) def test_simple_broadcasting_errors(self): assign = self.assign s_ = np.s_ a = np.zeros((5, 1)) assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) def test_index_is_larger(self): # Simple case of fancy index broadcasting of the index. a = np.zeros((5, 5)) a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] assert_((a[:3, :3] == [2, 3, 4]).all()) def test_broadcast_subspace(self): a = np.zeros((100, 100)) v = np.arange(100)[:,None] b = np.arange(100)[::-1] a[b] = v assert_((a[::-1] == v).all()) class TestSubclasses: def test_basic(self): # Test that indexing in various ways produces SubClass instances, # and that the base is set up correctly: the original subclass # instance for views, and a new ndarray for advanced/boolean indexing # where a copy was made (latter a regression test for gh-11983). 
class SubClass(np.ndarray): pass a = np.arange(5) s = a.view(SubClass) s_slice = s[:3] assert_(type(s_slice) is SubClass) assert_(s_slice.base is s) assert_array_equal(s_slice, a[:3]) s_fancy = s[[0, 1, 2]] assert_(type(s_fancy) is SubClass) assert_(s_fancy.base is not s) assert_(type(s_fancy.base) is np.ndarray) assert_array_equal(s_fancy, a[[0, 1, 2]]) assert_array_equal(s_fancy.base, a[[0, 1, 2]]) s_bool = s[s > 0] assert_(type(s_bool) is SubClass) assert_(s_bool.base is not s) assert_(type(s_bool.base) is np.ndarray) assert_array_equal(s_bool, a[a > 0]) assert_array_equal(s_bool.base, a[a > 0]) def test_fancy_on_read_only(self): # Test that fancy indexing on read-only SubClass does not make a # read-only copy (gh-14132) class SubClass(np.ndarray): pass a = np.arange(5) s = a.view(SubClass) s.flags.writeable = False s_fancy = s[[0, 1, 2]] assert_(s_fancy.flags.writeable) def test_finalize_gets_full_info(self): # Array finalize should be called on the filled array. class SubClass(np.ndarray): def __array_finalize__(self, old): self.finalize_status = np.array(self) self.old = old s = np.arange(10).view(SubClass) new_s = s[:3] assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) new_s = s[[0,1,2,3]] assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) new_s = s[s > 0] assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) class TestFancyIndexingCast: def test_boolean_index_cast_assign(self): # Setup the boolean index and float arrays. shape = (8, 63) bool_index = np.zeros(shape).astype(bool) bool_index[0, 1] = True zero_array = np.zeros(shape) # Assigning float is fine. zero_array[bool_index] = np.array([1]) assert_equal(zero_array[0, 1], 1) # Fancy indexing works, although we get a cast warning. assert_warns(np.ComplexWarning, zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) assert_equal(zero_array[0, 1], 2) # No complex part # Cast complex to float, throwing away the imaginary portion. assert_warns(np.ComplexWarning, zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) class TestFancyIndexingEquivalence: def test_object_assign(self): # Check that the field and object special case using copyto is active. # The right hand side cannot be converted to an array here. a = np.arange(5, dtype=object) b = a.copy() a[:3] = [1, (1,2), 3] b[[0, 1, 2]] = [1, (1,2), 3] assert_array_equal(a, b) # test same for subspace fancy indexing b = np.arange(5, dtype=object)[None, :] b[[0], :3] = [[1, (1,2), 3]] assert_array_equal(a, b[0]) # Check that swapping of axes works. # There was a bug that made the later assignment throw a ValueError # do to an incorrectly transposed temporary right hand side (gh-5714) b = b.T b[:3, [0]] = [[1], [(1,2)], [3]] assert_array_equal(a, b[:, 0]) # Another test for the memory order of the subspace arr = np.ones((3, 4, 5), dtype=object) # Equivalent slicing assignment for comparison cmp_arr = arr.copy() cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] arr[[0], ...] = [[[1], [2], [3], [4]]] assert_array_equal(arr, cmp_arr) arr = arr.copy('F') arr[[0], ...] = [[[1], [2], [3], [4]]] assert_array_equal(arr, cmp_arr) def test_cast_equivalence(self): # Yes, normal slicing uses unsafe casting. 
a = np.arange(5) b = a.copy() a[:3] = np.array(['2', '-3', '-1']) b[[0, 2, 1]] = np.array(['2', '-1', '-3']) assert_array_equal(a, b) # test the same for subspace fancy indexing b = np.arange(5)[None, :] b[[0], :3] = np.array([['2', '-3', '-1']]) assert_array_equal(a, b[0]) class TestMultiIndexingAutomated: """ These tests use code to mimic the C-Code indexing for selection. NOTE: * This still lacks tests for complex item setting. * If you change behavior of indexing, you might want to modify these tests to try more combinations. * Behavior was written to match numpy version 1.8. (though a first version matched 1.7.) * Only tuple indices are supported by the mimicking code. (and tested as of writing this) * Error types should match most of the time as long as there is only one error. For multiple errors, what gets raised will usually not be the same one. They are *not* tested. Update 2016-11-30: It is probably not worth maintaining this test indefinitely and it can be dropped if maintenance becomes a burden. """ def setup(self): self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) self.b = np.empty((3, 0, 5, 6)) self.complex_indices = ['skip', Ellipsis, 0, # Boolean indices, up to 3-d for some special cases of eating up # dimensions, also need to test all False np.array([True, False, False]), np.array([[True, False], [False, True]]), np.array([[[False, False], [False, False]]]), # Some slices: slice(-5, 5, 2), slice(1, 1, 100), slice(4, -1, -2), slice(None, None, -3), # Some Fancy indexes: np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast np.array([0, 1, -2]), np.array([[2], [0], [1]]), np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), np.array([2, -1], dtype=np.int8), np.zeros([1]*31, dtype=int), # trigger too large array. np.array([0., 1.])] # invalid datatype # Some simpler indices that still cover a bit more self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip'] # Very simple ones to fill the rest: self.fill_indices = [slice(None, None), 0] def _get_multi_index(self, arr, indices): """Mimic multi dimensional indexing. Parameters ---------- arr : ndarray Array to be indexed. indices : tuple of index objects Returns ------- out : ndarray An array equivalent to the indexing operation (but always a copy). `arr[indices]` should be identical. no_copy : bool Whether the indexing operation requires a copy. If this is `True`, `np.may_share_memory(arr, arr[indices])` should be `True` (with some exceptions for scalars and possibly 0-d arrays). Notes ----- While the function may mostly match the errors of normal indexing this is generally not the case. """ in_indices = list(indices) indices = [] # if False, this is a fancy or boolean index no_copy = True # number of fancy/scalar indexes that are not consecutive num_fancy = 0 # number of dimensions indexed by a "fancy" index fancy_dim = 0 # NOTE: This is a funny twist (and probably OK to change). # The boolean array has illegal indexes, but this is # allowed if the broadcast fancy-indices are 0-sized. # This variable is to catch that case. error_unless_broadcast_to_empty = False # We need to handle Ellipsis and make arrays from indices, also # check if this is fancy indexing (set no_copy). ndim = 0 ellipsis_pos = None # define here mostly to replace all but first. 
for i, indx in enumerate(in_indices): if indx is None: continue if isinstance(indx, np.ndarray) and indx.dtype == bool: no_copy = False if indx.ndim == 0: raise IndexError # boolean indices can have higher dimensions ndim += indx.ndim fancy_dim += indx.ndim continue if indx is Ellipsis: if ellipsis_pos is None: ellipsis_pos = i continue # do not increment ndim counter raise IndexError if isinstance(indx, slice): ndim += 1 continue if not isinstance(indx, np.ndarray): # This could be open for changes in numpy. # numpy should maybe raise an error if casting to intp # is not safe. It rejects np.array([1., 2.]) but not # [1., 2.] as index (same for ie. np.take). # (Note the importance of empty lists if changing this here) try: indx = np.array(indx, dtype=np.intp) except ValueError: raise IndexError in_indices[i] = indx elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': raise IndexError('arrays used as indices must be of ' 'integer (or boolean) type') if indx.ndim != 0: no_copy = False ndim += 1 fancy_dim += 1 if arr.ndim - ndim < 0: # we can't take more dimensions then we have, not even for 0-d # arrays. since a[()] makes sense, but not a[(),]. We will # raise an error later on, unless a broadcasting error occurs # first. raise IndexError if ndim == 0 and None not in in_indices: # Well we have no indexes or one Ellipsis. This is legal. return arr.copy(), no_copy if ellipsis_pos is not None: in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] * (arr.ndim - ndim)) for ax, indx in enumerate(in_indices): if isinstance(indx, slice): # convert to an index array indx = np.arange(*indx.indices(arr.shape[ax])) indices.append(['s', indx]) continue elif indx is None: # this is like taking a slice with one element from a new axis: indices.append(['n', np.array([0], dtype=np.intp)]) arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) continue if isinstance(indx, np.ndarray) and indx.dtype == bool: if indx.shape != arr.shape[ax:ax+indx.ndim]: raise IndexError try: flat_indx = np.ravel_multi_index(np.nonzero(indx), arr.shape[ax:ax+indx.ndim], mode='raise') except Exception: error_unless_broadcast_to_empty = True # fill with 0s instead, and raise error later flat_indx = np.array([0]*indx.sum(), dtype=np.intp) # concatenate axis into a single one: if indx.ndim != 0: arr = arr.reshape((arr.shape[:ax] + (np.prod(arr.shape[ax:ax+indx.ndim]),) + arr.shape[ax+indx.ndim:])) indx = flat_indx else: # This could be changed, a 0-d boolean index can # make sense (even outside the 0-d indexed array case) # Note that originally this is could be interpreted as # integer in the full integer special case. raise IndexError else: # If the index is a singleton, the bounds check is done # before the broadcasting. This used to be different in <1.9 if indx.ndim == 0: if indx >= arr.shape[ax] or indx < -arr.shape[ax]: raise IndexError if indx.ndim == 0: # The index is a scalar. This used to be two fold, but if # fancy indexing was active, the check was done later, # possibly after broadcasting it away (1.7. or earlier). # Now it is always done. if indx >= arr.shape[ax] or indx < - arr.shape[ax]: raise IndexError if (len(indices) > 0 and indices[-1][0] == 'f' and ax != ellipsis_pos): # NOTE: There could still have been a 0-sized Ellipsis # between them. Checked that with ellipsis_pos. indices[-1].append(indx) else: # We have a fancy index that is not after an existing one. 
# NOTE: A 0-d array triggers this as well, while one may # expect it to not trigger it, since a scalar would not be # considered fancy indexing. num_fancy += 1 indices.append(['f', indx]) if num_fancy > 1 and not no_copy: # We have to flush the fancy indexes left new_indices = indices[:] axes = list(range(arr.ndim)) fancy_axes = [] new_indices.insert(0, ['f']) ni = 0 ai = 0 for indx in indices: ni += 1 if indx[0] == 'f': new_indices[0].extend(indx[1:]) del new_indices[ni] ni -= 1 for ax in range(ai, ai + len(indx[1:])): fancy_axes.append(ax) axes.remove(ax) ai += len(indx) - 1 # axis we are at indices = new_indices # and now we need to transpose arr: arr = arr.transpose(*(fancy_axes + axes)) # We only have one 'f' index now and arr is transposed accordingly. # Now handle newaxis by reshaping... ax = 0 for indx in indices: if indx[0] == 'f': if len(indx) == 1: continue # First of all, reshape arr to combine fancy axes into one: orig_shape = arr.shape orig_slice = orig_shape[ax:ax + len(indx[1:])] arr = arr.reshape((arr.shape[:ax] + (np.prod(orig_slice).astype(int),) + arr.shape[ax + len(indx[1:]):])) # Check if broadcasting works res = np.broadcast(*indx[1:]) # unfortunately the indices might be out of bounds. So check # that first, and use mode='wrap' then. However only if # there are any indices... if res.size != 0: if error_unless_broadcast_to_empty: raise IndexError for _indx, _size in zip(indx[1:], orig_slice): if _indx.size == 0: continue if np.any(_indx >= _size) or np.any(_indx < -_size): raise IndexError if len(indx[1:]) == len(orig_slice): if np.product(orig_slice) == 0: # Work around for a crash or IndexError with 'wrap' # in some 0-sized cases. try: mi = np.ravel_multi_index(indx[1:], orig_slice, mode='raise') except Exception: # This happens with 0-sized orig_slice (sometimes?) # here it is a ValueError, but indexing gives a: raise IndexError('invalid index into 0-sized') else: mi = np.ravel_multi_index(indx[1:], orig_slice, mode='wrap') else: # Maybe never happens... raise ValueError arr = arr.take(mi.ravel(), axis=ax) try: arr = arr.reshape((arr.shape[:ax] + mi.shape + arr.shape[ax+1:])) except ValueError: # too many dimensions, probably raise IndexError ax += mi.ndim continue # If we are here, we have a 1D array for take: arr = arr.take(indx[1], axis=ax) ax += 1 return arr, no_copy def _check_multi_index(self, arr, index): """Check a multi index item getting and simple setting. Parameters ---------- arr : ndarray Array to be indexed, must be a reshaped arange. index : tuple of indexing objects Index being tested. """ # Test item getting try: mimic_get, no_copy = self._get_multi_index(arr, index) except Exception as e: if HAS_REFCOUNT: prev_refcount = sys.getrefcount(arr) assert_raises(type(e), arr.__getitem__, index) assert_raises(type(e), arr.__setitem__, index, 0) if HAS_REFCOUNT: assert_equal(prev_refcount, sys.getrefcount(arr)) return self._compare_index_result(arr, index, mimic_get, no_copy) def _check_single_index(self, arr, index): """Check a single index item getting and simple setting. Parameters ---------- arr : ndarray Array to be indexed, must be an arange. index : indexing object Index being tested. Must be a single index and not a tuple of indexing objects (see also `_check_multi_index`). 
""" try: mimic_get, no_copy = self._get_multi_index(arr, (index,)) except Exception as e: if HAS_REFCOUNT: prev_refcount = sys.getrefcount(arr) assert_raises(type(e), arr.__getitem__, index) assert_raises(type(e), arr.__setitem__, index, 0) if HAS_REFCOUNT: assert_equal(prev_refcount, sys.getrefcount(arr)) return self._compare_index_result(arr, index, mimic_get, no_copy) def _compare_index_result(self, arr, index, mimic_get, no_copy): """Compare mimicked result to indexing result. """ arr = arr.copy() indexed_arr = arr[index] assert_array_equal(indexed_arr, mimic_get) # Check if we got a view, unless its a 0-sized or 0-d array. # (then its not a view, and that does not matter) if indexed_arr.size != 0 and indexed_arr.ndim != 0: assert_(np.may_share_memory(indexed_arr, arr) == no_copy) # Check reference count of the original array if HAS_REFCOUNT: if no_copy: # refcount increases by one: assert_equal(sys.getrefcount(arr), 3) else: assert_equal(sys.getrefcount(arr), 2) # Test non-broadcast setitem: b = arr.copy() b[index] = mimic_get + 1000 if b.size == 0: return # nothing to compare here... if no_copy and indexed_arr.ndim != 0: # change indexed_arr in-place to manipulate original: indexed_arr += 1000 assert_array_equal(arr, b) return # Use the fact that the array is originally an arange: arr.flat[indexed_arr.ravel()] += 1000 assert_array_equal(arr, b) def test_boolean(self): a = np.array(5) assert_equal(a[np.array(True)], 5) a[np.array(True)] = 1 assert_equal(a, 1) # NOTE: This is different from normal broadcasting, as # arr[boolean_array] works like in a multi index. Which means # it is aligned to the left. This is probably correct for # consistency with arr[boolean_array,] also no broadcasting # is done at all self._check_multi_index( self.a, (np.zeros_like(self.a, dtype=bool),)) self._check_multi_index( self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) self._check_multi_index( self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) def test_multidim(self): # Automatically test combinations with complex indexes on 2nd (or 1st) # spot and the simple ones in one other spot. with warnings.catch_warnings(): # This is so that np.array(True) is not accepted in a full integer # index, when running the file separately. warnings.filterwarnings('error', '', DeprecationWarning) warnings.filterwarnings('error', '', np.VisibleDeprecationWarning) def isskip(idx): return isinstance(idx, str) and idx == "skip" for simple_pos in [0, 2, 3]: tocheck = [self.fill_indices, self.complex_indices, self.fill_indices, self.fill_indices] tocheck[simple_pos] = self.simple_indices for index in product(*tocheck): index = tuple(i for i in index if not isskip(i)) self._check_multi_index(self.a, index) self._check_multi_index(self.b, index) # Check very simple item getting: self._check_multi_index(self.a, (0, 0, 0, 0)) self._check_multi_index(self.b, (0, 0, 0, 0)) # Also check (simple cases of) too many indices: assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) def test_1d(self): a = np.arange(10) for index in self.complex_indices: self._check_single_index(a, index) class TestFloatNonIntegerArgument: """ These test that ``TypeError`` is raised when you try to use non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. 
""" def test_valid_indexing(self): # These should raise no errors. a = np.array([[[5]]]) a[np.array([0])] a[[0, 0]] a[:, [0, 0]] a[:, 0,:] a[:,:,:] def test_valid_slicing(self): # These should raise no errors. a = np.array([[[5]]]) a[::] a[0:] a[:2] a[0:2] a[::2] a[1::2] a[:2:2] a[1:2:2] def test_non_integer_argument_errors(self): a = np.array([[5]]) assert_raises(TypeError, np.reshape, a, (1., 1., -1)) assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) assert_raises(TypeError, np.take, a, [0], 1.) assert_raises(TypeError, np.take, a, [0], np.float64(1.)) def test_non_integer_sequence_multiplication(self): # NumPy scalar sequence multiply should not work with non-integers def mult(a, b): return a * b assert_raises(TypeError, mult, [1], np.float_(3)) # following should be OK mult([1], np.int_(3)) def test_reduce_axis_float_index(self): d = np.zeros((3,3,3)) assert_raises(TypeError, np.min, d, 0.5) assert_raises(TypeError, np.min, d, (0.5, 1)) assert_raises(TypeError, np.min, d, (1, 2.2)) assert_raises(TypeError, np.min, d, (.2, 1.2)) class TestBooleanIndexing: # Using a boolean as integer argument/indexing is an error. def test_bool_as_int_argument_errors(self): a = np.array([[[1]]]) assert_raises(TypeError, np.reshape, a, (True, -1)) assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1)) # Note that operator.index(np.array(True)) does not work, a boolean # array is thus also deprecated, but not with the same message: assert_raises(TypeError, operator.index, np.array(True)) assert_warns(DeprecationWarning, operator.index, np.True_) assert_raises(TypeError, np.take, args=(a, [0], False)) def test_boolean_indexing_weirdness(self): # Weird boolean indexing things a = np.ones((2, 3, 4)) a[False, True, ...].shape == (0, 2, 3, 4) a[True, [0, 1], True, True, [1], [[2]]] == (1, 2) assert_raises(IndexError, lambda: a[False, [0, 1], ...]) class TestArrayToIndexDeprecation: """Creating an an index from array not 0-D is an error. """ def test_array_to_index_error(self): # so no exception is expected. The raising is effectively tested above. a = np.array([[[1]]]) assert_raises(TypeError, operator.index, np.array([1])) assert_raises(TypeError, np.reshape, a, (a, -1)) assert_raises(TypeError, np.take, a, [0], a) class TestNonIntegerArrayLike: """Tests that array_likes only valid if can safely cast to integer. For instance, lists give IndexError when they cannot be safely cast to an integer. """ def test_basic(self): a = np.arange(10) assert_raises(IndexError, a.__getitem__, [0.5, 1.5]) assert_raises(IndexError, a.__getitem__, (['1', '2'],)) # The following is valid a.__getitem__([]) class TestMultipleEllipsisError: """An index can only have a single ellipsis. 
""" def test_basic(self): a = np.arange(10) assert_raises(IndexError, lambda: a[..., ...]) assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,)) assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) class TestCApiAccess: def test_getitem(self): subscript = functools.partial(array_indexing, 0) # 0-d arrays don't work: assert_raises(IndexError, subscript, np.ones(()), 0) # Out of bound values: assert_raises(IndexError, subscript, np.ones(10), 11) assert_raises(IndexError, subscript, np.ones(10), -11) assert_raises(IndexError, subscript, np.ones((10, 10)), 11) assert_raises(IndexError, subscript, np.ones((10, 10)), -11) a = np.arange(10) assert_array_equal(a[4], subscript(a, 4)) a = a.reshape(5, 2) assert_array_equal(a[-4], subscript(a, -4)) def test_setitem(self): assign = functools.partial(array_indexing, 1) # Deletion is impossible: assert_raises(ValueError, assign, np.ones(10), 0) # 0-d arrays don't work: assert_raises(IndexError, assign, np.ones(()), 0, 0) # Out of bound values: assert_raises(IndexError, assign, np.ones(10), 11, 0) assert_raises(IndexError, assign, np.ones(10), -11, 0) assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) a = np.arange(10) assign(a, 4, 10) assert_(a[4] == 10) a = a.reshape(5, 2) assign(a, 4, 10) assert_array_equal(a[-1], [10, 10])
import sys

import pytest

import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy import array, arange, nditer, all
from numpy.testing import (
    assert_, assert_equal, assert_array_equal, assert_raises,
    HAS_REFCOUNT, suppress_warnings
    )


def iter_multi_index(i):
    ret = []
    while not i.finished:
        ret.append(i.multi_index)
        i.iternext()
    return ret

def iter_indices(i):
    ret = []
    while not i.finished:
        ret.append(i.index)
        i.iternext()
    return ret

def iter_iterindices(i):
    ret = []
    while not i.finished:
        ret.append(i.iterindex)
        i.iternext()
    return ret

@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_iter_refcount():
    # Make sure the iterator doesn't leak

    # Basic
    a = arange(6)
    dt = np.dtype('f4').newbyteorder()
    rc_a = sys.getrefcount(a)
    rc_dt = sys.getrefcount(dt)
    with nditer(a, [], [['readwrite', 'updateifcopy']],
                casting='unsafe', op_dtypes=[dt]) as it:
        assert_(not it.iterationneedsapi)
        assert_(sys.getrefcount(a) > rc_a)
        assert_(sys.getrefcount(dt) > rc_dt)
    # del 'it'
    it = None
    assert_equal(sys.getrefcount(a), rc_a)
    assert_equal(sys.getrefcount(dt), rc_dt)

    # With a copy
    a = arange(6, dtype='f4')
    dt = np.dtype('f4')
    rc_a = sys.getrefcount(a)
    rc_dt = sys.getrefcount(dt)
    it = nditer(a, [], [['readwrite']], op_dtypes=[dt])
    rc2_a = sys.getrefcount(a)
    rc2_dt = sys.getrefcount(dt)
    it2 = it.copy()
    assert_(sys.getrefcount(a) > rc2_a)
    assert_(sys.getrefcount(dt) > rc2_dt)
    it = None
    assert_equal(sys.getrefcount(a), rc2_a)
    assert_equal(sys.getrefcount(dt), rc2_dt)
    it2 = None
    assert_equal(sys.getrefcount(a), rc_a)
    assert_equal(sys.getrefcount(dt), rc_dt)

    del it2  # avoid pyflakes unused variable warning

def test_iter_best_order():
    # The iterator should always find the iteration order
    # with increasing memory addresses

    # Test the ordering for 1-D to 5-D shapes
    for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
        a = arange(np.prod(shape))
        # Test each combination of positive and negative strides
        for dirs in range(2**len(shape)):
            dirs_index = [slice(None)]*len(shape)
            for bit in range(len(shape)):
                if ((2**bit) & dirs):
                    dirs_index[bit] = slice(None, None, -1)
            dirs_index = tuple(dirs_index)

            aview = a.reshape(shape)[dirs_index]
            # C-order
            i = nditer(aview, [], [['readonly']])
            assert_equal([x for x in i], a)
            # Fortran-order
            i = nditer(aview.T, [], [['readonly']])
            assert_equal([x for x in i], a)
            # Other order
            if len(shape) > 2:
                i = nditer(aview.swapaxes(0, 1), [], [['readonly']])
                assert_equal([x for x in i], a)

def test_iter_c_order():
    # Test forcing C order

    # Test the ordering for 1-D to 5-D shapes
    for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
        a = arange(np.prod(shape))
        # Test each combination of positive and negative strides
        for dirs in range(2**len(shape)):
            dirs_index = [slice(None)]*len(shape)
            for bit in range(len(shape)):
                if ((2**bit) & dirs):
                    dirs_index[bit] = slice(None, None, -1)
            dirs_index = tuple(dirs_index)

            aview = a.reshape(shape)[dirs_index]
            # C-order
            i = nditer(aview, order='C')
            assert_equal([x for x in i], aview.ravel(order='C'))
            # Fortran-order
            i = nditer(aview.T, order='C')
            assert_equal([x for x in i], aview.T.ravel(order='C'))
            # Other order
            if len(shape) > 2:
                i = nditer(aview.swapaxes(0, 1), order='C')
                assert_equal([x for x in i],
                             aview.swapaxes(0, 1).ravel(order='C'))

def test_iter_f_order():
    # Test forcing F order

    # Test the ordering for 1-D to 5-D shapes
    for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
        a = arange(np.prod(shape))
        # Test each combination of positive and negative strides
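        # `dirs` is used as a bit mask below: bit `b` set means axis `b` is
        # traversed backwards via a step of -1, so the loop enumerates all
        # 2**ndim combinations of axis directions, e.g. for two axes:
        #     dirs == 0 -> a[:, :],    dirs == 1 -> a[::-1, :],
        #     dirs == 2 -> a[:, ::-1], dirs == 3 -> a[::-1, ::-1]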
for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='F') assert_equal([x for x in i], aview.ravel(order='F')) # Fortran-order i = nditer(aview.T, order='F') assert_equal([x for x in i], aview.T.ravel(order='F')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='F') assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='F')) def test_iter_c_or_f_order(): # Test forcing any contiguous (C or F) order # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='A') assert_equal([x for x in i], aview.ravel(order='A')) # Fortran-order i = nditer(aview.T, order='A') assert_equal([x for x in i], aview.T.ravel(order='A')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='A') assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='A')) def test_iter_best_order_multi_index_1d(): # The multi-indices should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) # 1D reversed order i = nditer(a[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) def test_iter_best_order_multi_index_2d(): # The multi-indices should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) def test_iter_best_order_multi_index_3d(): # The multi-indices should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), (1, 
0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) # 3D reversed Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) def test_iter_best_order_c_index_1d(): # The C index should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3]) # 1D reversed order i = nditer(a[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 2, 1, 0]) def test_iter_best_order_c_index_2d(): # The C index should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) def test_iter_best_order_c_index_3d(): # The C index should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) 
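    # With 'c_index', iteration still proceeds in memory order, while
    # `i.index` reports where each visited element would live in a C-ordered
    # view of the operand -- hence the permuted sequences checked below.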
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) # 3D reversed Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) def test_iter_best_order_f_index_1d(): # The Fortran index should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3]) # 1D reversed order i = nditer(a[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 2, 1, 0]) def test_iter_best_order_f_index_2d(): # The Fortran index should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) def test_iter_best_order_f_index_3d(): # The Fortran index should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) # 3D reversed Fortran-order i = 
nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) def test_iter_no_inner_full_coalesce(): # Check no_inner iterators which coalesce into a single inner loop for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: size = np.prod(shape) a = arange(size) # Test each combination of forward and backwards indexing for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) # Fortran-order i = nditer(aview.T, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) def test_iter_no_inner_dim_coalescing(): # Check no_inner iterators whose dimensions may not coalesce completely # Skipping the last element in a dimension prevents coalescing # with the next-bigger dimension a = arange(24).reshape(2, 3, 4)[:,:, :-1] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (3,)) a = arange(24).reshape(2, 3, 4)[:, :-1,:] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (8,)) a = arange(24).reshape(2, 3, 4)[:-1,:,:] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (12,)) # Even with lots of 1-sized dimensions, should still coalesce a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (24,)) def test_iter_dim_coalescing(): # Check that the correct number of dimensions are coalesced # Tracking a multi-index disables coalescing a = arange(24).reshape(2, 3, 4) i = nditer(a, ['multi_index'], [['readonly']]) assert_equal(i.ndim, 3) # A tracked index can allow coalescing if it's compatible with the array a3d = arange(24).reshape(2, 3, 4) i = nditer(a3d, ['c_index'], [['readonly']]) assert_equal(i.ndim, 1) i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) assert_equal(i.ndim, 3) i = nditer(a3d.T, ['c_index'], [['readonly']]) assert_equal(i.ndim, 3) i = nditer(a3d.T, ['f_index'], [['readonly']]) assert_equal(i.ndim, 1) i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) assert_equal(i.ndim, 3) # When C or F order is forced, coalescing may still occur a3d = arange(24).reshape(2, 3, 4) i = nditer(a3d, order='C') assert_equal(i.ndim, 1) i = nditer(a3d.T, order='C') assert_equal(i.ndim, 3) i = nditer(a3d, order='F') assert_equal(i.ndim, 3) i = nditer(a3d.T, order='F') assert_equal(i.ndim, 1) i = nditer(a3d, order='A') assert_equal(i.ndim, 1) i = nditer(a3d.T, order='A') assert_equal(i.ndim, 1) def test_iter_broadcasting(): # Standard NumPy broadcasting rules # 1D with scalar i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) 
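    # The 0-d np.int32 operand broadcasts against the 1-d array, so the
    # iterator's shape and size come entirely from arange(6).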
assert_equal(i.itersize, 6) assert_equal(i.shape, (6,)) # 2D with scalar i = nditer([arange(6).reshape(2, 3), np.int32(2)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 1D i = nditer([arange(6).reshape(2, 3), arange(3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) i = nditer([arange(2).reshape(2, 1), arange(3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 2D i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 3D with scalar i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 1D i = nditer([arange(3), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3), arange(8).reshape(4, 2, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 2D i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 3D i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), arange(4).reshape(4, 1, 1)], ['multi_index'], [['readonly']]*3) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) def test_iter_itershape(): # Check that allocated outputs work with a specified shape a = np.arange(6, dtype='i2').reshape(2, 3) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (2, 3, 4)) assert_equal(i.operands[1].strides, (24, 8, 2)) i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (3, 2, 4)) assert_equal(i.operands[1].strides, (8, 24, 2)) i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], order='F', op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (3, 2, 4)) assert_equal(i.operands[1].strides, (2, 6, 12)) # If we specify 1 in the itershape, it shouldn't allow broadcasting # of that dimension to a bigger value assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, 1, 4)) # Test bug that for no op_axes but itershape, they are NULLed correctly i = np.nditer([np.ones(2), None, None], itershape=(2,)) def test_iter_broadcasting_errors(): # Check that errors are thrown for bad broadcasting shapes # 1D with 1D assert_raises(ValueError, nditer, [arange(2), arange(3)], [], 
[['readonly']]*2) # 2D with 1D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(2)], [], [['readonly']]*2) # 2D with 2D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], [], [['readonly']]*2) assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], [], [['readonly']]*2) # 3D with 3D assert_raises(ValueError, nditer, [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], [], [['readonly']]*2) assert_raises(ValueError, nditer, [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], [], [['readonly']]*2) # Verify that the error message mentions the right shapes try: nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 3), arange(6).reshape(2, 3)], [], [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain the shape of the 3rd operand assert_(msg.find('(2,3)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) # The message should contain the broadcast shape assert_(msg.find('(1,2,3)') >= 0, 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) try: nditer([arange(6).reshape(2, 3), arange(2)], [], [['readonly'], ['readonly']], op_axes=[[0, 1], [0, np.newaxis]], itershape=(4, 3)) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain "shape->remappedshape" for each operand assert_(msg.find('(2,3)->(2,3)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) assert_(msg.find('(2,)->(2,newaxis)') >= 0, ('Message "%s" doesn\'t contain remapped operand shape' + '(2,)->(2,newaxis)') % msg) # The message should contain the itershape parameter assert_(msg.find('(4,3)') >= 0, 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) try: nditer([np.zeros((2, 1, 1)), np.zeros((2,))], [], [['writeonly', 'no_broadcast'], ['readonly']]) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain the shape of the bad operand assert_(msg.find('(2,1,1)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) # The message should contain the broadcast shape assert_(msg.find('(2,1,2)') >= 0, 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) def test_iter_flags_errors(): # Check that bad combinations of flags produce errors a = arange(6) # Not enough operands assert_raises(ValueError, nditer, [], [], []) # Too many operands assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) # Bad global flag assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) # Bad op flag assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) # Bad order parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') # Bad casting parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') # op_flags must match ops assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) # Cannot track both a C and an F index assert_raises(ValueError, nditer, a, ['c_index', 'f_index'], [['readonly']]) # Inner iteration and multi-indices/indices are incompatible assert_raises(ValueError, nditer, a, ['external_loop', 'multi_index'], [['readonly']]) assert_raises(ValueError, nditer, a, ['external_loop', 'c_index'], [['readonly']]) assert_raises(ValueError, nditer, a, ['external_loop', 'f_index'], [['readonly']]) # Must specify 
exactly one of readwrite/readonly/writeonly per operand assert_raises(ValueError, nditer, a, [], [[]]) assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly', 'readwrite']]) # Python scalars are always readonly assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) # Array scalars are always readonly assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) # Check readonly array a.flags.writeable = False assert_raises(ValueError, nditer, a, [], [['writeonly']]) assert_raises(ValueError, nditer, a, [], [['readwrite']]) a.flags.writeable = True # Multi-indices available only with the multi_index flag i = nditer(arange(6), [], [['readonly']]) assert_raises(ValueError, lambda i:i.multi_index, i) # Index available only with an index flag assert_raises(ValueError, lambda i:i.index, i) # GotoCoords and GotoIndex incompatible with buffering or no_inner def assign_multi_index(i): i.multi_index = (0,) def assign_index(i): i.index = 0 def assign_iterindex(i): i.iterindex = 0 def assign_iterrange(i): i.iterrange = (0, 1) i = nditer(arange(6), ['external_loop']) assert_raises(ValueError, assign_multi_index, i) assert_raises(ValueError, assign_index, i) assert_raises(ValueError, assign_iterindex, i) assert_raises(ValueError, assign_iterrange, i) i = nditer(arange(6), ['buffered']) assert_raises(ValueError, assign_multi_index, i) assert_raises(ValueError, assign_index, i) assert_raises(ValueError, assign_iterrange, i) # Can't iterate if size is zero assert_raises(ValueError, nditer, np.array([])) def test_iter_slice(): a, b, c = np.arange(3), np.arange(3), np.arange(3.) i = nditer([a, b, c], [], ['readwrite']) with i: i[0:2] = (3, 3) assert_equal(a, [3, 1, 2]) assert_equal(b, [3, 1, 2]) assert_equal(c, [0, 1, 2]) i[1] = 12 assert_equal(i[0:2], [3, 12]) def test_iter_assign_mapping(): a = np.arange(24, dtype='f8').reshape(2, 3, 4).T it = np.nditer(a, [], [['readwrite', 'updateifcopy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) with it: it.operands[0][...] = 3 it.operands[0][...] = 14 assert_equal(a, 14) it = np.nditer(a, [], [['readwrite', 'updateifcopy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) with it: x = it.operands[0][-1:1] x[...] = 14 it.operands[0][...] 
= -1234 assert_equal(a, -1234) # check for no warnings on dealloc x = None it = None def test_iter_nbo_align_contig(): # Check that byte order, alignment, and contig changes work # Byte order change by requesting a specific dtype a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() assert_(a.dtype.byteorder != au.dtype.byteorder) i = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) with i: # context manager triggers UPDATEIFCOPY on i at exit assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) assert_equal(i.operands[0], a) i.operands[0][:] = 2 assert_equal(au, [2]*6) del i # should not raise a warning # Byte order change by requesting NBO a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() assert_(a.dtype.byteorder != au.dtype.byteorder) with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], casting='equiv') as i: # context manager triggers UPDATEIFCOPY on i at exit assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) assert_equal(i.operands[0], a) i.operands[0][:] = 12345 i.operands[0][:] = 2 assert_equal(au, [2]*6) # Unaligned input a = np.zeros((6*4+1,), dtype='i1')[1:] a.dtype = 'f4' a[:] = np.arange(6, dtype='f4') assert_(not a.flags.aligned) # Without 'aligned', shouldn't copy i = nditer(a, [], [['readonly']]) assert_(not i.operands[0].flags.aligned) assert_equal(i.operands[0], a) # With 'aligned', should make a copy with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: assert_(i.operands[0].flags.aligned) # context manager triggers UPDATEIFCOPY on i at exit assert_equal(i.operands[0], a) i.operands[0][:] = 3 assert_equal(a, [3]*6) # Discontiguous input a = arange(12) # If it is contiguous, shouldn't copy i = nditer(a[:6], [], [['readonly']]) assert_(i.operands[0].flags.contiguous) assert_equal(i.operands[0], a[:6]) # If it isn't contiguous, should buffer i = nditer(a[::2], ['buffered', 'external_loop'], [['readonly', 'contig']], buffersize=10) assert_(i[0].flags.contiguous) assert_equal(i[0], a[::2]) def test_iter_array_cast(): # Check that arrays are cast as requested # No cast 'f4' -> 'f4' a = np.arange(6, dtype='f4').reshape(2, 3) i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) with i: assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f4')) # Byte-order cast '<f4' -> '>f4' a = np.arange(6, dtype='<f4').reshape(2, 3) with nditer(a, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('>f4')]) as i: assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('>f4')) # Safe case 'f4' -> 'f8' a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) i = nditer(a, [], [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f8')) # The memory layout of the temporary should match a (a is (48,4,16)) # except negative strides get flipped to positive strides. 
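    # f8 has twice the itemsize of f4, so each stride of the layout-matched
    # copy doubles: (48, 4, 16) -> (96, 8, 32).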
assert_equal(i.operands[0].strides, (96, 8, 32)) a = a[::-1,:, ::-1] i = nditer(a, [], [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f8')) assert_equal(i.operands[0].strides, (96, 8, 32)) # Same-kind cast 'f8' -> 'f4' -> 'f8' a = np.arange(24, dtype='f8').reshape(2, 3, 4).T with nditer(a, [], [['readwrite', 'updateifcopy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) as i: assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f4')) assert_equal(i.operands[0].strides, (4, 16, 48)) # Check that WRITEBACKIFCOPY is activated at exit i.operands[0][2, 1, 1] = -12.5 assert_(a[2, 1, 1] != -12.5) assert_equal(a[2, 1, 1], -12.5) a = np.arange(6, dtype='i4')[::-2] with nditer(a, [], [['writeonly', 'updateifcopy']], casting='unsafe', op_dtypes=[np.dtype('f4')]) as i: assert_equal(i.operands[0].dtype, np.dtype('f4')) # Even though the stride was negative in 'a', it # becomes positive in the temporary assert_equal(i.operands[0].strides, (4,)) i.operands[0][:] = [1, 2, 3] assert_equal(a, [1, 2, 3]) def test_iter_array_cast_errors(): # Check that invalid casts are caught # Need to enable copying for casts to occur assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], [['readonly']], op_dtypes=[np.dtype('f8')]) # Also need to allow casting for casts to occur assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], [['readonly', 'copy']], casting='no', op_dtypes=[np.dtype('f8')]) assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], [['readonly', 'copy']], casting='equiv', op_dtypes=[np.dtype('f8')]) assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], [['writeonly', 'updateifcopy']], casting='no', op_dtypes=[np.dtype('f4')]) assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], [['writeonly', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) # '<f4' -> '>f4' should not work with casting='no' assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], [['readonly', 'copy']], casting='no', op_dtypes=[np.dtype('>f4')]) # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], [['readwrite', 'updateifcopy']], casting='safe', op_dtypes=[np.dtype('f8')]) assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], [['readwrite', 'updateifcopy']], casting='safe', op_dtypes=[np.dtype('f4')]) # 'f4' -> 'i4' is neither a safe nor a same-kind cast assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], [['readonly', 'copy']], casting='same_kind', op_dtypes=[np.dtype('i4')]) assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], [['writeonly', 'updateifcopy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) def test_iter_scalar_cast(): # Check that scalars are cast as requested # No cast 'f4' -> 'f4' i = nditer(np.float32(2.5), [], [['readonly']], op_dtypes=[np.dtype('f4')]) assert_equal(i.dtypes[0], np.dtype('f4')) assert_equal(i.value.dtype, np.dtype('f4')) assert_equal(i.value, 2.5) # Safe cast 'f4' -> 'f8' i = nditer(np.float32(2.5), [], [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.value.dtype, np.dtype('f8')) assert_equal(i.value, 2.5) # Same-kind cast 'f8' -> 'f4' i = nditer(np.float64(2.5), [], [['readonly', 'copy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) assert_equal(i.dtypes[0], np.dtype('f4')) assert_equal(i.value.dtype, np.dtype('f4')) assert_equal(i.value, 2.5) # Unsafe cast 'f8' -> 'i4' i 
= nditer(np.float64(3.0), [], [['readonly', 'copy']], casting='unsafe', op_dtypes=[np.dtype('i4')]) assert_equal(i.dtypes[0], np.dtype('i4')) assert_equal(i.value.dtype, np.dtype('i4')) assert_equal(i.value, 3) # Readonly scalars may be cast even without setting COPY or BUFFERED i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) assert_equal(i[0].dtype, np.dtype('f8')) assert_equal(i[0], 3.) def test_iter_scalar_cast_errors(): # Check that invalid casts are caught # Need to allow copying/buffering for write casts of scalars to occur assert_raises(TypeError, nditer, np.float32(2), [], [['readwrite']], op_dtypes=[np.dtype('f8')]) assert_raises(TypeError, nditer, 2.5, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) # 'f8' -> 'f4' isn't a safe cast if the value would overflow assert_raises(TypeError, nditer, np.float64(1e60), [], [['readonly']], casting='safe', op_dtypes=[np.dtype('f4')]) # 'f4' -> 'i4' is neither a safe nor a same-kind cast assert_raises(TypeError, nditer, np.float32(2), [], [['readonly']], casting='same_kind', op_dtypes=[np.dtype('i4')]) def test_iter_object_arrays_basic(): # Check that object arrays work obj = {'a':3,'b':'d'} a = np.array([[1, 2, 3], None, obj, None], dtype='O') if HAS_REFCOUNT: rc = sys.getrefcount(obj) # Need to allow references for object arrays assert_raises(TypeError, nditer, a) if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) i = nditer(a, ['refs_ok'], ['readonly']) vals = [x_[()] for x_ in i] assert_equal(np.array(vals, dtype='O'), a) vals, i, x = [None]*3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], ['readonly'], order='C') assert_(i.iterationneedsapi) vals = [x_[()] for x_ in i] assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) vals, i, x = [None]*3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], ['readwrite'], order='C') with i: for x in i: x[...] = None vals, i, x = [None]*3 if HAS_REFCOUNT: assert_(sys.getrefcount(obj) == rc-1) assert_equal(a, np.array([None]*4, dtype='O')) def test_iter_object_arrays_conversions(): # Conversions to/from objects a = np.arange(6, dtype='O') i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='i4') with i: for x in i: x[...] += 1 assert_equal(a, np.arange(6)+1) a = np.arange(6, dtype='i4') i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='O') with i: for x in i: x[...] += 1 assert_equal(a, np.arange(6)+1) # Non-contiguous object array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) a = a['a'] a[:] = np.arange(6) i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='i4') with i: for x in i: x[...] += 1 assert_equal(a, np.arange(6)+1) #Non-contiguous value array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) a = a['a'] a[:] = np.arange(6) + 98172488 i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='O') with i: ob = i[0][()] if HAS_REFCOUNT: rc = sys.getrefcount(ob) for x in i: x[...] 
+= 1 if HAS_REFCOUNT: assert_(sys.getrefcount(ob) == rc-1) assert_equal(a, np.arange(6)+98172489) def test_iter_common_dtype(): # Check that the iterator finds a common data type correctly i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], ['common_dtype'], [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], ['common_dtype'], [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], ['common_dtype'], [['readonly', 'copy']]*2, casting='same_kind') assert_equal(i.dtypes[0], np.dtype('f4')) assert_equal(i.dtypes[1], np.dtype('f4')) i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], ['common_dtype'], [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('u4')) assert_equal(i.dtypes[1], np.dtype('u4')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], ['common_dtype'], [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('i8')) assert_equal(i.dtypes[1], np.dtype('i8')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), array([2j], dtype='c8'), array([9], dtype='f8')], ['common_dtype'], [['readonly', 'copy']]*4, casting='safe') assert_equal(i.dtypes[0], np.dtype('c16')) assert_equal(i.dtypes[1], np.dtype('c16')) assert_equal(i.dtypes[2], np.dtype('c16')) assert_equal(i.dtypes[3], np.dtype('c16')) assert_equal(i.value, (3, -12, 2j, 9)) # When allocating outputs, other outputs aren't factored in i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], [['readonly', 'copy'], ['writeonly', 'allocate'], ['writeonly']], casting='safe') assert_equal(i.dtypes[0], np.dtype('i4')) assert_equal(i.dtypes[1], np.dtype('i4')) assert_equal(i.dtypes[2], np.dtype('c16')) # But, if common data types are requested, they are i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], ['common_dtype'], [['readonly', 'copy'], ['writeonly', 'allocate'], ['writeonly']], casting='safe') assert_equal(i.dtypes[0], np.dtype('c16')) assert_equal(i.dtypes[1], np.dtype('c16')) assert_equal(i.dtypes[2], np.dtype('c16')) def test_iter_copy_if_overlap(): # Ensure the iterator makes copies on read/write overlap, if requested # Copy not needed, 1 op for flag in ['readonly', 'writeonly', 'readwrite']: a = arange(10) i = nditer([a], ['copy_if_overlap'], [[flag]]) with i: assert_(i.operands[0] is a) # Copy needed, 2 ops, read-write overlap x = arange(10) a = x[1:] b = x[:-1] with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: assert_(not np.shares_memory(*i.operands)) # Copy not needed with elementwise, 2 ops, exactly same arrays x = arange(10) a = x b = x i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], ['readwrite', 'overlap_assume_elementwise']]) with i: assert_(i.operands[0] is a and i.operands[1] is b) with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) # Copy not needed, 2 ops, no overlap x = arange(10) a = x[::2] b = x[1::2] i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) assert_(i.operands[0] is a and i.operands[1] is b) # Copy needed, 2 ops, read-write overlap x = arange(4, dtype=np.int8) a = x[3:] b = x.view(np.int32)[:1] with nditer([a, b], ['copy_if_overlap'], [['readonly'], 
['writeonly']]) as i: assert_(not np.shares_memory(*i.operands)) # Copy needed, 3 ops, read-write overlap for flag in ['writeonly', 'readwrite']: x = np.ones([10, 10]) a = x b = x.T c = x with nditer([a, b, c], ['copy_if_overlap'], [['readonly'], ['readonly'], [flag]]) as i: a2, b2, c2 = i.operands assert_(not np.shares_memory(a2, c2)) assert_(not np.shares_memory(b2, c2)) # Copy not needed, 3 ops, read-only overlap x = np.ones([10, 10]) a = x b = x.T c = x i = nditer([a, b, c], ['copy_if_overlap'], [['readonly'], ['readonly'], ['readonly']]) a2, b2, c2 = i.operands assert_(a is a2) assert_(b is b2) assert_(c is c2) # Copy not needed, 3 ops, read-only overlap x = np.ones([10, 10]) a = x b = np.ones([10, 10]) c = x.T i = nditer([a, b, c], ['copy_if_overlap'], [['readonly'], ['writeonly'], ['readonly']]) a2, b2, c2 = i.operands assert_(a is a2) assert_(b is b2) assert_(c is c2) # Copy not needed, 3 ops, write-only overlap x = np.arange(7) a = x[:3] b = x[3:6] c = x[4:7] i = nditer([a, b, c], ['copy_if_overlap'], [['readonly'], ['writeonly'], ['writeonly']]) a2, b2, c2 = i.operands assert_(a is a2) assert_(b is b2) assert_(c is c2) def test_iter_op_axes(): # Check that custom axes work # Reverse the axes a = arange(6).reshape(2, 3) i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) assert_(all([x == y for (x, y) in i])) a = arange(24).reshape(2, 3, 4) i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) assert_(all([x == y for (x, y) in i])) # Broadcast 1D to any dimension a = arange(1, 31).reshape(2, 3, 5) b = arange(1, 3) i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) b = arange(1, 4) i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) b = arange(1, 6) i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [np.newaxis, np.newaxis, 0]]) assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) # Inner product-style broadcasting a = arange(24).reshape(2, 3, 4) b = arange(40).reshape(5, 2, 4) i = nditer([a, b], ['multi_index'], [['readonly']]*2, op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) assert_equal(i.shape, (2, 3, 5, 2)) # Matrix product-style broadcasting a = arange(12).reshape(3, 4) b = arange(20).reshape(4, 5) i = nditer([a, b], ['multi_index'], [['readonly']]*2, op_axes=[[0, -1], [-1, 1]]) assert_equal(i.shape, (3, 5)) def test_iter_op_axes_errors(): # Check that custom axes throws errors for bad inputs # Wrong number of items in op_axes a = arange(6).reshape(2, 3) assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0], [1], [0]]) # Out of bounds items in op_axes assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[2, 1], [0, 1]]) assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 1], [2, -1]]) # Duplicate items in op_axes assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 0], [0, 1]]) assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 1], [1, 1]]) # Different sized arrays in op_axes assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 1], [0, 1, 0]]) # Non-broadcastable dimensions in the result assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) def test_iter_copy(): # Check that copying the iterator works correctly a = arange(24).reshape(2, 3, 4) # Simple iterator i = nditer(a) j = 
i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) i.iterindex = 3 j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) # Buffered iterator i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) i.iterindex = 3 j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) i.iterrange = (3, 9) j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) i.iterrange = (2, 18) next(i) next(i) j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) # Casting iterator with nditer(a, ['buffered'], order='F', casting='unsafe', op_dtypes='f8', buffersize=5) as i: j = i.copy() assert_equal([x[()] for x in j], a.ravel(order='F')) a = arange(24, dtype='<i4').reshape(2, 3, 4) with nditer(a, ['buffered'], order='F', casting='unsafe', op_dtypes='>f8', buffersize=5) as i: j = i.copy() assert_equal([x[()] for x in j], a.ravel(order='F')) def test_iter_allocate_output_simple(): # Check that the iterator will properly allocate outputs # Simple case a = arange(6) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].dtype, np.dtype('f4')) def test_iter_allocate_output_buffered_readwrite(): # Allocated output with buffering + delay_bufalloc a = arange(6) i = nditer([a, None], ['buffered', 'delay_bufalloc'], [['readonly'], ['allocate', 'readwrite']]) with i: i.operands[1][:] = 1 i.reset() for x in i: x[1][...] += x[0][...] assert_equal(i.operands[1], a+1) def test_iter_allocate_output_itorder(): # The allocated output should match the iteration order # C-order input, best iteration order a = arange(6, dtype='i4').reshape(2, 3) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].strides, a.strides) assert_equal(i.operands[1].dtype, np.dtype('f4')) # F-order input, best iteration order a = arange(24, dtype='i4').reshape(2, 3, 4).T i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].strides, a.strides) assert_equal(i.operands[1].dtype, np.dtype('f4')) # Non-contiguous input, C iteration order a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], order='C', op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].strides, (32, 16, 4)) assert_equal(i.operands[1].dtype, np.dtype('f4')) def test_iter_allocate_output_opaxes(): # Specifying op_axes should work a = arange(24, dtype='i4').reshape(2, 3, 4) i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']], op_dtypes=[np.dtype('u4'), None], op_axes=[[1, 2, 0], None]) assert_equal(i.operands[0].shape, (4, 2, 3)) assert_equal(i.operands[0].strides, (4, 48, 16)) assert_equal(i.operands[0].dtype, np.dtype('u4')) def test_iter_allocate_output_types_promotion(): # Check type promotion of automatic outputs i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), 
None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f4')) i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('u4')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('i8')) def test_iter_allocate_output_types_byte_order(): # Verify the rules for byte order changes # When there's just one input, the output type exactly matches a = array([3], dtype='u4').newbyteorder() i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']]) assert_equal(i.dtypes[0], i.dtypes[1]) # With two or more inputs, the output type is in native byte order i = nditer([a, a, None], [], [['readonly'], ['readonly'], ['writeonly', 'allocate']]) assert_(i.dtypes[0] != i.dtypes[2]) assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2]) def test_iter_allocate_output_types_scalar(): # If the inputs are all scalars, the output should be a scalar i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], [['writeonly', 'allocate']] + [['readonly']]*4) assert_equal(i.operands[0].dtype, np.dtype('complex128')) assert_equal(i.operands[0].ndim, 0) def test_iter_allocate_output_subtype(): # Make sure that the subtype with priority wins class MyNDArray(np.ndarray): __array_priority__ = 15 # subclass vs ndarray a = np.array([[1, 2], [3, 4]]).view(MyNDArray) b = np.arange(4).reshape(2, 2).T i = nditer([a, b, None], [], [['readonly'], ['readonly'], ['writeonly', 'allocate']]) assert_equal(type(a), type(i.operands[2])) assert_(type(b) is not type(i.operands[2])) assert_equal(i.operands[2].shape, (2, 2)) # If subtypes are disabled, we should get back an ndarray. 
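    # With 'no_subtype' the allocation ignores __array_priority__, so the
    # output is a plain ndarray even though operand `a` is a MyNDArray view.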
i = nditer([a, b, None], [], [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']]) assert_equal(type(b), type(i.operands[2])) assert_(type(a) is not type(i.operands[2])) assert_equal(i.operands[2].shape, (2, 2)) def test_iter_allocate_output_errors(): # Check that the iterator will throw errors for bad output allocations # Need an input if no output data type is specified a = arange(6) assert_raises(TypeError, nditer, [a, None], [], [['writeonly'], ['writeonly', 'allocate']]) # Allocated output should be flagged for writing assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['allocate', 'readonly']]) # Allocated output can't have buffering without delayed bufalloc assert_raises(ValueError, nditer, [a, None], ['buffered'], ['allocate', 'readwrite']) # Must specify at least one input assert_raises(ValueError, nditer, [None, None], [], [['writeonly', 'allocate'], ['writeonly', 'allocate']], op_dtypes=[np.dtype('f4'), np.dtype('f4')]) # If using op_axes, must specify all the axes a = arange(24, dtype='i4').reshape(2, 3, 4) assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')], op_axes=[None, [0, np.newaxis, 1]]) # If using op_axes, the axes must be within bounds assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')], op_axes=[None, [0, 3, 1]]) # If using op_axes, there can't be duplicates assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')], op_axes=[None, [0, 2, 1, 0]]) def test_iter_remove_axis(): a = arange(24).reshape(2, 3, 4) i = nditer(a, ['multi_index']) i.remove_axis(1) assert_equal([x for x in i], a[:, 0,:].ravel()) a = a[::-1,:,:] i = nditer(a, ['multi_index']) i.remove_axis(0) assert_equal([x for x in i], a[0,:,:].ravel()) def test_iter_remove_multi_index_inner_loop(): # Check that removing multi-index support works a = arange(24).reshape(2, 3, 4) i = nditer(a, ['multi_index']) assert_equal(i.ndim, 3) assert_equal(i.shape, (2, 3, 4)) assert_equal(i.itviews[0].shape, (2, 3, 4)) # Removing the multi-index tracking causes all dimensions to coalesce before = [x for x in i] i.remove_multi_index() after = [x for x in i] assert_equal(before, after) assert_equal(i.ndim, 1) assert_raises(ValueError, lambda i:i.shape, i) assert_equal(i.itviews[0].shape, (24,)) # Removing the inner loop means there's just one iteration i.reset() assert_equal(i.itersize, 24) assert_equal(i[0].shape, tuple()) i.enable_external_loop() assert_equal(i.itersize, 24) assert_equal(i[0].shape, (24,)) assert_equal(i.value, arange(24)) def test_iter_iterindex(): # Make sure iterindex works buffersize = 5 a = arange(24).reshape(4, 3, 2) for flags in ([], ['buffered']): i = nditer(a, flags, buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 2 assert_equal(iter_iterindices(i), list(range(2, 24))) i = nditer(a, flags, order='F', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 5 assert_equal(iter_iterindices(i), list(range(5, 24))) i = nditer(a[::-1], flags, order='F', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 9 assert_equal(iter_iterindices(i), list(range(9, 24))) i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 13 assert_equal(iter_iterindices(i), list(range(13, 24))) i = 
nditer(a[::1, ::-1], flags, buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 23 assert_equal(iter_iterindices(i), list(range(23, 24))) i.reset() i.iterindex = 2 assert_equal(iter_iterindices(i), list(range(2, 24))) def test_iter_iterrange(): # Make sure getting and resetting the iterrange works buffersize = 5 a = arange(24, dtype='i4').reshape(4, 3, 2) a_fort = a.ravel(order='F') i = nditer(a, ['ranged'], ['readonly'], order='F', buffersize=buffersize) assert_equal(i.iterrange, (0, 24)) assert_equal([x[()] for x in i], a_fort) for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F', op_dtypes='f8', buffersize=buffersize) assert_equal(i.iterrange, (0, 24)) assert_equal([x[()] for x in i], a_fort) for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) def get_array(i): val = np.array([], dtype='f8') for x in i: val = np.concatenate((val, x)) return val i = nditer(a, ['ranged', 'buffered', 'external_loop'], ['readonly'], order='F', op_dtypes='f8', buffersize=buffersize) assert_equal(i.iterrange, (0, 24)) assert_equal(get_array(i), a_fort) for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal(get_array(i), a_fort[r[0]:r[1]]) def test_iter_buffering(): # Test buffering with several buffer sizes and types arrays = [] # F-order swapped array arrays.append(np.arange(24, dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap()) # Contiguous 1-dimensional array arrays.append(np.arange(10, dtype='f4')) # Unaligned array a = np.zeros((4*16+1,), dtype='i1')[1:] a.dtype = 'i4' a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T) for a in arrays: for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024): vals = [] i = nditer(a, ['buffered', 'external_loop'], [['readonly', 'nbo', 'aligned']], order='C', casting='equiv', buffersize=buffersize) while not i.finished: assert_(i[0].size <= buffersize) vals.append(i[0].copy()) i.iternext() assert_equal(np.concatenate(vals), a.ravel(order='C')) def test_iter_write_buffering(): # Test that buffering of writes is working # F-order swapped array a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap() i = nditer(a, ['buffered'], [['readwrite', 'nbo', 'aligned']], casting='equiv', order='C', buffersize=16) x = 0 with i: while not i.finished: i[0] = x x += 1 i.iternext() assert_equal(a.ravel(order='C'), np.arange(24)) def test_iter_buffering_delayed_alloc(): # Test that delaying buffer allocation works a = np.arange(6) b = np.arange(1, dtype='f4') i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'], ['readwrite'], casting='unsafe', op_dtypes='f4') assert_(i.has_delayed_bufalloc) assert_raises(ValueError, lambda i:i.multi_index, i) assert_raises(ValueError, lambda i:i[0], i) assert_raises(ValueError, lambda i:i[0:2], i) def assign_iter(i): i[0] = 0 assert_raises(ValueError, assign_iter, i) i.reset() assert_(not i.has_delayed_bufalloc) assert_equal(i.multi_index, (0,)) with i: assert_equal(i[0], 0) i[1] = 1 assert_equal(i[0:2], [0, 1]) assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6))) def test_iter_buffered_cast_simple(): # Test that buffering can handle a simple cast 
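    # Each buffered chunk is converted f4 -> f8 for the loop body and
    # written back as f4, so in-place arithmetic updates the original array.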
a = np.arange(10, dtype='f4') i = nditer(a, ['buffered', 'external_loop'], [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('f8')], buffersize=3) with i: for v in i: v[...] *= 2 assert_equal(a, 2*np.arange(10, dtype='f4')) def test_iter_buffered_cast_byteswapped(): # Test that buffering can handle a cast which requires swap->cast->swap a = np.arange(10, dtype='f4').newbyteorder().byteswap() i = nditer(a, ['buffered', 'external_loop'], [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('f8').newbyteorder()], buffersize=3) with i: for v in i: v[...] *= 2 assert_equal(a, 2*np.arange(10, dtype='f4')) with suppress_warnings() as sup: sup.filter(np.ComplexWarning) a = np.arange(10, dtype='f8').newbyteorder().byteswap() i = nditer(a, ['buffered', 'external_loop'], [['readwrite', 'nbo', 'aligned']], casting='unsafe', op_dtypes=[np.dtype('c8').newbyteorder()], buffersize=3) with i: for v in i: v[...] *= 2 assert_equal(a, 2*np.arange(10, dtype='f8')) def test_iter_buffered_cast_byteswapped_complex(): # Test that buffering can handle a cast which requires swap->cast->copy a = np.arange(10, dtype='c8').newbyteorder().byteswap() a += 2j i = nditer(a, ['buffered', 'external_loop'], [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('c16')], buffersize=3) with i: for v in i: v[...] *= 2 assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) a = np.arange(10, dtype='c8') a += 2j i = nditer(a, ['buffered', 'external_loop'], [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('c16').newbyteorder()], buffersize=3) with i: for v in i: v[...] *= 2 assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() a += 2j i = nditer(a, ['buffered', 'external_loop'], [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('c16')], buffersize=3) with i: for v in i: v[...] *= 2 assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() i = nditer(a, ['buffered', 'external_loop'], [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('f4')], buffersize=7) with i: for v in i: v[...] 
*= 2 assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) def test_iter_buffered_cast_structured_type(): # Tests buffering of structured types # simple -> struct type (duplicates the value) sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] a = np.arange(3, dtype='f4') + 0.5 i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt) vals = [np.array(x) for x in i] assert_equal(vals[0]['a'], 0.5) assert_equal(vals[0]['b'], 0) assert_equal(vals[0]['c'], [[(0.5)]*3]*2) assert_equal(vals[0]['d'], 0.5) assert_equal(vals[1]['a'], 1.5) assert_equal(vals[1]['b'], 1) assert_equal(vals[1]['c'], [[(1.5)]*3]*2) assert_equal(vals[1]['d'], 1.5) assert_equal(vals[0].dtype, np.dtype(sdt)) # object -> struct type sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] a = np.zeros((3,), dtype='O') a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) if HAS_REFCOUNT: rc = sys.getrefcount(a[0]) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt) vals = [x.copy() for x in i] assert_equal(vals[0]['a'], 0.5) assert_equal(vals[0]['b'], 0) assert_equal(vals[0]['c'], [[(0.5)]*3]*2) assert_equal(vals[0]['d'], 0.5) assert_equal(vals[1]['a'], 1.5) assert_equal(vals[1]['b'], 1) assert_equal(vals[1]['c'], [[(1.5)]*3]*2) assert_equal(vals[1]['d'], 1.5) assert_equal(vals[0].dtype, np.dtype(sdt)) vals, i, x = [None]*3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(a[0]), rc) # single-field struct type -> simple sdt = [('a', 'f4')] a = np.array([(5.5,), (8,)], dtype=sdt) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes='i4') assert_equal([x_[()] for x_ in i], [5, 8]) # make sure multi-field struct type -> simple doesn't work sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) assert_raises(TypeError, lambda: ( nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes='i4'))) # struct type -> struct type (field-wise copy) sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) assert_equal([np.array(x_) for x_ in i], [np.array((1, 2, 3), dtype=sdt2), np.array((4, 5, 6), dtype=sdt2)]) # make sure struct type -> struct type with different # number of fields fails sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] sdt2 = [('b', 'O'), ('a', 'f8')] a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) assert_raises(ValueError, lambda : ( nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2))) def test_iter_buffered_cast_subarray(): # Tests buffering of subarrays # one element -> many (copies it to all) sdt1 = [('a', 'f4')] sdt2 = [('a', 'f8', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) for x, count in zip(i, list(range(6))): assert_(np.all(x['a'] == count)) # one element -> many -> back (copies it to all) sdt1 = [('a', 'O', (1, 1))] sdt2 = [('a', 'O', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'][:, 0, 0] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) with i: assert_equal(i[0].dtype, 
np.dtype(sdt2)) count = 0 for x in i: assert_(np.all(x['a'] == count)) x['a'][0] += 2 count += 1 assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) # many -> one element -> back (copies just element 0) sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'O', (1,))] a = np.zeros((6,), dtype=sdt1) a['a'][:, 0, 0, 0] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) with i: assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], count) x['a'] += 2 count += 1 assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) # many -> one element -> back (copies just element 0) sdt1 = [('a', 'f8', (3, 2, 2))] sdt2 = [('a', 'O', (1,))] a = np.zeros((6,), dtype=sdt1) a['a'][:, 0, 0, 0] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], count) count += 1 # many -> one element (copies just element 0) sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'f4', (1,))] a = np.zeros((6,), dtype=sdt1) a['a'][:, 0, 0, 0] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], count) count += 1 # many -> matching shape (straightforward copy) sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'f4', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], a[count]['a']) count += 1 # vector -> smaller vector (truncates) sdt1 = [('a', 'f8', (6,))] sdt2 = [('a', 'f4', (2,))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*6).reshape(6, 6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], a[count]['a'][:2]) count += 1 # vector -> bigger vector (pads with zeros) sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (6,))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*2).reshape(6, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][:2], a[count]['a']) assert_equal(x['a'][2:], [0, 0, 0, 0]) count += 1 # vector -> matrix (broadcasts) sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*2).reshape(6, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][0], a[count]['a']) assert_equal(x['a'][1], a[count]['a']) count += 1 # vector -> matrix (broadcasts and zero-pads) sdt1 = [('a', 'f8', (2, 1))] sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*2).reshape(6, 2, 1) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) assert_equal(x['a'][2,:], [0, 0]) count += 1 # matrix -> matrix (truncates and zero-pads) sdt1 = [('a', 'f8', (2, 3))] sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*2*3).reshape(6, 2, 3) i = nditer(a, 
['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) assert_equal(x['a'][2,:], [0, 0]) count += 1 def test_iter_buffering_badwriteback(): # Writing back from a buffer cannot combine elements # a needs write buffering, but had a broadcast dimension a = np.arange(6).reshape(2, 3, 1) b = np.arange(12).reshape(2, 3, 2) assert_raises(ValueError, nditer, [a, b], ['buffered', 'external_loop'], [['readwrite'], ['writeonly']], order='C') # But if a is readonly, it's fine nditer([a, b], ['buffered', 'external_loop'], [['readonly'], ['writeonly']], order='C') # If a has just one element, it's fine too (constant 0 stride, a reduction) a = np.arange(1).reshape(1, 1, 1) nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], [['readwrite'], ['writeonly']], order='C') # check that it fails on other dimensions too a = np.arange(6).reshape(1, 3, 2) assert_raises(ValueError, nditer, [a, b], ['buffered', 'external_loop'], [['readwrite'], ['writeonly']], order='C') a = np.arange(4).reshape(2, 1, 2) assert_raises(ValueError, nditer, [a, b], ['buffered', 'external_loop'], [['readwrite'], ['writeonly']], order='C') def test_iter_buffering_string(): # Safe casting disallows shrinking strings a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) assert_equal(a.dtype, np.dtype('S4')) assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], op_dtypes='S2') i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') assert_equal(i[0], b'abc') assert_equal(i[0].dtype, np.dtype('S6')) a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_) assert_equal(a.dtype, np.dtype('U4')) assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], op_dtypes='U2') i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') assert_equal(i[0], u'abc') assert_equal(i[0].dtype, np.dtype('U6')) def test_iter_buffering_growinner(): # Test that the inner loop grows when no buffering is needed a = np.arange(30) i = nditer(a, ['buffered', 'growinner', 'external_loop'], buffersize=5) # Should end up with just one inner loop here assert_equal(i[0].size, a.size) @pytest.mark.slow def test_iter_buffered_reduce_reuse(): # large enough array for all views, including negative strides. a = np.arange(2*3**5)[3**5:3**5+1] flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] op_flags = [('readonly',), ('readwrite', 'allocate')] op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] # wrong dtype to force buffering op_dtypes = [float, a.dtype] def get_params(): for xs in range(-3**2, 3**2 + 1): for ys in range(xs, 3**2 + 1): for op_axes in op_axes_list: # last stride is reduced and because of that not # important for this test, as it is the inner stride. strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) for skip in [0, 1]: yield arr, op_axes, skip for arr, op_axes, skip in get_params(): nditer2 = np.nditer([arr.copy(), None], op_axes=op_axes, flags=flags, op_flags=op_flags, op_dtypes=op_dtypes) with nditer2: nditer2.operands[-1][...] = 0 nditer2.reset() nditer2.iterindex = skip for (a2_in, b2_in) in nditer2: b2_in += a2_in.astype(np.int_) comp_res = nditer2.operands[-1] for bufsize in range(0, 3**3): nditer1 = np.nditer([arr, None], op_axes=op_axes, flags=flags, op_flags=op_flags, buffersize=bufsize, op_dtypes=op_dtypes) with nditer1: nditer1.operands[-1][...] 
= 0 nditer1.reset() nditer1.iterindex = skip for (a1_in, b1_in) in nditer1: b1_in += a1_in.astype(np.int_) res = nditer1.operands[-1] assert_array_equal(res, comp_res) def test_iter_no_broadcast(): # Test that the no_broadcast flag works a = np.arange(24).reshape(2, 3, 4) b = np.arange(6).reshape(2, 3, 1) c = np.arange(12).reshape(3, 4) nditer([a, b, c], [], [['readonly', 'no_broadcast'], ['readonly'], ['readonly']]) assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) class TestIterNested: def test_basic(self): # Test nested iteration basic usage a = arange(12).reshape(2, 3, 2) i, j = np.nested_iters(a, [[0], [1, 2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[0, 1], [2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[0, 2], [1]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_reorder(self): # Test nested iteration basic usage a = arange(12).reshape(2, 3, 2) # In 'K' order (default), it gets reordered i, j = np.nested_iters(a, [[0], [2, 1]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[1, 0], [2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[2, 0], [1]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) # In 'C' order, it doesn't i, j = np.nested_iters(a, [[0], [2, 1]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) i, j = np.nested_iters(a, [[1, 0], [2]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) i, j = np.nested_iters(a, [[2, 0], [1]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) def test_flip_axes(self): # Test nested iteration with negative axes a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] # In 'K' order (default), the axes all get flipped i, j = np.nested_iters(a, [[0], [1, 2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[0, 1], [2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[0, 2], [1]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) # In 'C' order, flipping axes is disabled i, j = np.nested_iters(a, [[0], [1, 2]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) i, j = np.nested_iters(a, [[0, 1], [2]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) i, j = np.nested_iters(a, [[0, 2], [1]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) def test_broadcast(self): # Test nested iteration with broadcasting a = arange(2).reshape(2, 1) b = arange(3).reshape(1, 3) i, j = np.nested_iters([a, b], [[0], [1]]) vals = [list(j) for _ in i] assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) i, j = np.nested_iters([a, b], [[1], [0]]) vals = 
[list(j) for _ in i] assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) def test_dtype_copy(self): # Test nested iteration with a copy to change dtype # copy a = arange(6, dtype='i4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], op_flags=['readonly', 'copy'], op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) vals = None # writebackifcopy - using context manager a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], op_flags=['readwrite', 'updateifcopy'], casting='same_kind', op_dtypes='f8') with i, j: assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[0, 1, 2], [3, 4, 5]]) assert_equal(a, [[1, 2, 3], [4, 5, 6]]) # writebackifcopy - using close() a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], op_flags=['readwrite', 'updateifcopy'], casting='same_kind', op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[0, 1, 2], [3, 4, 5]]) i.close() j.close() assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_dtype_buffered(self): # Test nested iteration with buffering to change dtype a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], flags=['buffered'], op_flags=['readwrite'], casting='same_kind', op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_0d(self): a = np.arange(12).reshape(2, 3, 2) i, j = np.nested_iters(a, [[], [1, 0, 2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[1, 0, 2], []]) vals = [list(j) for _ in i] assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) vals = [] for x in i: for y in j: vals.append([z for z in k]) assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_iter_nested_iters_dtype_buffered(self): # Test nested iteration with buffering to change dtype a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], flags=['buffered'], op_flags=['readwrite'], casting='same_kind', op_dtypes='f8') with i, j: assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_reduction_error(): a = np.arange(6) assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0], [-1]]) a = np.arange(6).reshape(2, 3) assert_raises(ValueError, nditer, [a, None], ['external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0, 1], [-1, -1]]) def test_iter_reduction(): # Test doing reductions with the iterator a = np.arange(6) i = nditer([a, None], ['reduce_ok'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0], [-1]]) # Need to initialize the output operand to the addition unit with i: i.operands[1][...] = 0 # Do the reduction for x, y in i: y[...] += x # Since no axes were specified, should have allocated a scalar assert_equal(i.operands[1].ndim, 0) assert_equal(i.operands[1], np.sum(a)) a = np.arange(6).reshape(2, 3) i = nditer([a, None], ['reduce_ok', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0, 1], [-1, -1]]) # Need to initialize the output operand to the addition unit with i: i.operands[1][...] 
= 0 # Reduction shape/strides for the output assert_equal(i[1].shape, (6,)) assert_equal(i[1].strides, (0,)) # Do the reduction for x, y in i: # Use a for loop instead of ``y[...] += x`` # (equivalent to ``y[...] = y[...].copy() + x``), # because y has zero strides we use for the reduction for j in range(len(y)): y[j] += x[j] # Since no axes were specified, should have allocated a scalar assert_equal(i.operands[1].ndim, 0) assert_equal(i.operands[1], np.sum(a)) # This is a tricky reduction case for the buffering double loop # to handle a = np.ones((2, 3, 5)) it1 = nditer([a, None], ['reduce_ok', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[None, [0, -1, 1]]) it2 = nditer([a, None], ['reduce_ok', 'external_loop', 'buffered', 'delay_bufalloc'], [['readonly'], ['readwrite', 'allocate']], op_axes=[None, [0, -1, 1]], buffersize=10) with it1, it2: it1.operands[1].fill(0) it2.operands[1].fill(0) it2.reset() for x in it1: x[1][...] += x[0] for x in it2: x[1][...] += x[0] assert_equal(it1.operands[1], it2.operands[1]) assert_equal(it2.operands[1].sum(), a.size) def test_iter_buffering_reduction(): # Test doing buffered reductions with the iterator a = np.arange(6) b = np.array(0., dtype='f8').byteswap().newbyteorder() i = nditer([a, b], ['reduce_ok', 'buffered'], [['readonly'], ['readwrite', 'nbo']], op_axes=[[0], [-1]]) with i: assert_equal(i[1].dtype, np.dtype('f8')) assert_(i[1].dtype != b.dtype) # Do the reduction for x, y in i: y[...] += x # Since no axes were specified, should have allocated a scalar assert_equal(b, np.sum(a)) a = np.arange(6).reshape(2, 3) b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], [['readonly'], ['readwrite', 'nbo']], op_axes=[[0, 1], [0, -1]]) # Reduction shape/strides for the output with i: assert_equal(i[1].shape, (3,)) assert_equal(i[1].strides, (0,)) # Do the reduction for x, y in i: # Use a for loop instead of ``y[...] += x`` # (equivalent to ``y[...] = y[...].copy() + x``), # because y has zero strides we use for the reduction for j in range(len(y)): y[j] += x[j] assert_equal(b, np.sum(a, axis=1)) # Iterator inner double loop was wrong on this one p = np.arange(2) + 1 it = np.nditer([p, None], ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[-1, 0], [-1, -1]], itershape=(2, 2)) with it: it.operands[1].fill(0) it.reset() assert_equal(it[0], [1, 2, 1, 2]) # Iterator inner loop should take argument contiguity into account x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) x[...] = np.arange(x.size).reshape(x.shape) y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) y_base_copy = y_base.copy() y = y_base[::2,:,None] it = np.nditer([y, x], ['buffered', 'external_loop', 'reduce_ok'], [['readwrite'], ['readonly']]) with it: for a, b in it: a.fill(2) assert_equal(y_base[1::2], y_base_copy[1::2]) assert_equal(y_base[::2], 2) def test_iter_buffering_reduction_reuse_reduce_loops(): # There was a bug triggering reuse of the reduce loop inappropriately, # which caused processing to happen in unnecessarily small chunks # and overran the buffer. 
    a = np.zeros((2, 7))
    b = np.zeros((1, 7))
    it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
                   op_flags=[['readonly'], ['readwrite']], buffersize=5)
    with it:
        bufsizes = [x.shape[0] for x, y in it]
    assert_equal(bufsizes, [5, 2, 5, 2])
    assert_equal(sum(bufsizes), a.size)

def test_iter_writemasked_badinput():
    a = np.zeros((2, 3))
    b = np.zeros((3,))
    m = np.array([[True, True, False], [False, True, False]])
    m2 = np.array([True, True, False])
    m3 = np.array([0, 1, 1], dtype='u1')
    mbad1 = np.array([0, 1, 1], dtype='i1')
    mbad2 = np.array([0, 1, 1], dtype='f4')

    # Need an 'arraymask' if any operand is 'writemasked'
    assert_raises(ValueError, nditer, [a, m], [],
                  [['readwrite', 'writemasked'], ['readonly']])

    # A 'writemasked' operand must not be readonly
    assert_raises(ValueError, nditer, [a, m], [],
                  [['readonly', 'writemasked'], ['readonly', 'arraymask']])

    # 'writemasked' and 'arraymask' may not be used together
    assert_raises(ValueError, nditer, [a, m], [],
                  [['readonly'], ['readwrite', 'arraymask', 'writemasked']])

    # 'arraymask' may only be specified once
    assert_raises(ValueError, nditer, [a, m, m2], [],
                  [['readwrite', 'writemasked'], ['readonly', 'arraymask'],
                   ['readonly', 'arraymask']])

    # An 'arraymask' with nothing 'writemasked' also doesn't make sense
    assert_raises(ValueError, nditer, [a, m], [],
                  [['readwrite'], ['readonly', 'arraymask']])

    # A writemasked reduction requires a similarly smaller mask
    assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
                  [['readonly'], ['readwrite', 'writemasked'],
                   ['readonly', 'arraymask']])
    # But this should work with a smaller/equal mask to the reduction operand
    np.nditer([a, b, m2], ['reduce_ok'],
              [['readonly'], ['readwrite', 'writemasked'],
               ['readonly', 'arraymask']])
    # The arraymask itself cannot be a reduction
    assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
                  [['readonly'], ['readwrite', 'writemasked'],
                   ['readwrite', 'arraymask']])

    # A uint8 mask is ok too
    np.nditer([a, m3], ['buffered'],
              [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
              op_dtypes=['f4', None], casting='same_kind')
    # An int8 mask isn't ok
    assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
                  [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
                  op_dtypes=['f4', None], casting='same_kind')
    # A float32 mask isn't ok
    assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
                  [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
                  op_dtypes=['f4', None], casting='same_kind')

def test_iter_writemasked():
    a = np.zeros((3,), dtype='f8')
    msk = np.array([True, True, False])

    # When buffering is unused, 'writemasked' effectively does nothing.
    # It's up to the user of the iterator to obey the requested semantics.
    it = np.nditer([a, msk], [],
                   [['readwrite', 'writemasked'], ['readonly', 'arraymask']])
    with it:
        for x, m in it:
            x[...] = 1
    # Because we violated the semantics, all the values became 1
    assert_equal(a, [1, 1, 1])

    # Even if buffering is enabled, we still may be accessing the array
    # directly.
    it = np.nditer([a, msk], ['buffered'],
                   [['readwrite', 'writemasked'], ['readonly', 'arraymask']])
    with it:
        for x, m in it:
            x[...] = 2.5
    # Because we violated the semantics, all the values became 2.5
    assert_equal(a, [2.5, 2.5, 2.5])

    # If buffering will definitely happen, for instance because of a cast,
    # only the items selected by the mask will be copied back from the
    # buffer (a standalone sketch of this behavior follows the test suite
    # below).
    it = np.nditer([a, msk], ['buffered'],
                   [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
                   op_dtypes=['i8', None], casting='unsafe')
    with it:
        for x, m in it:
            x[...] = 3
    # Even though we violated the semantics, only the selected values
    # were copied back
    assert_equal(a, [3, 3, 2.5])

def test_iter_non_writable_attribute_deletion():
    it = np.nditer(np.ones(2))
    attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
            "iterationneedsapi", "has_multi_index", "has_index", "dtypes",
            "ndim", "nop", "itersize", "finished"]
    for s in attr:
        assert_raises(AttributeError, delattr, it, s)

def test_iter_writable_attribute_deletion():
    it = np.nditer(np.ones(2))
    attr = ["multi_index", "index", "iterrange", "iterindex"]
    for s in attr:
        assert_raises(AttributeError, delattr, it, s)

def test_iter_element_deletion():
    it = np.nditer(np.ones(3))
    try:
        del it[1]
        del it[1:2]
    except TypeError:
        pass
    except Exception:
        raise AssertionError

def test_iter_allocated_array_dtypes():
    # If the dtype of an allocated output has a shape, the shape gets
    # tacked onto the end of the result.
    it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
    for a, b in it:
        b[0] = a - 1
        b[1] = a + 1
    assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])

    # Make sure this works for scalars too
    it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
    for a, b, c in it:
        c[0, 0] = a - b
        c[0, 1] = a + b
        c[1, 0] = a * b
        c[1, 1] = a / b
    assert_equal(it.operands[2], [[8, 12], [20, 5]])

def test_0d_iter():
    # Basic test for iteration of 0-d arrays:
    i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
    assert_equal(i.ndim, 0)
    assert_equal(next(i), (2, 3))
    assert_equal(i.multi_index, ())
    assert_equal(i.iterindex, 0)
    assert_raises(StopIteration, next, i)
    # test reset:
    i.reset()
    assert_equal(next(i), (2, 3))
    assert_raises(StopIteration, next, i)

    # test forcing to 0-d
    i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
    assert_equal(i.ndim, 0)
    assert_equal(len(i), 1)

    i = nditer(np.arange(5), ['multi_index'], [['readonly']],
               op_axes=[()], itershape=())
    assert_equal(i.ndim, 0)
    assert_equal(len(i), 1)

    # passing an itershape alone is not enough, the op_axes are also needed
    with assert_raises(ValueError):
        nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=())

    # Test a more complex buffered casting case (same as another test above)
    sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
    a = np.array(0.5, dtype='f4')
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe', op_dtypes=sdt)
    vals = next(i)
    assert_equal(vals['a'], 0.5)
    assert_equal(vals['b'], 0)
    assert_equal(vals['c'], [[(0.5)]*3]*2)
    assert_equal(vals['d'], 0.5)

def test_iter_too_large():
    # The total size of the iterator must not exceed the maximum intp due
    # to broadcasting. Dividing by 1024 will keep it small enough to
    # give a legal array.
    size = np.iinfo(np.intp).max // 1024
    arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
    assert_raises(ValueError, nditer, (arr, arr[:, None]))
    # test the same for multiindex. That may get more interesting when
    # removing 0 dimensional axis is allowed (since an iterator can grow then)
    assert_raises(ValueError, nditer, (arr, arr[:, None]),
                  flags=['multi_index'])

def test_iter_too_large_with_multiindex():
    # When a multi-index is being tracked, the error is delayed; this
    # checks the delayed error messages, and that the size can be brought
    # back under the limit by removing an axis.
    base_size = 2**10
    num = 1
    while base_size**num < np.iinfo(np.intp).max:
        num += 1
    shape_template = [1, 1] * num
    arrays = []
    for i in range(num):
        shape = shape_template[:]
        shape[i * 2] = 2**10
        arrays.append(np.empty(shape))
    arrays = tuple(arrays)

    # arrays are now too large to be broadcast. The different modes test
    # different nditer functionality with or without GIL.
    for mode in range(6):
        with assert_raises(ValueError):
            _multiarray_tests.test_nditer_too_large(arrays, -1, mode)
    # but if we do nothing with the nditer, it can be constructed:
    _multiarray_tests.test_nditer_too_large(arrays, -1, 7)

    # When an axis is removed, things should work again (half the time):
    for i in range(num):
        for mode in range(6):
            # an axis with size 1024 is removed:
            _multiarray_tests.test_nditer_too_large(arrays, i*2, mode)
            # an axis with size 1 is removed:
            with assert_raises(ValueError):
                _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)

def test_writebacks():
    a = np.arange(6, dtype='f4')
    au = a.byteswap().newbyteorder()
    assert_(a.dtype.byteorder != au.dtype.byteorder)
    it = nditer(au, [], [['readwrite', 'updateifcopy']],
                casting='equiv', op_dtypes=[np.dtype('f4')])
    with it:
        it.operands[0][:] = 100
    assert_equal(au, 100)
    # do it again, this time raising an error
    it = nditer(au, [], [['readwrite', 'updateifcopy']],
                casting='equiv', op_dtypes=[np.dtype('f4')])
    try:
        with it:
            assert_equal(au.flags.writeable, False)
            it.operands[0][:] = 0
            raise ValueError('exit context manager on exception')
    except:
        pass
    assert_equal(au, 0)
    assert_equal(au.flags.writeable, True)
    # cannot reuse `it` outside the context manager
    assert_raises(ValueError, getattr, it, 'operands')

    it = nditer(au, [], [['readwrite', 'updateifcopy']],
                casting='equiv', op_dtypes=[np.dtype('f4')])
    with it:
        x = it.operands[0]
        x[:] = 6
        assert_(x.flags.writebackifcopy)
    assert_equal(au, 6)
    assert_(not x.flags.writebackifcopy)
    x[:] = 123  # x.data still valid
    assert_equal(au, 6)  # but not connected to au

    it = nditer(au, [], [['readwrite', 'updateifcopy']],
                casting='equiv', op_dtypes=[np.dtype('f4')])
    # reentering works
    with it:
        with it:
            for x in it:
                x[...] = 123

    it = nditer(au, [], [['readwrite', 'updateifcopy']],
                casting='equiv', op_dtypes=[np.dtype('f4')])
    # make sure exiting the inner context manager closes the iterator
    with it:
        with it:
            for x in it:
                x[...] = 123
        assert_raises(ValueError, getattr, it, 'operands')

    # do not crash if original data array is decrefed
    it = nditer(au, [], [['readwrite', 'updateifcopy']],
                casting='equiv', op_dtypes=[np.dtype('f4')])
    del au
    with it:
        for x in it:
            x[...] = 123
    # make sure we cannot reenter the closed iterator
    enter = it.__enter__
    assert_raises(RuntimeError, enter)

def test_close_equivalent():
    '''using a context manager and using nditer.close are equivalent
    '''
    def add_close(x, y, out=None):
        addop = np.add
        it = np.nditer([x, y, out], [],
                       [['readonly'], ['readonly'], ['writeonly', 'allocate']])
        for (a, b, c) in it:
            addop(a, b, out=c)
        ret = it.operands[2]
        it.close()
        return ret

    def add_context(x, y, out=None):
        addop = np.add
        it = np.nditer([x, y, out], [],
                       [['readonly'], ['readonly'], ['writeonly', 'allocate']])
        with it:
            for (a, b, c) in it:
                addop(a, b, out=c)
            return it.operands[2]

    z = add_close(range(5), range(5))
    assert_equal(z, range(0, 10, 2))
    z = add_context(range(5), range(5))
    assert_equal(z, range(0, 10, 2))

def test_close_raises():
    it = np.nditer(np.arange(3))
    assert_equal(next(it), 0)
    it.close()
    assert_raises(StopIteration, next, it)
    assert_raises(ValueError, getattr, it, 'operands')

@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_warn_noclose():
    a = np.arange(6, dtype='f4')
    au = a.byteswap().newbyteorder()
    with suppress_warnings() as sup:
        sup.record(RuntimeWarning)
        it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
                       casting='equiv', op_dtypes=[np.dtype('f4')])
        del it
        assert len(sup.log) == 1
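
# ----------------------------------------------------------------------
# Editorial sketches: the self-contained examples below restate a few of
# the behaviors exercised by the tests above in isolation.  They are
# hedged illustrations, not part of the original suite; the `demo_*`
# names are invented here (and deliberately do not match pytest's
# `test_*` pattern, so they are never collected), and they assume only
# this module's existing imports (numpy as np).

# Sketch 1: with 'buffered' + 'external_loop' and no 'growinner', each
# value the iterator yields is a 1-D chunk of at most `buffersize`
# elements, mirroring the size check in test_iter_buffering above.
def demo_external_loop_chunks():
    a = np.arange(10, dtype='f4')
    it = np.nditer(a, ['buffered', 'external_loop'], [['readonly']],
                   buffersize=4)
    chunks = [chunk.copy() for chunk in it]
    assert all(chunk.size <= 4 for chunk in chunks)
    assert np.concatenate(chunks).tolist() == a.tolist()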
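
# Sketch 2: a buffered read-write cast, as in test_iter_buffered_cast_simple.
# Inside the loop each value is an 'f8' view into the buffer; the context
# manager guarantees the buffered writes are flushed back into the 'f4'
# array on exit.
def demo_buffered_cast_writeback():
    a = np.arange(6, dtype='f4')
    it = np.nditer(a, ['buffered'], [['readwrite']],
                   casting='same_kind', op_dtypes=['f8'])
    with it:
        for x in it:
            x[...] = x * 2
    assert a.tolist() == [0.0, 2.0, 4.0, 6.0, 8.0, 10.0]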
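
# Sketch 3: np.nested_iters splits the axes of one iteration space between
# two iterators; the inner iterator restarts for every step of the outer
# one, as exercised by TestIterNested above.
def demo_nested_iters():
    a = np.arange(6).reshape(2, 3)
    outer, inner = np.nested_iters(a, [[0], [1]])
    rows = [[int(x) for x in inner] for _ in outer]
    assert rows == [[0, 1, 2], [3, 4, 5]]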
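
# Sketch 4: the reduction pattern from test_iter_reduction in one place.
# 'reduce_ok' plus an allocated readwrite output mapped to no axes
# (op_axes [-1]) accumulates every input element into a 0-d result,
# which must be initialized to the additive identity first.
def demo_nditer_sum():
    a = np.arange(6, dtype='f8')
    it = np.nditer([a, None], ['reduce_ok'],
                   [['readonly'], ['readwrite', 'allocate']],
                   op_axes=[[0], [-1]])
    with it:
        it.operands[1][...] = 0
        for x, y in it:
            y[...] += x
        assert it.operands[1] == a.sum()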
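
# Sketch 5: the writemasked behavior checked at the end of
# test_iter_writemasked: once a cast forces real buffering, only the
# elements whose mask entry is True are copied back out of the buffer.
def demo_writemasked_copyback():
    a = np.zeros(3, dtype='f8')
    mask = np.array([True, False, True])
    it = np.nditer([a, mask], ['buffered'],
                   [['readwrite', 'writemasked'],
                    ['readonly', 'arraymask']],
                   op_dtypes=['i8', None], casting='unsafe')
    with it:
        for x, m in it:
            x[...] = 7
    assert a.tolist() == [7.0, 0.0, 7.0]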
abalkin/numpy
numpy/core/tests/test_nditer.py
numpy/core/tests/test_indexing.py
""" Helper functions for interacting with the shell, and consuming shell-style parameters provided in config files. """ import os import shlex import subprocess try: from shlex import quote except ImportError: from pipes import quote __all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] class CommandLineParser: """ An object that knows how to split and join command-line arguments. It must be true that ``argv == split(join(argv))`` for all ``argv``. The reverse neednt be true - `join(split(cmd))` may result in the addition or removal of unnecessary escaping. """ @staticmethod def join(argv): """ Join a list of arguments into a command line string """ raise NotImplementedError @staticmethod def split(cmd): """ Split a command line string into a list of arguments """ raise NotImplementedError class WindowsParser: """ The parsing behavior used by `subprocess.call("string")` on Windows, which matches the Microsoft C/C++ runtime. Note that this is _not_ the behavior of cmd. """ @staticmethod def join(argv): # note that list2cmdline is specific to the windows syntax return subprocess.list2cmdline(argv) @staticmethod def split(cmd): import ctypes # guarded import for systems without ctypes try: ctypes.windll except AttributeError: raise NotImplementedError # Windows has special parsing rules for the executable (no quotes), # that we do not care about - insert a dummy element if not cmd: return [] cmd = 'dummy ' + cmd CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) nargs = ctypes.c_int() lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) args = [lpargs[i] for i in range(nargs.value)] assert not ctypes.windll.kernel32.LocalFree(lpargs) # strip the element we inserted assert args[0] == "dummy" return args[1:] class PosixParser: """ The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. """ @staticmethod def join(argv): return ' '.join(quote(arg) for arg in argv) @staticmethod def split(cmd): return shlex.split(cmd, posix=True) if os.name == 'nt': NativeParser = WindowsParser elif os.name == 'posix': NativeParser = PosixParser
import sys import pytest import numpy as np import numpy.core._multiarray_tests as _multiarray_tests from numpy import array, arange, nditer, all from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT, suppress_warnings ) def iter_multi_index(i): ret = [] while not i.finished: ret.append(i.multi_index) i.iternext() return ret def iter_indices(i): ret = [] while not i.finished: ret.append(i.index) i.iternext() return ret def iter_iterindices(i): ret = [] while not i.finished: ret.append(i.iterindex) i.iternext() return ret @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_iter_refcount(): # Make sure the iterator doesn't leak # Basic a = arange(6) dt = np.dtype('f4').newbyteorder() rc_a = sys.getrefcount(a) rc_dt = sys.getrefcount(dt) with nditer(a, [], [['readwrite', 'updateifcopy']], casting='unsafe', op_dtypes=[dt]) as it: assert_(not it.iterationneedsapi) assert_(sys.getrefcount(a) > rc_a) assert_(sys.getrefcount(dt) > rc_dt) # del 'it' it = None assert_equal(sys.getrefcount(a), rc_a) assert_equal(sys.getrefcount(dt), rc_dt) # With a copy a = arange(6, dtype='f4') dt = np.dtype('f4') rc_a = sys.getrefcount(a) rc_dt = sys.getrefcount(dt) it = nditer(a, [], [['readwrite']], op_dtypes=[dt]) rc2_a = sys.getrefcount(a) rc2_dt = sys.getrefcount(dt) it2 = it.copy() assert_(sys.getrefcount(a) > rc2_a) assert_(sys.getrefcount(dt) > rc2_dt) it = None assert_equal(sys.getrefcount(a), rc2_a) assert_equal(sys.getrefcount(dt), rc2_dt) it2 = None assert_equal(sys.getrefcount(a), rc_a) assert_equal(sys.getrefcount(dt), rc_dt) del it2 # avoid pyflakes unused variable warning def test_iter_best_order(): # The iterator should always find the iteration order # with increasing memory addresses # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, [], [['readonly']]) assert_equal([x for x in i], a) # Fortran-order i = nditer(aview.T, [], [['readonly']]) assert_equal([x for x in i], a) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) assert_equal([x for x in i], a) def test_iter_c_order(): # Test forcing C order # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='C') assert_equal([x for x in i], aview.ravel(order='C')) # Fortran-order i = nditer(aview.T, order='C') assert_equal([x for x in i], aview.T.ravel(order='C')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='C') assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='C')) def test_iter_f_order(): # Test forcing F order # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides 
for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='F') assert_equal([x for x in i], aview.ravel(order='F')) # Fortran-order i = nditer(aview.T, order='F') assert_equal([x for x in i], aview.T.ravel(order='F')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='F') assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='F')) def test_iter_c_or_f_order(): # Test forcing any contiguous (C or F) order # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='A') assert_equal([x for x in i], aview.ravel(order='A')) # Fortran-order i = nditer(aview.T, order='A') assert_equal([x for x in i], aview.T.ravel(order='A')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='A') assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='A')) def test_iter_best_order_multi_index_1d(): # The multi-indices should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) # 1D reversed order i = nditer(a[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) def test_iter_best_order_multi_index_2d(): # The multi-indices should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) def test_iter_best_order_multi_index_3d(): # The multi-indices should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), (1, 
0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) # 3D reversed Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) def test_iter_best_order_c_index_1d(): # The C index should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3]) # 1D reversed order i = nditer(a[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 2, 1, 0]) def test_iter_best_order_c_index_2d(): # The C index should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) def test_iter_best_order_c_index_3d(): # The C index should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) 
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) # 3D reversed Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) def test_iter_best_order_f_index_1d(): # The Fortran index should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3]) # 1D reversed order i = nditer(a[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 2, 1, 0]) def test_iter_best_order_f_index_2d(): # The Fortran index should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) def test_iter_best_order_f_index_3d(): # The Fortran index should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) # 3D reversed Fortran-order i = 
nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) def test_iter_no_inner_full_coalesce(): # Check no_inner iterators which coalesce into a single inner loop for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: size = np.prod(shape) a = arange(size) # Test each combination of forward and backwards indexing for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) # Fortran-order i = nditer(aview.T, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) def test_iter_no_inner_dim_coalescing(): # Check no_inner iterators whose dimensions may not coalesce completely # Skipping the last element in a dimension prevents coalescing # with the next-bigger dimension a = arange(24).reshape(2, 3, 4)[:,:, :-1] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (3,)) a = arange(24).reshape(2, 3, 4)[:, :-1,:] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (8,)) a = arange(24).reshape(2, 3, 4)[:-1,:,:] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (12,)) # Even with lots of 1-sized dimensions, should still coalesce a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (24,)) def test_iter_dim_coalescing(): # Check that the correct number of dimensions are coalesced # Tracking a multi-index disables coalescing a = arange(24).reshape(2, 3, 4) i = nditer(a, ['multi_index'], [['readonly']]) assert_equal(i.ndim, 3) # A tracked index can allow coalescing if it's compatible with the array a3d = arange(24).reshape(2, 3, 4) i = nditer(a3d, ['c_index'], [['readonly']]) assert_equal(i.ndim, 1) i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) assert_equal(i.ndim, 3) i = nditer(a3d.T, ['c_index'], [['readonly']]) assert_equal(i.ndim, 3) i = nditer(a3d.T, ['f_index'], [['readonly']]) assert_equal(i.ndim, 1) i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) assert_equal(i.ndim, 3) # When C or F order is forced, coalescing may still occur a3d = arange(24).reshape(2, 3, 4) i = nditer(a3d, order='C') assert_equal(i.ndim, 1) i = nditer(a3d.T, order='C') assert_equal(i.ndim, 3) i = nditer(a3d, order='F') assert_equal(i.ndim, 3) i = nditer(a3d.T, order='F') assert_equal(i.ndim, 1) i = nditer(a3d, order='A') assert_equal(i.ndim, 1) i = nditer(a3d.T, order='A') assert_equal(i.ndim, 1) def test_iter_broadcasting(): # Standard NumPy broadcasting rules # 1D with scalar i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) 
assert_equal(i.itersize, 6) assert_equal(i.shape, (6,)) # 2D with scalar i = nditer([arange(6).reshape(2, 3), np.int32(2)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 1D i = nditer([arange(6).reshape(2, 3), arange(3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) i = nditer([arange(2).reshape(2, 1), arange(3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 2D i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 3D with scalar i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 1D i = nditer([arange(3), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3), arange(8).reshape(4, 2, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 2D i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 3D i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), arange(4).reshape(4, 1, 1)], ['multi_index'], [['readonly']]*3) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) def test_iter_itershape(): # Check that allocated outputs work with a specified shape a = np.arange(6, dtype='i2').reshape(2, 3) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (2, 3, 4)) assert_equal(i.operands[1].strides, (24, 8, 2)) i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (3, 2, 4)) assert_equal(i.operands[1].strides, (8, 24, 2)) i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], order='F', op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (3, 2, 4)) assert_equal(i.operands[1].strides, (2, 6, 12)) # If we specify 1 in the itershape, it shouldn't allow broadcasting # of that dimension to a bigger value assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, 1, 4)) # Test bug that for no op_axes but itershape, they are NULLed correctly i = np.nditer([np.ones(2), None, None], itershape=(2,)) def test_iter_broadcasting_errors(): # Check that errors are thrown for bad broadcasting shapes # 1D with 1D assert_raises(ValueError, nditer, [arange(2), arange(3)], [], 
[['readonly']]*2) # 2D with 1D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(2)], [], [['readonly']]*2) # 2D with 2D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], [], [['readonly']]*2) assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], [], [['readonly']]*2) # 3D with 3D assert_raises(ValueError, nditer, [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], [], [['readonly']]*2) assert_raises(ValueError, nditer, [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], [], [['readonly']]*2) # Verify that the error message mentions the right shapes try: nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 3), arange(6).reshape(2, 3)], [], [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain the shape of the 3rd operand assert_(msg.find('(2,3)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) # The message should contain the broadcast shape assert_(msg.find('(1,2,3)') >= 0, 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) try: nditer([arange(6).reshape(2, 3), arange(2)], [], [['readonly'], ['readonly']], op_axes=[[0, 1], [0, np.newaxis]], itershape=(4, 3)) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain "shape->remappedshape" for each operand assert_(msg.find('(2,3)->(2,3)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) assert_(msg.find('(2,)->(2,newaxis)') >= 0, ('Message "%s" doesn\'t contain remapped operand shape' + '(2,)->(2,newaxis)') % msg) # The message should contain the itershape parameter assert_(msg.find('(4,3)') >= 0, 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) try: nditer([np.zeros((2, 1, 1)), np.zeros((2,))], [], [['writeonly', 'no_broadcast'], ['readonly']]) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain the shape of the bad operand assert_(msg.find('(2,1,1)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) # The message should contain the broadcast shape assert_(msg.find('(2,1,2)') >= 0, 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) def test_iter_flags_errors(): # Check that bad combinations of flags produce errors a = arange(6) # Not enough operands assert_raises(ValueError, nditer, [], [], []) # Too many operands assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) # Bad global flag assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) # Bad op flag assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) # Bad order parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') # Bad casting parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') # op_flags must match ops assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) # Cannot track both a C and an F index assert_raises(ValueError, nditer, a, ['c_index', 'f_index'], [['readonly']]) # Inner iteration and multi-indices/indices are incompatible assert_raises(ValueError, nditer, a, ['external_loop', 'multi_index'], [['readonly']]) assert_raises(ValueError, nditer, a, ['external_loop', 'c_index'], [['readonly']]) assert_raises(ValueError, nditer, a, ['external_loop', 'f_index'], [['readonly']]) # Must specify 
exactly one of readwrite/readonly/writeonly per operand assert_raises(ValueError, nditer, a, [], [[]]) assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly', 'readwrite']]) # Python scalars are always readonly assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) # Array scalars are always readonly assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) # Check readonly array a.flags.writeable = False assert_raises(ValueError, nditer, a, [], [['writeonly']]) assert_raises(ValueError, nditer, a, [], [['readwrite']]) a.flags.writeable = True # Multi-indices available only with the multi_index flag i = nditer(arange(6), [], [['readonly']]) assert_raises(ValueError, lambda i:i.multi_index, i) # Index available only with an index flag assert_raises(ValueError, lambda i:i.index, i) # GotoCoords and GotoIndex incompatible with buffering or no_inner def assign_multi_index(i): i.multi_index = (0,) def assign_index(i): i.index = 0 def assign_iterindex(i): i.iterindex = 0 def assign_iterrange(i): i.iterrange = (0, 1) i = nditer(arange(6), ['external_loop']) assert_raises(ValueError, assign_multi_index, i) assert_raises(ValueError, assign_index, i) assert_raises(ValueError, assign_iterindex, i) assert_raises(ValueError, assign_iterrange, i) i = nditer(arange(6), ['buffered']) assert_raises(ValueError, assign_multi_index, i) assert_raises(ValueError, assign_index, i) assert_raises(ValueError, assign_iterrange, i) # Can't iterate if size is zero assert_raises(ValueError, nditer, np.array([])) def test_iter_slice(): a, b, c = np.arange(3), np.arange(3), np.arange(3.) i = nditer([a, b, c], [], ['readwrite']) with i: i[0:2] = (3, 3) assert_equal(a, [3, 1, 2]) assert_equal(b, [3, 1, 2]) assert_equal(c, [0, 1, 2]) i[1] = 12 assert_equal(i[0:2], [3, 12]) def test_iter_assign_mapping(): a = np.arange(24, dtype='f8').reshape(2, 3, 4).T it = np.nditer(a, [], [['readwrite', 'updateifcopy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) with it: it.operands[0][...] = 3 it.operands[0][...] = 14 assert_equal(a, 14) it = np.nditer(a, [], [['readwrite', 'updateifcopy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) with it: x = it.operands[0][-1:1] x[...] = 14 it.operands[0][...] 
= -1234 assert_equal(a, -1234) # check for no warnings on dealloc x = None it = None def test_iter_nbo_align_contig(): # Check that byte order, alignment, and contig changes work # Byte order change by requesting a specific dtype a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() assert_(a.dtype.byteorder != au.dtype.byteorder) i = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) with i: # context manager triggers UPDATEIFCOPY on i at exit assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) assert_equal(i.operands[0], a) i.operands[0][:] = 2 assert_equal(au, [2]*6) del i # should not raise a warning # Byte order change by requesting NBO a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() assert_(a.dtype.byteorder != au.dtype.byteorder) with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], casting='equiv') as i: # context manager triggers UPDATEIFCOPY on i at exit assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) assert_equal(i.operands[0], a) i.operands[0][:] = 12345 i.operands[0][:] = 2 assert_equal(au, [2]*6) # Unaligned input a = np.zeros((6*4+1,), dtype='i1')[1:] a.dtype = 'f4' a[:] = np.arange(6, dtype='f4') assert_(not a.flags.aligned) # Without 'aligned', shouldn't copy i = nditer(a, [], [['readonly']]) assert_(not i.operands[0].flags.aligned) assert_equal(i.operands[0], a) # With 'aligned', should make a copy with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: assert_(i.operands[0].flags.aligned) # context manager triggers UPDATEIFCOPY on i at exit assert_equal(i.operands[0], a) i.operands[0][:] = 3 assert_equal(a, [3]*6) # Discontiguous input a = arange(12) # If it is contiguous, shouldn't copy i = nditer(a[:6], [], [['readonly']]) assert_(i.operands[0].flags.contiguous) assert_equal(i.operands[0], a[:6]) # If it isn't contiguous, should buffer i = nditer(a[::2], ['buffered', 'external_loop'], [['readonly', 'contig']], buffersize=10) assert_(i[0].flags.contiguous) assert_equal(i[0], a[::2]) def test_iter_array_cast(): # Check that arrays are cast as requested # No cast 'f4' -> 'f4' a = np.arange(6, dtype='f4').reshape(2, 3) i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) with i: assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f4')) # Byte-order cast '<f4' -> '>f4' a = np.arange(6, dtype='<f4').reshape(2, 3) with nditer(a, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('>f4')]) as i: assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('>f4')) # Safe case 'f4' -> 'f8' a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) i = nditer(a, [], [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f8')) # The memory layout of the temporary should match a (a is (48,4,16)) # except negative strides get flipped to positive strides. 
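    # (Illustrative aside, not part of the original test: with the default
    # 'K' order the copy keeps a's axis permutation, so widening from a
    # 4-byte to an 8-byte dtype just doubles every stride. Uses only the
    # in-scope names `a` and `i`.)
    expected_strides = tuple(2*s for s in a.strides)  # (48, 4, 16) -> (96, 8, 32)
    assert_equal(i.operands[0].strides, expected_strides)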
    assert_equal(i.operands[0].strides, (96, 8, 32))
    a = a[::-1,:, ::-1]
    i = nditer(a, [], [['readonly', 'copy']],
               casting='safe',
               op_dtypes=[np.dtype('f8')])
    assert_equal(i.operands[0], a)
    assert_equal(i.operands[0].dtype, np.dtype('f8'))
    assert_equal(i.operands[0].strides, (96, 8, 32))

    # Same-kind cast 'f8' -> 'f4' -> 'f8'
    a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
    with nditer(a, [], [['readwrite', 'updateifcopy']],
                casting='same_kind',
                op_dtypes=[np.dtype('f4')]) as i:
        assert_equal(i.operands[0], a)
        assert_equal(i.operands[0].dtype, np.dtype('f4'))
        assert_equal(i.operands[0].strides, (4, 16, 48))
        # Check that WRITEBACKIFCOPY is activated at exit
        i.operands[0][2, 1, 1] = -12.5
        assert_(a[2, 1, 1] != -12.5)
    assert_equal(a[2, 1, 1], -12.5)

    a = np.arange(6, dtype='i4')[::-2]
    with nditer(a, [], [['writeonly', 'updateifcopy']],
                casting='unsafe',
                op_dtypes=[np.dtype('f4')]) as i:
        assert_equal(i.operands[0].dtype, np.dtype('f4'))
        # Even though the stride was negative in 'a', it
        # becomes positive in the temporary
        assert_equal(i.operands[0].strides, (4,))
        i.operands[0][:] = [1, 2, 3]
    assert_equal(a, [1, 2, 3])

def test_iter_array_cast_errors():
    # Check that invalid casts are caught

    # Need to enable copying for casts to occur
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readonly']], op_dtypes=[np.dtype('f8')])
    # Also need to allow casting for casts to occur
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readonly', 'copy']], casting='no',
                  op_dtypes=[np.dtype('f8')])
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readonly', 'copy']], casting='equiv',
                  op_dtypes=[np.dtype('f8')])
    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
                  [['writeonly', 'updateifcopy']],
                  casting='no',
                  op_dtypes=[np.dtype('f4')])
    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
                  [['writeonly', 'updateifcopy']],
                  casting='equiv',
                  op_dtypes=[np.dtype('f4')])
    # '<f4' -> '>f4' should not work with casting='no'
    assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
                  [['readonly', 'copy']], casting='no',
                  op_dtypes=[np.dtype('>f4')])
    # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readwrite', 'updateifcopy']],
                  casting='safe',
                  op_dtypes=[np.dtype('f8')])
    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
                  [['readwrite', 'updateifcopy']],
                  casting='safe',
                  op_dtypes=[np.dtype('f4')])
    # 'f4' -> 'i4' is neither a safe nor a same-kind cast
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readonly', 'copy']],
                  casting='same_kind',
                  op_dtypes=[np.dtype('i4')])
    assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
                  [['writeonly', 'updateifcopy']],
                  casting='same_kind',
                  op_dtypes=[np.dtype('f4')])

def test_iter_scalar_cast():
    # Check that scalars are cast as requested

    # No cast 'f4' -> 'f4'
    i = nditer(np.float32(2.5), [], [['readonly']],
               op_dtypes=[np.dtype('f4')])
    assert_equal(i.dtypes[0], np.dtype('f4'))
    assert_equal(i.value.dtype, np.dtype('f4'))
    assert_equal(i.value, 2.5)
    # Safe cast 'f4' -> 'f8'
    i = nditer(np.float32(2.5), [],
               [['readonly', 'copy']],
               casting='safe',
               op_dtypes=[np.dtype('f8')])
    assert_equal(i.dtypes[0], np.dtype('f8'))
    assert_equal(i.value.dtype, np.dtype('f8'))
    assert_equal(i.value, 2.5)
    # Same-kind cast 'f8' -> 'f4'
    i = nditer(np.float64(2.5), [],
               [['readonly', 'copy']],
               casting='same_kind',
               op_dtypes=[np.dtype('f4')])
    assert_equal(i.dtypes[0], np.dtype('f4'))
    assert_equal(i.value.dtype, np.dtype('f4'))
    assert_equal(i.value, 2.5)
    # Unsafe cast 'f8' -> 'i4'
    i
= nditer(np.float64(3.0), [], [['readonly', 'copy']], casting='unsafe', op_dtypes=[np.dtype('i4')]) assert_equal(i.dtypes[0], np.dtype('i4')) assert_equal(i.value.dtype, np.dtype('i4')) assert_equal(i.value, 3) # Readonly scalars may be cast even without setting COPY or BUFFERED i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) assert_equal(i[0].dtype, np.dtype('f8')) assert_equal(i[0], 3.) def test_iter_scalar_cast_errors(): # Check that invalid casts are caught # Need to allow copying/buffering for write casts of scalars to occur assert_raises(TypeError, nditer, np.float32(2), [], [['readwrite']], op_dtypes=[np.dtype('f8')]) assert_raises(TypeError, nditer, 2.5, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) # 'f8' -> 'f4' isn't a safe cast if the value would overflow assert_raises(TypeError, nditer, np.float64(1e60), [], [['readonly']], casting='safe', op_dtypes=[np.dtype('f4')]) # 'f4' -> 'i4' is neither a safe nor a same-kind cast assert_raises(TypeError, nditer, np.float32(2), [], [['readonly']], casting='same_kind', op_dtypes=[np.dtype('i4')]) def test_iter_object_arrays_basic(): # Check that object arrays work obj = {'a':3,'b':'d'} a = np.array([[1, 2, 3], None, obj, None], dtype='O') if HAS_REFCOUNT: rc = sys.getrefcount(obj) # Need to allow references for object arrays assert_raises(TypeError, nditer, a) if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) i = nditer(a, ['refs_ok'], ['readonly']) vals = [x_[()] for x_ in i] assert_equal(np.array(vals, dtype='O'), a) vals, i, x = [None]*3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], ['readonly'], order='C') assert_(i.iterationneedsapi) vals = [x_[()] for x_ in i] assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) vals, i, x = [None]*3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], ['readwrite'], order='C') with i: for x in i: x[...] = None vals, i, x = [None]*3 if HAS_REFCOUNT: assert_(sys.getrefcount(obj) == rc-1) assert_equal(a, np.array([None]*4, dtype='O')) def test_iter_object_arrays_conversions(): # Conversions to/from objects a = np.arange(6, dtype='O') i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='i4') with i: for x in i: x[...] += 1 assert_equal(a, np.arange(6)+1) a = np.arange(6, dtype='i4') i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='O') with i: for x in i: x[...] += 1 assert_equal(a, np.arange(6)+1) # Non-contiguous object array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) a = a['a'] a[:] = np.arange(6) i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='i4') with i: for x in i: x[...] += 1 assert_equal(a, np.arange(6)+1) #Non-contiguous value array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) a = a['a'] a[:] = np.arange(6) + 98172488 i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='O') with i: ob = i[0][()] if HAS_REFCOUNT: rc = sys.getrefcount(ob) for x in i: x[...] 
+= 1 if HAS_REFCOUNT: assert_(sys.getrefcount(ob) == rc-1) assert_equal(a, np.arange(6)+98172489) def test_iter_common_dtype(): # Check that the iterator finds a common data type correctly i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], ['common_dtype'], [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], ['common_dtype'], [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], ['common_dtype'], [['readonly', 'copy']]*2, casting='same_kind') assert_equal(i.dtypes[0], np.dtype('f4')) assert_equal(i.dtypes[1], np.dtype('f4')) i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], ['common_dtype'], [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('u4')) assert_equal(i.dtypes[1], np.dtype('u4')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], ['common_dtype'], [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('i8')) assert_equal(i.dtypes[1], np.dtype('i8')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), array([2j], dtype='c8'), array([9], dtype='f8')], ['common_dtype'], [['readonly', 'copy']]*4, casting='safe') assert_equal(i.dtypes[0], np.dtype('c16')) assert_equal(i.dtypes[1], np.dtype('c16')) assert_equal(i.dtypes[2], np.dtype('c16')) assert_equal(i.dtypes[3], np.dtype('c16')) assert_equal(i.value, (3, -12, 2j, 9)) # When allocating outputs, other outputs aren't factored in i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], [['readonly', 'copy'], ['writeonly', 'allocate'], ['writeonly']], casting='safe') assert_equal(i.dtypes[0], np.dtype('i4')) assert_equal(i.dtypes[1], np.dtype('i4')) assert_equal(i.dtypes[2], np.dtype('c16')) # But, if common data types are requested, they are i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], ['common_dtype'], [['readonly', 'copy'], ['writeonly', 'allocate'], ['writeonly']], casting='safe') assert_equal(i.dtypes[0], np.dtype('c16')) assert_equal(i.dtypes[1], np.dtype('c16')) assert_equal(i.dtypes[2], np.dtype('c16')) def test_iter_copy_if_overlap(): # Ensure the iterator makes copies on read/write overlap, if requested # Copy not needed, 1 op for flag in ['readonly', 'writeonly', 'readwrite']: a = arange(10) i = nditer([a], ['copy_if_overlap'], [[flag]]) with i: assert_(i.operands[0] is a) # Copy needed, 2 ops, read-write overlap x = arange(10) a = x[1:] b = x[:-1] with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: assert_(not np.shares_memory(*i.operands)) # Copy not needed with elementwise, 2 ops, exactly same arrays x = arange(10) a = x b = x i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], ['readwrite', 'overlap_assume_elementwise']]) with i: assert_(i.operands[0] is a and i.operands[1] is b) with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) # Copy not needed, 2 ops, no overlap x = arange(10) a = x[::2] b = x[1::2] i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) assert_(i.operands[0] is a and i.operands[1] is b) # Copy needed, 2 ops, read-write overlap x = arange(4, dtype=np.int8) a = x[3:] b = x.view(np.int32)[:1] with nditer([a, b], ['copy_if_overlap'], [['readonly'], 
['writeonly']]) as i: assert_(not np.shares_memory(*i.operands)) # Copy needed, 3 ops, read-write overlap for flag in ['writeonly', 'readwrite']: x = np.ones([10, 10]) a = x b = x.T c = x with nditer([a, b, c], ['copy_if_overlap'], [['readonly'], ['readonly'], [flag]]) as i: a2, b2, c2 = i.operands assert_(not np.shares_memory(a2, c2)) assert_(not np.shares_memory(b2, c2)) # Copy not needed, 3 ops, read-only overlap x = np.ones([10, 10]) a = x b = x.T c = x i = nditer([a, b, c], ['copy_if_overlap'], [['readonly'], ['readonly'], ['readonly']]) a2, b2, c2 = i.operands assert_(a is a2) assert_(b is b2) assert_(c is c2) # Copy not needed, 3 ops, read-only overlap x = np.ones([10, 10]) a = x b = np.ones([10, 10]) c = x.T i = nditer([a, b, c], ['copy_if_overlap'], [['readonly'], ['writeonly'], ['readonly']]) a2, b2, c2 = i.operands assert_(a is a2) assert_(b is b2) assert_(c is c2) # Copy not needed, 3 ops, write-only overlap x = np.arange(7) a = x[:3] b = x[3:6] c = x[4:7] i = nditer([a, b, c], ['copy_if_overlap'], [['readonly'], ['writeonly'], ['writeonly']]) a2, b2, c2 = i.operands assert_(a is a2) assert_(b is b2) assert_(c is c2) def test_iter_op_axes(): # Check that custom axes work # Reverse the axes a = arange(6).reshape(2, 3) i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) assert_(all([x == y for (x, y) in i])) a = arange(24).reshape(2, 3, 4) i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) assert_(all([x == y for (x, y) in i])) # Broadcast 1D to any dimension a = arange(1, 31).reshape(2, 3, 5) b = arange(1, 3) i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) b = arange(1, 4) i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) b = arange(1, 6) i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [np.newaxis, np.newaxis, 0]]) assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) # Inner product-style broadcasting a = arange(24).reshape(2, 3, 4) b = arange(40).reshape(5, 2, 4) i = nditer([a, b], ['multi_index'], [['readonly']]*2, op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) assert_equal(i.shape, (2, 3, 5, 2)) # Matrix product-style broadcasting a = arange(12).reshape(3, 4) b = arange(20).reshape(4, 5) i = nditer([a, b], ['multi_index'], [['readonly']]*2, op_axes=[[0, -1], [-1, 1]]) assert_equal(i.shape, (3, 5)) def test_iter_op_axes_errors(): # Check that custom axes throws errors for bad inputs # Wrong number of items in op_axes a = arange(6).reshape(2, 3) assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0], [1], [0]]) # Out of bounds items in op_axes assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[2, 1], [0, 1]]) assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 1], [2, -1]]) # Duplicate items in op_axes assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 0], [0, 1]]) assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 1], [1, 1]]) # Different sized arrays in op_axes assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 1], [0, 1, 0]]) # Non-broadcastable dimensions in the result assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) def test_iter_copy(): # Check that copying the iterator works correctly a = arange(24).reshape(2, 3, 4) # Simple iterator i = nditer(a) j = 
i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) i.iterindex = 3 j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) # Buffered iterator i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) i.iterindex = 3 j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) i.iterrange = (3, 9) j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) i.iterrange = (2, 18) next(i) next(i) j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) # Casting iterator with nditer(a, ['buffered'], order='F', casting='unsafe', op_dtypes='f8', buffersize=5) as i: j = i.copy() assert_equal([x[()] for x in j], a.ravel(order='F')) a = arange(24, dtype='<i4').reshape(2, 3, 4) with nditer(a, ['buffered'], order='F', casting='unsafe', op_dtypes='>f8', buffersize=5) as i: j = i.copy() assert_equal([x[()] for x in j], a.ravel(order='F')) def test_iter_allocate_output_simple(): # Check that the iterator will properly allocate outputs # Simple case a = arange(6) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].dtype, np.dtype('f4')) def test_iter_allocate_output_buffered_readwrite(): # Allocated output with buffering + delay_bufalloc a = arange(6) i = nditer([a, None], ['buffered', 'delay_bufalloc'], [['readonly'], ['allocate', 'readwrite']]) with i: i.operands[1][:] = 1 i.reset() for x in i: x[1][...] += x[0][...] assert_equal(i.operands[1], a+1) def test_iter_allocate_output_itorder(): # The allocated output should match the iteration order # C-order input, best iteration order a = arange(6, dtype='i4').reshape(2, 3) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].strides, a.strides) assert_equal(i.operands[1].dtype, np.dtype('f4')) # F-order input, best iteration order a = arange(24, dtype='i4').reshape(2, 3, 4).T i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].strides, a.strides) assert_equal(i.operands[1].dtype, np.dtype('f4')) # Non-contiguous input, C iteration order a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], order='C', op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].strides, (32, 16, 4)) assert_equal(i.operands[1].dtype, np.dtype('f4')) def test_iter_allocate_output_opaxes(): # Specifying op_axes should work a = arange(24, dtype='i4').reshape(2, 3, 4) i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']], op_dtypes=[np.dtype('u4'), None], op_axes=[[1, 2, 0], None]) assert_equal(i.operands[0].shape, (4, 2, 3)) assert_equal(i.operands[0].strides, (4, 48, 16)) assert_equal(i.operands[0].dtype, np.dtype('u4')) def test_iter_allocate_output_types_promotion(): # Check type promotion of automatic outputs i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), 
None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f4')) i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('u4')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('i8')) def test_iter_allocate_output_types_byte_order(): # Verify the rules for byte order changes # When there's just one input, the output type exactly matches a = array([3], dtype='u4').newbyteorder() i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']]) assert_equal(i.dtypes[0], i.dtypes[1]) # With two or more inputs, the output type is in native byte order i = nditer([a, a, None], [], [['readonly'], ['readonly'], ['writeonly', 'allocate']]) assert_(i.dtypes[0] != i.dtypes[2]) assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2]) def test_iter_allocate_output_types_scalar(): # If the inputs are all scalars, the output should be a scalar i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], [['writeonly', 'allocate']] + [['readonly']]*4) assert_equal(i.operands[0].dtype, np.dtype('complex128')) assert_equal(i.operands[0].ndim, 0) def test_iter_allocate_output_subtype(): # Make sure that the subtype with priority wins class MyNDArray(np.ndarray): __array_priority__ = 15 # subclass vs ndarray a = np.array([[1, 2], [3, 4]]).view(MyNDArray) b = np.arange(4).reshape(2, 2).T i = nditer([a, b, None], [], [['readonly'], ['readonly'], ['writeonly', 'allocate']]) assert_equal(type(a), type(i.operands[2])) assert_(type(b) is not type(i.operands[2])) assert_equal(i.operands[2].shape, (2, 2)) # If subtypes are disabled, we should get back an ndarray. 
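    # (Note added for clarity, assuming the documented op-flag semantics:
    # the 'no_subtype' flag below suppresses the subclass during output
    # allocation, so a plain ndarray comes back even though MyNDArray has
    # the higher __array_priority__.)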
i = nditer([a, b, None], [], [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']]) assert_equal(type(b), type(i.operands[2])) assert_(type(a) is not type(i.operands[2])) assert_equal(i.operands[2].shape, (2, 2)) def test_iter_allocate_output_errors(): # Check that the iterator will throw errors for bad output allocations # Need an input if no output data type is specified a = arange(6) assert_raises(TypeError, nditer, [a, None], [], [['writeonly'], ['writeonly', 'allocate']]) # Allocated output should be flagged for writing assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['allocate', 'readonly']]) # Allocated output can't have buffering without delayed bufalloc assert_raises(ValueError, nditer, [a, None], ['buffered'], ['allocate', 'readwrite']) # Must specify at least one input assert_raises(ValueError, nditer, [None, None], [], [['writeonly', 'allocate'], ['writeonly', 'allocate']], op_dtypes=[np.dtype('f4'), np.dtype('f4')]) # If using op_axes, must specify all the axes a = arange(24, dtype='i4').reshape(2, 3, 4) assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')], op_axes=[None, [0, np.newaxis, 1]]) # If using op_axes, the axes must be within bounds assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')], op_axes=[None, [0, 3, 1]]) # If using op_axes, there can't be duplicates assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_dtypes=[None, np.dtype('f4')], op_axes=[None, [0, 2, 1, 0]]) def test_iter_remove_axis(): a = arange(24).reshape(2, 3, 4) i = nditer(a, ['multi_index']) i.remove_axis(1) assert_equal([x for x in i], a[:, 0,:].ravel()) a = a[::-1,:,:] i = nditer(a, ['multi_index']) i.remove_axis(0) assert_equal([x for x in i], a[0,:,:].ravel()) def test_iter_remove_multi_index_inner_loop(): # Check that removing multi-index support works a = arange(24).reshape(2, 3, 4) i = nditer(a, ['multi_index']) assert_equal(i.ndim, 3) assert_equal(i.shape, (2, 3, 4)) assert_equal(i.itviews[0].shape, (2, 3, 4)) # Removing the multi-index tracking causes all dimensions to coalesce before = [x for x in i] i.remove_multi_index() after = [x for x in i] assert_equal(before, after) assert_equal(i.ndim, 1) assert_raises(ValueError, lambda i:i.shape, i) assert_equal(i.itviews[0].shape, (24,)) # Removing the inner loop means there's just one iteration i.reset() assert_equal(i.itersize, 24) assert_equal(i[0].shape, tuple()) i.enable_external_loop() assert_equal(i.itersize, 24) assert_equal(i[0].shape, (24,)) assert_equal(i.value, arange(24)) def test_iter_iterindex(): # Make sure iterindex works buffersize = 5 a = arange(24).reshape(4, 3, 2) for flags in ([], ['buffered']): i = nditer(a, flags, buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 2 assert_equal(iter_iterindices(i), list(range(2, 24))) i = nditer(a, flags, order='F', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 5 assert_equal(iter_iterindices(i), list(range(5, 24))) i = nditer(a[::-1], flags, order='F', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 9 assert_equal(iter_iterindices(i), list(range(9, 24))) i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 13 assert_equal(iter_iterindices(i), list(range(13, 24))) i = 
nditer(a[::1, ::-1], flags, buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 23 assert_equal(iter_iterindices(i), list(range(23, 24))) i.reset() i.iterindex = 2 assert_equal(iter_iterindices(i), list(range(2, 24))) def test_iter_iterrange(): # Make sure getting and resetting the iterrange works buffersize = 5 a = arange(24, dtype='i4').reshape(4, 3, 2) a_fort = a.ravel(order='F') i = nditer(a, ['ranged'], ['readonly'], order='F', buffersize=buffersize) assert_equal(i.iterrange, (0, 24)) assert_equal([x[()] for x in i], a_fort) for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F', op_dtypes='f8', buffersize=buffersize) assert_equal(i.iterrange, (0, 24)) assert_equal([x[()] for x in i], a_fort) for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) def get_array(i): val = np.array([], dtype='f8') for x in i: val = np.concatenate((val, x)) return val i = nditer(a, ['ranged', 'buffered', 'external_loop'], ['readonly'], order='F', op_dtypes='f8', buffersize=buffersize) assert_equal(i.iterrange, (0, 24)) assert_equal(get_array(i), a_fort) for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal(get_array(i), a_fort[r[0]:r[1]]) def test_iter_buffering(): # Test buffering with several buffer sizes and types arrays = [] # F-order swapped array arrays.append(np.arange(24, dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap()) # Contiguous 1-dimensional array arrays.append(np.arange(10, dtype='f4')) # Unaligned array a = np.zeros((4*16+1,), dtype='i1')[1:] a.dtype = 'i4' a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T) for a in arrays: for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024): vals = [] i = nditer(a, ['buffered', 'external_loop'], [['readonly', 'nbo', 'aligned']], order='C', casting='equiv', buffersize=buffersize) while not i.finished: assert_(i[0].size <= buffersize) vals.append(i[0].copy()) i.iternext() assert_equal(np.concatenate(vals), a.ravel(order='C')) def test_iter_write_buffering(): # Test that buffering of writes is working # F-order swapped array a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap() i = nditer(a, ['buffered'], [['readwrite', 'nbo', 'aligned']], casting='equiv', order='C', buffersize=16) x = 0 with i: while not i.finished: i[0] = x x += 1 i.iternext() assert_equal(a.ravel(order='C'), np.arange(24)) def test_iter_buffering_delayed_alloc(): # Test that delaying buffer allocation works a = np.arange(6) b = np.arange(1, dtype='f4') i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'], ['readwrite'], casting='unsafe', op_dtypes='f4') assert_(i.has_delayed_bufalloc) assert_raises(ValueError, lambda i:i.multi_index, i) assert_raises(ValueError, lambda i:i[0], i) assert_raises(ValueError, lambda i:i[0:2], i) def assign_iter(i): i[0] = 0 assert_raises(ValueError, assign_iter, i) i.reset() assert_(not i.has_delayed_bufalloc) assert_equal(i.multi_index, (0,)) with i: assert_equal(i[0], 0) i[1] = 1 assert_equal(i[0:2], [0, 1]) assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6))) def test_iter_buffered_cast_simple(): # Test that buffering can handle a simple cast 
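    # (Minimal sketch first, not part of the original test: with 'buffered',
    # each chunk handed to the loop already has the requested dtype, and no
    # chunk exceeds the buffer size. The names here are illustrative only.)
    demo = np.arange(6, dtype='f4')
    for chunk in np.nditer(demo, ['buffered', 'external_loop'], [['readonly']],
                           casting='same_kind', op_dtypes=[np.dtype('f8')],
                           buffersize=4):
        assert_(chunk.dtype == np.dtype('f8'))
        assert_(chunk.size <= 4)  # never larger than the buffer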
    a = np.arange(10, dtype='f4')
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('f8')],
               buffersize=3)
    with i:
        for v in i:
            v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype='f4'))

def test_iter_buffered_cast_byteswapped():
    # Test that buffering can handle a cast which requires swap->cast->swap
    a = np.arange(10, dtype='f4').newbyteorder().byteswap()
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('f8').newbyteorder()],
               buffersize=3)
    with i:
        for v in i:
            v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype='f4'))

    with suppress_warnings() as sup:
        sup.filter(np.ComplexWarning)

        a = np.arange(10, dtype='f8').newbyteorder().byteswap()
        i = nditer(a, ['buffered', 'external_loop'],
                   [['readwrite', 'nbo', 'aligned']],
                   casting='unsafe',
                   op_dtypes=[np.dtype('c8').newbyteorder()],
                   buffersize=3)
        with i:
            for v in i:
                v[...] *= 2
        assert_equal(a, 2*np.arange(10, dtype='f8'))

def test_iter_buffered_cast_byteswapped_complex():
    # Test that buffering can handle a cast which requires swap->cast->copy
    a = np.arange(10, dtype='c8').newbyteorder().byteswap()
    a += 2j
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('c16')],
               buffersize=3)
    with i:
        for v in i:
            v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)

    a = np.arange(10, dtype='c8')
    a += 2j
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('c16').newbyteorder()],
               buffersize=3)
    with i:
        for v in i:
            v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)

    a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
    a += 2j
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('c16')],
               buffersize=3)
    with i:
        for v in i:
            v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)

    a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('f4')],
               buffersize=7)
    with i:
        for v in i:
            v[...]
*= 2 assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) def test_iter_buffered_cast_structured_type(): # Tests buffering of structured types # simple -> struct type (duplicates the value) sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] a = np.arange(3, dtype='f4') + 0.5 i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt) vals = [np.array(x) for x in i] assert_equal(vals[0]['a'], 0.5) assert_equal(vals[0]['b'], 0) assert_equal(vals[0]['c'], [[(0.5)]*3]*2) assert_equal(vals[0]['d'], 0.5) assert_equal(vals[1]['a'], 1.5) assert_equal(vals[1]['b'], 1) assert_equal(vals[1]['c'], [[(1.5)]*3]*2) assert_equal(vals[1]['d'], 1.5) assert_equal(vals[0].dtype, np.dtype(sdt)) # object -> struct type sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] a = np.zeros((3,), dtype='O') a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) if HAS_REFCOUNT: rc = sys.getrefcount(a[0]) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt) vals = [x.copy() for x in i] assert_equal(vals[0]['a'], 0.5) assert_equal(vals[0]['b'], 0) assert_equal(vals[0]['c'], [[(0.5)]*3]*2) assert_equal(vals[0]['d'], 0.5) assert_equal(vals[1]['a'], 1.5) assert_equal(vals[1]['b'], 1) assert_equal(vals[1]['c'], [[(1.5)]*3]*2) assert_equal(vals[1]['d'], 1.5) assert_equal(vals[0].dtype, np.dtype(sdt)) vals, i, x = [None]*3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(a[0]), rc) # single-field struct type -> simple sdt = [('a', 'f4')] a = np.array([(5.5,), (8,)], dtype=sdt) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes='i4') assert_equal([x_[()] for x_ in i], [5, 8]) # make sure multi-field struct type -> simple doesn't work sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) assert_raises(TypeError, lambda: ( nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes='i4'))) # struct type -> struct type (field-wise copy) sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) assert_equal([np.array(x_) for x_ in i], [np.array((1, 2, 3), dtype=sdt2), np.array((4, 5, 6), dtype=sdt2)]) # make sure struct type -> struct type with different # number of fields fails sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] sdt2 = [('b', 'O'), ('a', 'f8')] a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) assert_raises(ValueError, lambda : ( nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2))) def test_iter_buffered_cast_subarray(): # Tests buffering of subarrays # one element -> many (copies it to all) sdt1 = [('a', 'f4')] sdt2 = [('a', 'f8', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) for x, count in zip(i, list(range(6))): assert_(np.all(x['a'] == count)) # one element -> many -> back (copies it to all) sdt1 = [('a', 'O', (1, 1))] sdt2 = [('a', 'O', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'][:, 0, 0] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) with i: assert_equal(i[0].dtype, 
np.dtype(sdt2)) count = 0 for x in i: assert_(np.all(x['a'] == count)) x['a'][0] += 2 count += 1 assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) # many -> one element -> back (copies just element 0) sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'O', (1,))] a = np.zeros((6,), dtype=sdt1) a['a'][:, 0, 0, 0] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) with i: assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], count) x['a'] += 2 count += 1 assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) # many -> one element -> back (copies just element 0) sdt1 = [('a', 'f8', (3, 2, 2))] sdt2 = [('a', 'O', (1,))] a = np.zeros((6,), dtype=sdt1) a['a'][:, 0, 0, 0] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], count) count += 1 # many -> one element (copies just element 0) sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'f4', (1,))] a = np.zeros((6,), dtype=sdt1) a['a'][:, 0, 0, 0] = np.arange(6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], count) count += 1 # many -> matching shape (straightforward copy) sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'f4', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], a[count]['a']) count += 1 # vector -> smaller vector (truncates) sdt1 = [('a', 'f8', (6,))] sdt2 = [('a', 'f4', (2,))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*6).reshape(6, 6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'], a[count]['a'][:2]) count += 1 # vector -> bigger vector (pads with zeros) sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (6,))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*2).reshape(6, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][:2], a[count]['a']) assert_equal(x['a'][2:], [0, 0, 0, 0]) count += 1 # vector -> matrix (broadcasts) sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*2).reshape(6, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][0], a[count]['a']) assert_equal(x['a'][1], a[count]['a']) count += 1 # vector -> matrix (broadcasts and zero-pads) sdt1 = [('a', 'f8', (2, 1))] sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*2).reshape(6, 2, 1) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) assert_equal(x['a'][2,:], [0, 0]) count += 1 # matrix -> matrix (truncates and zero-pads) sdt1 = [('a', 'f8', (2, 3))] sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6*2*3).reshape(6, 2, 3) i = nditer(a, 
['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) assert_equal(x['a'][2,:], [0, 0]) count += 1 def test_iter_buffering_badwriteback(): # Writing back from a buffer cannot combine elements # a needs write buffering, but had a broadcast dimension a = np.arange(6).reshape(2, 3, 1) b = np.arange(12).reshape(2, 3, 2) assert_raises(ValueError, nditer, [a, b], ['buffered', 'external_loop'], [['readwrite'], ['writeonly']], order='C') # But if a is readonly, it's fine nditer([a, b], ['buffered', 'external_loop'], [['readonly'], ['writeonly']], order='C') # If a has just one element, it's fine too (constant 0 stride, a reduction) a = np.arange(1).reshape(1, 1, 1) nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], [['readwrite'], ['writeonly']], order='C') # check that it fails on other dimensions too a = np.arange(6).reshape(1, 3, 2) assert_raises(ValueError, nditer, [a, b], ['buffered', 'external_loop'], [['readwrite'], ['writeonly']], order='C') a = np.arange(4).reshape(2, 1, 2) assert_raises(ValueError, nditer, [a, b], ['buffered', 'external_loop'], [['readwrite'], ['writeonly']], order='C') def test_iter_buffering_string(): # Safe casting disallows shrinking strings a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) assert_equal(a.dtype, np.dtype('S4')) assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], op_dtypes='S2') i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') assert_equal(i[0], b'abc') assert_equal(i[0].dtype, np.dtype('S6')) a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_) assert_equal(a.dtype, np.dtype('U4')) assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], op_dtypes='U2') i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') assert_equal(i[0], u'abc') assert_equal(i[0].dtype, np.dtype('U6')) def test_iter_buffering_growinner(): # Test that the inner loop grows when no buffering is needed a = np.arange(30) i = nditer(a, ['buffered', 'growinner', 'external_loop'], buffersize=5) # Should end up with just one inner loop here assert_equal(i[0].size, a.size) @pytest.mark.slow def test_iter_buffered_reduce_reuse(): # large enough array for all views, including negative strides. a = np.arange(2*3**5)[3**5:3**5+1] flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] op_flags = [('readonly',), ('readwrite', 'allocate')] op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] # wrong dtype to force buffering op_dtypes = [float, a.dtype] def get_params(): for xs in range(-3**2, 3**2 + 1): for ys in range(xs, 3**2 + 1): for op_axes in op_axes_list: # last stride is reduced and because of that not # important for this test, as it is the inner stride. strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) for skip in [0, 1]: yield arr, op_axes, skip for arr, op_axes, skip in get_params(): nditer2 = np.nditer([arr.copy(), None], op_axes=op_axes, flags=flags, op_flags=op_flags, op_dtypes=op_dtypes) with nditer2: nditer2.operands[-1][...] = 0 nditer2.reset() nditer2.iterindex = skip for (a2_in, b2_in) in nditer2: b2_in += a2_in.astype(np.int_) comp_res = nditer2.operands[-1] for bufsize in range(0, 3**3): nditer1 = np.nditer([arr, None], op_axes=op_axes, flags=flags, op_flags=op_flags, buffersize=bufsize, op_dtypes=op_dtypes) with nditer1: nditer1.operands[-1][...] 
= 0 nditer1.reset() nditer1.iterindex = skip for (a1_in, b1_in) in nditer1: b1_in += a1_in.astype(np.int_) res = nditer1.operands[-1] assert_array_equal(res, comp_res) def test_iter_no_broadcast(): # Test that the no_broadcast flag works a = np.arange(24).reshape(2, 3, 4) b = np.arange(6).reshape(2, 3, 1) c = np.arange(12).reshape(3, 4) nditer([a, b, c], [], [['readonly', 'no_broadcast'], ['readonly'], ['readonly']]) assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) class TestIterNested: def test_basic(self): # Test nested iteration basic usage a = arange(12).reshape(2, 3, 2) i, j = np.nested_iters(a, [[0], [1, 2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[0, 1], [2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[0, 2], [1]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_reorder(self): # Test nested iteration basic usage a = arange(12).reshape(2, 3, 2) # In 'K' order (default), it gets reordered i, j = np.nested_iters(a, [[0], [2, 1]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[1, 0], [2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[2, 0], [1]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) # In 'C' order, it doesn't i, j = np.nested_iters(a, [[0], [2, 1]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) i, j = np.nested_iters(a, [[1, 0], [2]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) i, j = np.nested_iters(a, [[2, 0], [1]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) def test_flip_axes(self): # Test nested iteration with negative axes a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] # In 'K' order (default), the axes all get flipped i, j = np.nested_iters(a, [[0], [1, 2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[0, 1], [2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[0, 2], [1]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) # In 'C' order, flipping axes is disabled i, j = np.nested_iters(a, [[0], [1, 2]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) i, j = np.nested_iters(a, [[0, 1], [2]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) i, j = np.nested_iters(a, [[0, 2], [1]], order='C') vals = [list(j) for _ in i] assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) def test_broadcast(self): # Test nested iteration with broadcasting a = arange(2).reshape(2, 1) b = arange(3).reshape(1, 3) i, j = np.nested_iters([a, b], [[0], [1]]) vals = [list(j) for _ in i] assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) i, j = np.nested_iters([a, b], [[1], [0]]) vals = 
[list(j) for _ in i] assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) def test_dtype_copy(self): # Test nested iteration with a copy to change dtype # copy a = arange(6, dtype='i4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], op_flags=['readonly', 'copy'], op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) vals = None # writebackifcopy - using context manager a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], op_flags=['readwrite', 'updateifcopy'], casting='same_kind', op_dtypes='f8') with i, j: assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[0, 1, 2], [3, 4, 5]]) assert_equal(a, [[1, 2, 3], [4, 5, 6]]) # writebackifcopy - using close() a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], op_flags=['readwrite', 'updateifcopy'], casting='same_kind', op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[0, 1, 2], [3, 4, 5]]) i.close() j.close() assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_dtype_buffered(self): # Test nested iteration with buffering to change dtype a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], flags=['buffered'], op_flags=['readwrite'], casting='same_kind', op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_0d(self): a = np.arange(12).reshape(2, 3, 2) i, j = np.nested_iters(a, [[], [1, 0, 2]]) vals = [list(j) for _ in i] assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[1, 0, 2], []]) vals = [list(j) for _ in i] assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) vals = [] for x in i: for y in j: vals.append([z for z in k]) assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_iter_nested_iters_dtype_buffered(self): # Test nested iteration with buffering to change dtype a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], flags=['buffered'], op_flags=['readwrite'], casting='same_kind', op_dtypes='f8') with i, j: assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_reduction_error(): a = np.arange(6) assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0], [-1]]) a = np.arange(6).reshape(2, 3) assert_raises(ValueError, nditer, [a, None], ['external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0, 1], [-1, -1]]) def test_iter_reduction(): # Test doing reductions with the iterator a = np.arange(6) i = nditer([a, None], ['reduce_ok'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0], [-1]]) # Need to initialize the output operand to the addition unit with i: i.operands[1][...] = 0 # Do the reduction for x, y in i: y[...] += x # Since no axes were specified, should have allocated a scalar assert_equal(i.operands[1].ndim, 0) assert_equal(i.operands[1], np.sum(a)) a = np.arange(6).reshape(2, 3) i = nditer([a, None], ['reduce_ok', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0, 1], [-1, -1]]) # Need to initialize the output operand to the addition unit with i: i.operands[1][...] 
= 0 # Reduction shape/strides for the output assert_equal(i[1].shape, (6,)) assert_equal(i[1].strides, (0,)) # Do the reduction for x, y in i: # Use a for loop instead of ``y[...] += x`` # (equivalent to ``y[...] = y[...].copy() + x``), # because y has zero strides we use for the reduction for j in range(len(y)): y[j] += x[j] # Since no axes were specified, should have allocated a scalar assert_equal(i.operands[1].ndim, 0) assert_equal(i.operands[1], np.sum(a)) # This is a tricky reduction case for the buffering double loop # to handle a = np.ones((2, 3, 5)) it1 = nditer([a, None], ['reduce_ok', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[None, [0, -1, 1]]) it2 = nditer([a, None], ['reduce_ok', 'external_loop', 'buffered', 'delay_bufalloc'], [['readonly'], ['readwrite', 'allocate']], op_axes=[None, [0, -1, 1]], buffersize=10) with it1, it2: it1.operands[1].fill(0) it2.operands[1].fill(0) it2.reset() for x in it1: x[1][...] += x[0] for x in it2: x[1][...] += x[0] assert_equal(it1.operands[1], it2.operands[1]) assert_equal(it2.operands[1].sum(), a.size) def test_iter_buffering_reduction(): # Test doing buffered reductions with the iterator a = np.arange(6) b = np.array(0., dtype='f8').byteswap().newbyteorder() i = nditer([a, b], ['reduce_ok', 'buffered'], [['readonly'], ['readwrite', 'nbo']], op_axes=[[0], [-1]]) with i: assert_equal(i[1].dtype, np.dtype('f8')) assert_(i[1].dtype != b.dtype) # Do the reduction for x, y in i: y[...] += x # Since no axes were specified, should have allocated a scalar assert_equal(b, np.sum(a)) a = np.arange(6).reshape(2, 3) b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], [['readonly'], ['readwrite', 'nbo']], op_axes=[[0, 1], [0, -1]]) # Reduction shape/strides for the output with i: assert_equal(i[1].shape, (3,)) assert_equal(i[1].strides, (0,)) # Do the reduction for x, y in i: # Use a for loop instead of ``y[...] += x`` # (equivalent to ``y[...] = y[...].copy() + x``), # because y has zero strides we use for the reduction for j in range(len(y)): y[j] += x[j] assert_equal(b, np.sum(a, axis=1)) # Iterator inner double loop was wrong on this one p = np.arange(2) + 1 it = np.nditer([p, None], ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[-1, 0], [-1, -1]], itershape=(2, 2)) with it: it.operands[1].fill(0) it.reset() assert_equal(it[0], [1, 2, 1, 2]) # Iterator inner loop should take argument contiguity into account x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) x[...] = np.arange(x.size).reshape(x.shape) y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) y_base_copy = y_base.copy() y = y_base[::2,:,None] it = np.nditer([y, x], ['buffered', 'external_loop', 'reduce_ok'], [['readwrite'], ['readonly']]) with it: for a, b in it: a.fill(2) assert_equal(y_base[1::2], y_base_copy[1::2]) assert_equal(y_base[::2], 2) def test_iter_buffering_reduction_reuse_reduce_loops(): # There was a bug triggering reuse of the reduce loop inappropriately, # which caused processing to happen in unnecessarily small chunks # and overran the buffer. 
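    # (Explanatory note, inferred from the shapes used below: b is broadcast
    # along the first axis of a, so the buffered write-back has to flush at
    # each row boundary; with buffersize=5 each 7-element row is therefore
    # processed as a chunk of 5 followed by a chunk of 2.)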
    a = np.zeros((2, 7))
    b = np.zeros((1, 7))
    it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
                   op_flags=[['readonly'], ['readwrite']],
                   buffersize=5)
    with it:
        bufsizes = [x.shape[0] for x, y in it]
    assert_equal(bufsizes, [5, 2, 5, 2])
    assert_equal(sum(bufsizes), a.size)

def test_iter_writemasked_badinput():
    a = np.zeros((2, 3))
    b = np.zeros((3,))
    m = np.array([[True, True, False], [False, True, False]])
    m2 = np.array([True, True, False])
    m3 = np.array([0, 1, 1], dtype='u1')
    mbad1 = np.array([0, 1, 1], dtype='i1')
    mbad2 = np.array([0, 1, 1], dtype='f4')

    # Need an 'arraymask' if any operand is 'writemasked'
    assert_raises(ValueError, nditer, [a, m], [],
                  [['readwrite', 'writemasked'], ['readonly']])

    # A 'writemasked' operand must not be readonly
    assert_raises(ValueError, nditer, [a, m], [],
                  [['readonly', 'writemasked'], ['readonly', 'arraymask']])

    # 'writemasked' and 'arraymask' may not be used together
    assert_raises(ValueError, nditer, [a, m], [],
                  [['readonly'], ['readwrite', 'arraymask', 'writemasked']])

    # 'arraymask' may only be specified once
    assert_raises(ValueError, nditer, [a, m, m2], [],
                  [['readwrite', 'writemasked'],
                   ['readonly', 'arraymask'],
                   ['readonly', 'arraymask']])

    # An 'arraymask' with nothing 'writemasked' also doesn't make sense
    assert_raises(ValueError, nditer, [a, m], [],
                  [['readwrite'], ['readonly', 'arraymask']])

    # A writemasked reduction requires a similarly smaller mask
    assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
                  [['readonly'],
                   ['readwrite', 'writemasked'],
                   ['readonly', 'arraymask']])
    # But this should work with a smaller/equal mask to the reduction operand
    np.nditer([a, b, m2], ['reduce_ok'],
              [['readonly'],
               ['readwrite', 'writemasked'],
               ['readonly', 'arraymask']])
    # The arraymask itself cannot be a reduction
    assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
                  [['readonly'],
                   ['readwrite', 'writemasked'],
                   ['readwrite', 'arraymask']])

    # A uint8 mask is ok too
    np.nditer([a, m3], ['buffered'],
              [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
              op_dtypes=['f4', None],
              casting='same_kind')
    # An int8 mask isn't ok
    assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
                  [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
                  op_dtypes=['f4', None],
                  casting='same_kind')
    # A float32 mask isn't ok
    assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
                  [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
                  op_dtypes=['f4', None],
                  casting='same_kind')

def test_iter_writemasked():
    a = np.zeros((3,), dtype='f8')
    msk = np.array([True, True, False])

    # When buffering is unused, 'writemasked' effectively does nothing.
    # It's up to the user of the iterator to obey the requested semantics.
    it = np.nditer([a, msk], [],
                   [['readwrite', 'writemasked'], ['readonly', 'arraymask']])
    with it:
        for x, m in it:
            x[...] = 1
    # Because we violated the semantics, all the values became 1
    assert_equal(a, [1, 1, 1])

    # Even if buffering is enabled, we still may be accessing the array
    # directly.
    it = np.nditer([a, msk], ['buffered'],
                   [['readwrite', 'writemasked'], ['readonly', 'arraymask']])
    with it:
        for x, m in it:
            x[...] = 2.5
    # Because we violated the semantics, all the values became 2.5
    assert_equal(a, [2.5, 2.5, 2.5])

    # If buffering will definitely be happening, for instance because of
    # a cast, only the items selected by the mask will be copied back from
    # the buffer.
    it = np.nditer([a, msk], ['buffered'],
                   [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
                   op_dtypes=['i8', None],
                   casting='unsafe')
    with it:
        for x, m in it:
            x[...] = 3
    # Even though we violated the semantics, only the selected values
    # were copied back
    assert_equal(a, [3, 3, 2.5])

def test_iter_non_writable_attribute_deletion():
    it = np.nditer(np.ones(2))
    attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
            "iterationneedsapi", "has_multi_index", "has_index", "dtypes",
            "ndim", "nop", "itersize", "finished"]
    for s in attr:
        assert_raises(AttributeError, delattr, it, s)

def test_iter_writable_attribute_deletion():
    it = np.nditer(np.ones(2))
    attr = ["multi_index", "index", "iterrange", "iterindex"]
    for s in attr:
        assert_raises(AttributeError, delattr, it, s)

def test_iter_element_deletion():
    it = np.nditer(np.ones(3))
    try:
        del it[1]
        del it[1:2]
    except TypeError:
        pass
    except Exception:
        raise AssertionError

def test_iter_allocated_array_dtypes():
    # If the dtype of an allocated output has a shape, the shape gets
    # tacked onto the end of the result.
    it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
    for a, b in it:
        b[0] = a - 1
        b[1] = a + 1
    assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])

    # Make sure this works for scalars too
    it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
    for a, b, c in it:
        c[0, 0] = a - b
        c[0, 1] = a + b
        c[1, 0] = a * b
        c[1, 1] = a / b
    assert_equal(it.operands[2], [[8, 12], [20, 5]])

def test_0d_iter():
    # Basic test for iteration of 0-d arrays:
    i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
    assert_equal(i.ndim, 0)
    assert_equal(next(i), (2, 3))
    assert_equal(i.multi_index, ())
    assert_equal(i.iterindex, 0)
    assert_raises(StopIteration, next, i)
    # test reset:
    i.reset()
    assert_equal(next(i), (2, 3))
    assert_raises(StopIteration, next, i)

    # test forcing to 0-d
    i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
    assert_equal(i.ndim, 0)
    assert_equal(len(i), 1)

    i = nditer(np.arange(5), ['multi_index'], [['readonly']],
               op_axes=[()], itershape=())
    assert_equal(i.ndim, 0)
    assert_equal(len(i), 1)

    # passing an itershape alone is not enough, the op_axes are also needed
    with assert_raises(ValueError):
        nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=())

    # Test a more complex buffered casting case (same as another test above)
    sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
    a = np.array(0.5, dtype='f4')
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe', op_dtypes=sdt)
    vals = next(i)
    assert_equal(vals['a'], 0.5)
    assert_equal(vals['b'], 0)
    assert_equal(vals['c'], [[(0.5)]*3]*2)
    assert_equal(vals['d'], 0.5)

def test_iter_too_large():
    # The total size of the iterator must not exceed the maximum intp due
    # to broadcasting. Dividing by 1024 will keep it small enough to
    # give a legal array.
    size = np.iinfo(np.intp).max // 1024
    arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
    assert_raises(ValueError, nditer, (arr, arr[:, None]))
    # test the same for multiindex. That may get more interesting when
    # removing 0 dimensional axis is allowed (since an iterator can grow then)
    assert_raises(ValueError, nditer,
                  (arr, arr[:, None]), flags=['multi_index'])

def test_iter_too_large_with_multiindex():
    # When a multi index is being tracked, the error is delayed. This
    # checks the delayed error message, and that the error can be worked
    # around by removing an axis.
base_size = 2**10 num = 1 while base_size**num < np.iinfo(np.intp).max: num += 1 shape_template = [1, 1] * num arrays = [] for i in range(num): shape = shape_template[:] shape[i * 2] = 2**10 arrays.append(np.empty(shape)) arrays = tuple(arrays) # arrays are now too large to be broadcast. The different modes test # different nditer functionality with or without GIL. for mode in range(6): with assert_raises(ValueError): _multiarray_tests.test_nditer_too_large(arrays, -1, mode) # but if we do nothing with the nditer, it can be constructed: _multiarray_tests.test_nditer_too_large(arrays, -1, 7) # When an axis is removed, things should work again (half the time): for i in range(num): for mode in range(6): # an axis with size 1024 is removed: _multiarray_tests.test_nditer_too_large(arrays, i*2, mode) # an axis with size 1 is removed: with assert_raises(ValueError): _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode) def test_writebacks(): a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() assert_(a.dtype.byteorder != au.dtype.byteorder) it = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) with it: it.operands[0][:] = 100 assert_equal(au, 100) # do it again, this time raise an error, it = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) try: with it: assert_equal(au.flags.writeable, False) it.operands[0][:] = 0 raise ValueError('exit context manager on exception') except: pass assert_equal(au, 0) assert_equal(au.flags.writeable, True) # cannot reuse i outside context manager assert_raises(ValueError, getattr, it, 'operands') it = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) with it: x = it.operands[0] x[:] = 6 assert_(x.flags.writebackifcopy) assert_equal(au, 6) assert_(not x.flags.writebackifcopy) x[:] = 123 # x.data still valid assert_equal(au, 6) # but not connected to au it = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) # reentering works with it: with it: for x in it: x[...] = 123 it = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) # make sure exiting the inner context manager closes the iterator with it: with it: for x in it: x[...] = 123 assert_raises(ValueError, getattr, it, 'operands') # do not crash if original data array is decrefed it = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) del au with it: for x in it: x[...] 
= 123 # make sure we cannot reenter the closed iterator enter = it.__enter__ assert_raises(RuntimeError, enter) def test_close_equivalent(): ''' using a context amanger and using nditer.close are equivalent ''' def add_close(x, y, out=None): addop = np.add it = np.nditer([x, y, out], [], [['readonly'], ['readonly'], ['writeonly','allocate']]) for (a, b, c) in it: addop(a, b, out=c) ret = it.operands[2] it.close() return ret def add_context(x, y, out=None): addop = np.add it = np.nditer([x, y, out], [], [['readonly'], ['readonly'], ['writeonly','allocate']]) with it: for (a, b, c) in it: addop(a, b, out=c) return it.operands[2] z = add_close(range(5), range(5)) assert_equal(z, range(0, 10, 2)) z = add_context(range(5), range(5)) assert_equal(z, range(0, 10, 2)) def test_close_raises(): it = np.nditer(np.arange(3)) assert_equal (next(it), 0) it.close() assert_raises(StopIteration, next, it) assert_raises(ValueError, getattr, it, 'operands') @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_warn_noclose(): a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() with suppress_warnings() as sup: sup.record(RuntimeWarning) it = np.nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) del it assert len(sup.log) == 1
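

# --- Illustrative sketch (not part of the original test suite) ---
# The writemasked tests above hinge on one rule: when buffering is forced
# (here by an 'i8' cast), only elements selected by the 'arraymask' operand
# are copied back from the buffer. A minimal standalone demonstration,
# assuming only numpy itself:
def _demo_writemasked_roundtrip():
    import numpy as np
    a = np.zeros(3, dtype='f8')
    msk = np.array([True, True, False])
    it = np.nditer([a, msk], ['buffered'],
                   [['readwrite', 'writemasked'], ['readonly', 'arraymask']],
                   op_dtypes=['i8', None], casting='unsafe')
    with it:
        for x, m in it:
            x[...] = 7  # written to the buffer for every element...
    return a  # ...but written back only where msk is True: [7., 7., 0.]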
# Repo: abalkin/numpy
# Test path: numpy/core/tests/test_nditer.py
# Code path: numpy/distutils/_shell_utils.py
"""Plugwise Climate component for Home Assistant.""" import logging import haanna import voluptuous as vol from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice from homeassistant.components.climate.const import ( CURRENT_HVAC_COOL, CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE, HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, SUPPORT_PRESET_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ( ATTR_TEMPERATURE, CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, TEMP_CELSIUS, ) from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE _LOGGER = logging.getLogger(__name__) # Configuration directives CONF_MIN_TEMP = "min_temp" CONF_MAX_TEMP = "max_temp" CONF_LEGACY = "legacy_anna" # Default directives DEFAULT_NAME = "Plugwise Thermostat" DEFAULT_USERNAME = "smile" DEFAULT_TIMEOUT = 10 DEFAULT_PORT = 80 DEFAULT_ICON = "mdi:thermometer" DEFAULT_MIN_TEMP = 4 DEFAULT_MAX_TEMP = 30 # HVAC modes HVAC_MODES_1 = [HVAC_MODE_HEAT, HVAC_MODE_AUTO] HVAC_MODES_2 = [HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO] # Read platform configuration PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_LEGACY, default=False): cv.boolean, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string, vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): cv.positive_int, vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): cv.positive_int, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Add the Plugwise (Anna) Thermostat.""" api = haanna.Haanna( config[CONF_USERNAME], config[CONF_PASSWORD], config[CONF_HOST], config[CONF_PORT], config[CONF_LEGACY], ) try: api.ping_anna_thermostat() except OSError: _LOGGER.debug("Ping failed, retrying later", exc_info=True) raise PlatformNotReady devices = [ ThermostatDevice( api, config[CONF_NAME], config[CONF_MIN_TEMP], config[CONF_MAX_TEMP] ) ] add_entities(devices, True) class ThermostatDevice(ClimateDevice): """Representation of the Plugwise thermostat.""" def __init__(self, api, name, min_temp, max_temp): """Set up the Plugwise API.""" self._api = api self._min_temp = min_temp self._max_temp = max_temp self._name = name self._direct_objects = None self._domain_objects = None self._outdoor_temperature = None self._selected_schema = None self._last_active_schema = None self._preset_mode = None self._presets = None self._presets_list = None self._boiler_status = None self._heating_status = None self._cooling_status = None self._dhw_status = None self._schema_names = None self._schema_status = None self._current_temperature = None self._thermostat_temperature = None self._boiler_temperature = None self._water_pressure = None self._schedule_temperature = None self._hvac_mode = None @property def hvac_action(self): """Return the current hvac action.""" if self._heating_status or self._boiler_status or self._dhw_status: return CURRENT_HVAC_HEAT if self._cooling_status: return CURRENT_HVAC_COOL return CURRENT_HVAC_IDLE @property def name(self): """Return the name of the thermostat, if any.""" return self._name @property def icon(self): """Return the icon to use in the frontend.""" return DEFAULT_ICON @property def supported_features(self): """Return the list of supported features.""" return 
SUPPORT_FLAGS @property def device_state_attributes(self): """Return the device specific state attributes.""" attributes = {} if self._outdoor_temperature: attributes["outdoor_temperature"] = self._outdoor_temperature if self._schema_names: attributes["available_schemas"] = self._schema_names if self._selected_schema: attributes["selected_schema"] = self._selected_schema if self._boiler_temperature: attributes["boiler_temperature"] = self._boiler_temperature if self._water_pressure: attributes["water_pressure"] = self._water_pressure return attributes @property def preset_modes(self): """Return the available preset modes list. And make the presets with their temperatures available. """ return self._presets_list @property def hvac_modes(self): """Return the available hvac modes list.""" if self._heating_status is not None or self._boiler_status is not None: if self._cooling_status is not None: return HVAC_MODES_2 return HVAC_MODES_1 return None @property def hvac_mode(self): """Return current active hvac state.""" if self._schema_status: return HVAC_MODE_AUTO if self._heating_status or self._boiler_status or self._dhw_status: if self._cooling_status: return HVAC_MODE_HEAT_COOL return HVAC_MODE_HEAT return HVAC_MODE_OFF @property def target_temperature(self): """Return the target_temperature. From the XML the thermostat-value is used because it updates 'immediately' compared to the target_temperature-value. This way the information on the card is "immediately" updated after changing the preset, temperature, etc. """ return self._thermostat_temperature @property def preset_mode(self): """Return the active selected schedule-name. Or, return the active preset, or return Temporary in case of a manual change in the set-temperature with a weekschedule active. Or return Manual in case of a manual change and no weekschedule active. """ if self._presets: presets = self._presets preset_temperature = presets.get(self._preset_mode, "none") if self.hvac_mode == HVAC_MODE_AUTO: if self._thermostat_temperature == self._schedule_temperature: return "{}".format(self._selected_schema) if self._thermostat_temperature == preset_temperature: return self._preset_mode return "Temporary" if self._thermostat_temperature != preset_temperature: return "Manual" return self._preset_mode return None @property def current_temperature(self): """Return the current room temperature.""" return self._current_temperature @property def min_temp(self): """Return the minimal temperature possible to set.""" return self._min_temp @property def max_temp(self): """Return the maximum temperature possible to set.""" return self._max_temp @property def temperature_unit(self): """Return the unit of measured temperature.""" return TEMP_CELSIUS def set_temperature(self, **kwargs): """Set new target temperature.""" _LOGGER.debug("Adjusting temperature") temperature = kwargs.get(ATTR_TEMPERATURE) if temperature is not None and self._min_temp < temperature < self._max_temp: _LOGGER.debug("Changing temporary temperature") self._api.set_temperature(self._domain_objects, temperature) else: _LOGGER.error("Invalid temperature requested") def set_hvac_mode(self, hvac_mode): """Set the hvac mode.""" _LOGGER.debug("Adjusting hvac_mode (i.e. 
schedule/schema)") schema_mode = "false" if hvac_mode == HVAC_MODE_AUTO: schema_mode = "true" self._api.set_schema_state( self._domain_objects, self._last_active_schema, schema_mode ) def set_preset_mode(self, preset_mode): """Set the preset mode.""" _LOGGER.debug("Changing preset mode") self._api.set_preset(self._domain_objects, preset_mode) def update(self): """Update the data from the thermostat.""" _LOGGER.debug("Update called") self._direct_objects = self._api.get_direct_objects() self._domain_objects = self._api.get_domain_objects() self._outdoor_temperature = self._api.get_outdoor_temperature( self._domain_objects ) self._selected_schema = self._api.get_active_schema_name(self._domain_objects) self._last_active_schema = self._api.get_last_active_schema_name( self._domain_objects ) self._preset_mode = self._api.get_current_preset(self._domain_objects) self._presets = self._api.get_presets(self._domain_objects) self._presets_list = list(self._api.get_presets(self._domain_objects)) self._boiler_status = self._api.get_boiler_status(self._direct_objects) self._heating_status = self._api.get_heating_status(self._direct_objects) self._cooling_status = self._api.get_cooling_status(self._direct_objects) self._dhw_status = self._api.get_domestic_hot_water_status(self._direct_objects) self._schema_names = self._api.get_schema_names(self._domain_objects) self._schema_status = self._api.get_schema_state(self._domain_objects) self._current_temperature = self._api.get_current_temperature( self._domain_objects ) self._thermostat_temperature = self._api.get_thermostat_temperature( self._domain_objects ) self._schedule_temperature = self._api.get_schedule_temperature( self._domain_objects ) self._boiler_temperature = self._api.get_boiler_temperature( self._domain_objects ) self._water_pressure = self._api.get_water_pressure(self._domain_objects)
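

# --- Illustrative sketch (not part of the component) ---
# The preset_mode property above encodes a small decision table: active
# schedule name, matching preset, "Temporary" (manual override while a
# schedule is active), or "Manual". A pure-function restatement of that
# logic for clarity (hypothetical helper; argument names are assumptions):
def _resolve_preset_mode(presets, preset_mode, hvac_mode,
                         thermostat_temp, schedule_temp, selected_schema):
    if not presets:
        return None
    preset_temperature = presets.get(preset_mode, "none")
    if hvac_mode == HVAC_MODE_AUTO:
        if thermostat_temp == schedule_temp:
            return selected_schema  # the schedule drives the setpoint
        if thermostat_temp == preset_temperature:
            return preset_mode  # the preset drives the setpoint
        return "Temporary"  # manual override while a schedule is active
    if thermostat_temp != preset_temperature:
        return "Manual"  # manual override, no schedule active
    return preset_mode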
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
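

# --- Illustrative sketch (not part of the original tests) ---
# The configuration tests above repeat the same nested dict scaffolding.
# A hypothetical helper that builds a minimal template-fan config could
# remove that duplication; shown only as a sketch:
def _minimal_fan_config(**overrides):
    fan = {
        "value_template": "{{ 'on' }}",
        "turn_on": {"service": "script.fan_on"},
        "turn_off": {"service": "script.fan_off"},
    }
    fan.update(overrides)
    return {"fan": {"platform": "template", "fans": {"test_fan": fan}}}
# e.g. _minimal_fan_config(speed_template="{{ states('input_select.speed') }}")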
# Repo: Teagan42/home-assistant
# Test path: tests/components/template/test_fan.py
# Code path: homeassistant/components/plugwise/climate.py
"""Support for Qwikswitch relays.""" from homeassistant.components.switch import SwitchDevice from . import DOMAIN as QWIKSWITCH, QSToggleEntity async def async_setup_platform(hass, _, add_entities, discovery_info=None): """Add switches from the main Qwikswitch component.""" if discovery_info is None: return qsusb = hass.data[QWIKSWITCH] devs = [QSSwitch(qsid, qsusb) for qsid in discovery_info[QWIKSWITCH]] add_entities(devs) class QSSwitch(QSToggleEntity, SwitchDevice): """Switch based on a Qwikswitch relay module."""
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
# Repo: Teagan42/home-assistant
# Test path: tests/components/template/test_fan.py
# Code path: homeassistant/components/qwikswitch/switch.py
"""Config flow for Somfy.""" import logging from homeassistant import config_entries from homeassistant.helpers import config_entry_oauth2_flow from .const import DOMAIN _LOGGER = logging.getLogger(__name__) @config_entries.HANDLERS.register(DOMAIN) class SomfyFlowHandler(config_entry_oauth2_flow.AbstractOAuth2FlowHandler): """Config flow to handle Somfy OAuth2 authentication.""" DOMAIN = DOMAIN CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL @property def logger(self) -> logging.Logger: """Return logger.""" return logging.getLogger(__name__) async def async_step_user(self, user_input=None): """Handle a flow start.""" if self.hass.config_entries.async_entries(DOMAIN): return self.async_abort(reason="already_setup") return await super().async_step_user(user_input)
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/somfy/config_flow.py
"""Trigger an automation when a LiteJet switch is released.""" import logging import voluptuous as vol from homeassistant.const import CONF_PLATFORM from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.event import track_point_in_utc_time import homeassistant.util.dt as dt_util # mypy: allow-untyped-defs, no-check-untyped-defs _LOGGER = logging.getLogger(__name__) CONF_NUMBER = "number" CONF_HELD_MORE_THAN = "held_more_than" CONF_HELD_LESS_THAN = "held_less_than" TRIGGER_SCHEMA = vol.Schema( { vol.Required(CONF_PLATFORM): "litejet", vol.Required(CONF_NUMBER): cv.positive_int, vol.Optional(CONF_HELD_MORE_THAN): vol.All( cv.time_period, cv.positive_timedelta ), vol.Optional(CONF_HELD_LESS_THAN): vol.All( cv.time_period, cv.positive_timedelta ), } ) async def async_attach_trigger(hass, config, action, automation_info): """Listen for events based on configuration.""" number = config.get(CONF_NUMBER) held_more_than = config.get(CONF_HELD_MORE_THAN) held_less_than = config.get(CONF_HELD_LESS_THAN) pressed_time = None cancel_pressed_more_than = None @callback def call_action(): """Call action with right context.""" hass.async_run_job( action, { "trigger": { CONF_PLATFORM: "litejet", CONF_NUMBER: number, CONF_HELD_MORE_THAN: held_more_than, CONF_HELD_LESS_THAN: held_less_than, } }, ) # held_more_than and held_less_than: trigger on released (if in time range) # held_more_than: trigger after pressed with calculation # held_less_than: trigger on released with calculation # neither: trigger on pressed @callback def pressed_more_than_satisfied(now): """Handle the LiteJet's switch's button pressed >= held_more_than.""" call_action() def pressed(): """Handle the press of the LiteJet switch's button.""" nonlocal cancel_pressed_more_than, pressed_time nonlocal held_less_than, held_more_than pressed_time = dt_util.utcnow() if held_more_than is None and held_less_than is None: hass.add_job(call_action) if held_more_than is not None and held_less_than is None: cancel_pressed_more_than = track_point_in_utc_time( hass, pressed_more_than_satisfied, dt_util.utcnow() + held_more_than ) def released(): """Handle the release of the LiteJet switch's button.""" nonlocal cancel_pressed_more_than, pressed_time nonlocal held_less_than, held_more_than # pylint: disable=not-callable if cancel_pressed_more_than is not None: cancel_pressed_more_than() cancel_pressed_more_than = None held_time = dt_util.utcnow() - pressed_time if held_less_than is not None and held_time < held_less_than: if held_more_than is None or held_time > held_more_than: hass.add_job(call_action) hass.data["litejet_system"].on_switch_pressed(number, pressed) hass.data["litejet_system"].on_switch_released(number, released) @callback def async_remove(): """Remove all subscriptions used for this trigger.""" return return async_remove
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/automation/litejet.py
"""Errors for the UniFi component.""" from homeassistant.exceptions import HomeAssistantError class UnifiException(HomeAssistantError): """Base class for UniFi exceptions.""" class AlreadyConfigured(UnifiException): """Controller is already configured.""" class AuthenticationRequired(UnifiException): """Unknown error occurred.""" class CannotConnect(UnifiException): """Unable to connect to the controller.""" class LoginRequired(UnifiException): """Component got logged out.""" class UserLevel(UnifiException): """User level too low."""
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/unifi/errors.py
"""Consts for Kaiterra integration.""" from datetime import timedelta DOMAIN = "kaiterra" DISPATCHER_KAITERRA = "kaiterra_update" AQI_SCALE = { "cn": [0, 50, 100, 150, 200, 300, 400, 500], "in": [0, 50, 100, 200, 300, 400, 500], "us": [0, 50, 100, 150, 200, 300, 500], } AQI_LEVEL = { "cn": [ "Good", "Satisfactory", "Moderate", "Unhealthy for sensitive groups", "Unhealthy", "Very unhealthy", "Hazardous", ], "in": [ "Good", "Satisfactory", "Moderately polluted", "Poor", "Very poor", "Severe", ], "us": [ "Good", "Moderate", "Unhealthy for sensitive groups", "Unhealthy", "Very unhealthy", "Hazardous", ], } ATTR_VOC = "volatile_organic_compounds" ATTR_AQI_LEVEL = "air_quality_index_level" ATTR_AQI_POLLUTANT = "air_quality_index_pollutant" AVAILABLE_AQI_STANDARDS = ["us", "cn", "in"] AVAILABLE_UNITS = ["x", "%", "C", "F", "mg/m³", "µg/m³", "ppm", "ppb"] AVAILABLE_DEVICE_TYPES = ["laseregg", "sensedge"] CONF_AQI_STANDARD = "aqi_standard" CONF_PREFERRED_UNITS = "preferred_units" DEFAULT_AQI_STANDARD = "us" DEFAULT_PREFERRED_UNIT = [] DEFAULT_SCAN_INTERVAL = timedelta(seconds=30) KAITERRA_COMPONENTS = ["sensor", "air_quality"]
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/kaiterra/const.py
"""Support for IHC lights.""" import logging from homeassistant.components.light import ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light from . import IHC_CONTROLLER, IHC_DATA, IHC_INFO from .const import CONF_DIMMABLE, CONF_OFF_ID, CONF_ON_ID from .ihcdevice import IHCDevice from .util import async_pulse, async_set_bool, async_set_int _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the IHC lights platform.""" if discovery_info is None: return devices = [] for name, device in discovery_info.items(): ihc_id = device["ihc_id"] product_cfg = device["product_cfg"] product = device["product"] # Find controller that corresponds with device id ctrl_id = device["ctrl_id"] ihc_key = IHC_DATA.format(ctrl_id) info = hass.data[ihc_key][IHC_INFO] ihc_controller = hass.data[ihc_key][IHC_CONTROLLER] ihc_off_id = product_cfg.get(CONF_OFF_ID) ihc_on_id = product_cfg.get(CONF_ON_ID) dimmable = product_cfg[CONF_DIMMABLE] light = IhcLight( ihc_controller, name, ihc_id, ihc_off_id, ihc_on_id, info, dimmable, product ) devices.append(light) add_entities(devices) class IhcLight(IHCDevice, Light): """Representation of a IHC light. For dimmable lights, the associated IHC resource should be a light level (integer). For non dimmable light the IHC resource should be an on/off (boolean) resource """ def __init__( self, ihc_controller, name, ihc_id: int, ihc_off_id: int, ihc_on_id: int, info: bool, dimmable=False, product=None, ) -> None: """Initialize the light.""" super().__init__(ihc_controller, name, ihc_id, info, product) self._ihc_off_id = ihc_off_id self._ihc_on_id = ihc_on_id self._brightness = 0 self._dimmable = dimmable self._state = None @property def brightness(self) -> int: """Return the brightness of this light between 0..255.""" return self._brightness @property def is_on(self) -> bool: """Return true if light is on.""" return self._state @property def supported_features(self): """Flag supported features.""" if self._dimmable: return SUPPORT_BRIGHTNESS return 0 async def async_turn_on(self, **kwargs): """Turn the light on.""" if ATTR_BRIGHTNESS in kwargs: brightness = kwargs[ATTR_BRIGHTNESS] else: brightness = self._brightness if brightness == 0: brightness = 255 if self._dimmable: await async_set_int( self.hass, self.ihc_controller, self.ihc_id, int(brightness * 100 / 255) ) else: if self._ihc_on_id: await async_pulse(self.hass, self.ihc_controller, self._ihc_on_id) else: await async_set_bool(self.hass, self.ihc_controller, self.ihc_id, True) async def async_turn_off(self, **kwargs): """Turn the light off.""" if self._dimmable: await async_set_int(self.hass, self.ihc_controller, self.ihc_id, 0) else: if self._ihc_off_id: await async_pulse(self.hass, self.ihc_controller, self._ihc_off_id) else: await async_set_bool(self.hass, self.ihc_controller, self.ihc_id, False) def on_ihc_change(self, ihc_id, value): """Handle IHC notifications.""" if isinstance(value, bool): self._dimmable = False self._state = value != 0 else: self._dimmable = True self._state = value > 0 if self._state: self._brightness = int(value * 255 / 100) self.schedule_update_ha_state()
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/ihc/light.py
"""Support for Homematic locks.""" import logging from homeassistant.components.lock import SUPPORT_OPEN, LockDevice from .const import ATTR_DISCOVER_DEVICES from .entity import HMDevice _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Homematic lock platform.""" if discovery_info is None: return devices = [] for conf in discovery_info[ATTR_DISCOVER_DEVICES]: devices.append(HMLock(conf)) add_entities(devices, True) class HMLock(HMDevice, LockDevice): """Representation of a Homematic lock aka KeyMatic.""" @property def is_locked(self): """Return true if the lock is locked.""" return not bool(self._hm_get_state()) def lock(self, **kwargs): """Lock the lock.""" self._hmdevice.lock() def unlock(self, **kwargs): """Unlock the lock.""" self._hmdevice.unlock() def open(self, **kwargs): """Open the door latch.""" self._hmdevice.open() def _init_data_struct(self): """Generate the data dictionary (self._data) from metadata.""" self._state = "STATE" self._data.update({self._state: None}) @property def supported_features(self): """Flag supported features.""" return SUPPORT_OPEN
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/homematic/lock.py
"""OpenEnergyMonitor Thermostat Support.""" import logging from oemthermostat import Thermostat import requests import voluptuous as vol from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice from homeassistant.components.climate.const import ( CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE, CURRENT_HVAC_OFF, HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ( ATTR_TEMPERATURE, CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, TEMP_CELSIUS, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default="Thermostat"): cv.string, vol.Optional(CONF_PORT, default=80): cv.port, vol.Inclusive(CONF_USERNAME, "authentication"): cv.string, vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string, } ) SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE SUPPORT_HVAC = [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF] def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the oemthermostat platform.""" name = config.get(CONF_NAME) host = config.get(CONF_HOST) port = config.get(CONF_PORT) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) try: therm = Thermostat(host, port=port, username=username, password=password) except (ValueError, AssertionError, requests.RequestException): return False add_entities((ThermostatDevice(therm, name),), True) class ThermostatDevice(ClimateDevice): """Interface class for the oemthermostat module.""" def __init__(self, thermostat, name): """Initialize the device.""" self._name = name self.thermostat = thermostat # set up internal state varS self._state = None self._temperature = None self._setpoint = None self._mode = None @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_FLAGS @property def hvac_mode(self): """Return hvac operation ie. heat, cool mode. Need to be one of HVAC_MODE_*. """ if self._mode == 2: return HVAC_MODE_HEAT if self._mode == 1: return HVAC_MODE_AUTO return HVAC_MODE_OFF @property def hvac_modes(self): """Return the list of available hvac operation modes. Need to be a subset of HVAC_MODES. """ return SUPPORT_HVAC @property def name(self): """Return the name of this Thermostat.""" return self._name @property def temperature_unit(self): """Return the unit of measurement used by the platform.""" return TEMP_CELSIUS @property def hvac_action(self): """Return current hvac i.e. heat, cool, idle.""" if not self._mode: return CURRENT_HVAC_OFF if self._state: return CURRENT_HVAC_HEAT return CURRENT_HVAC_IDLE @property def current_temperature(self): """Return the current temperature.""" return self._temperature @property def target_temperature(self): """Return the temperature we try to reach.""" return self._setpoint def set_hvac_mode(self, hvac_mode): """Set new target hvac mode.""" if hvac_mode == HVAC_MODE_AUTO: self.thermostat.mode = 1 elif hvac_mode == HVAC_MODE_HEAT: self.thermostat.mode = 2 elif hvac_mode == HVAC_MODE_OFF: self.thermostat.mode = 0 def set_temperature(self, **kwargs): """Set the temperature.""" temp = kwargs.get(ATTR_TEMPERATURE) self.thermostat.setpoint = temp def update(self): """Update local state.""" self._setpoint = self.thermostat.setpoint self._temperature = self.thermostat.temperature self._state = self.thermostat.state self._mode = self.thermostat.mode
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
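For orientation, a small sketch of the service data the fan's set_speed action sends once its data_template renders, e.g. during common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH). The rendered dict below is illustrative, not part of the test file; it just follows the "option": "{{ speed }}" template shown in _register_components above.

# Hypothetical rendering of the set_speed action's data_template when a test
# requests SPEED_HIGH ("high"); names come from the test module above.
rendered_set_speed_data = {
    "entity_id": _SPEED_INPUT_SELECT,  # "input_select.speed"
    "option": SPEED_HIGH,  # "{{ speed }}" rendered with speed="high"
}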
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/oem/climate.py
"""Reproduce an Vacuum state.""" import asyncio import logging from typing import Iterable, Optional from homeassistant.const import ( ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_IDLE, STATE_OFF, STATE_ON, STATE_PAUSED, ) from homeassistant.core import Context, State from homeassistant.helpers.typing import HomeAssistantType from . import ( ATTR_FAN_SPEED, DOMAIN, SERVICE_PAUSE, SERVICE_RETURN_TO_BASE, SERVICE_SET_FAN_SPEED, SERVICE_START, SERVICE_STOP, STATE_CLEANING, STATE_DOCKED, STATE_RETURNING, ) _LOGGER = logging.getLogger(__name__) VALID_STATES_TOGGLE = {STATE_ON, STATE_OFF} VALID_STATES_STATE = { STATE_CLEANING, STATE_DOCKED, STATE_IDLE, STATE_RETURNING, STATE_PAUSED, } async def _async_reproduce_state( hass: HomeAssistantType, state: State, context: Optional[Context] = None ) -> None: """Reproduce a single state.""" cur_state = hass.states.get(state.entity_id) if cur_state is None: _LOGGER.warning("Unable to find entity %s", state.entity_id) return if state.state not in VALID_STATES_TOGGLE and state.state not in VALID_STATES_STATE: _LOGGER.warning( "Invalid state specified for %s: %s", state.entity_id, state.state ) return # Return if we are already at the right state. if cur_state.state == state.state and cur_state.attributes.get( ATTR_FAN_SPEED ) == state.attributes.get(ATTR_FAN_SPEED): return service_data = {ATTR_ENTITY_ID: state.entity_id} if cur_state.state != state.state: # Wrong state if state.state == STATE_ON: service = SERVICE_TURN_ON elif state.state == STATE_OFF: service = SERVICE_TURN_OFF elif state.state == STATE_CLEANING: service = SERVICE_START elif state.state == STATE_DOCKED or state.state == STATE_RETURNING: service = SERVICE_RETURN_TO_BASE elif state.state == STATE_IDLE: service = SERVICE_STOP elif state.state == STATE_PAUSED: service = SERVICE_PAUSE await hass.services.async_call( DOMAIN, service, service_data, context=context, blocking=True ) if cur_state.attributes.get(ATTR_FAN_SPEED) != state.attributes.get(ATTR_FAN_SPEED): # Wrong fan speed service_data["fan_speed"] = state.attributes[ATTR_FAN_SPEED] await hass.services.async_call( DOMAIN, SERVICE_SET_FAN_SPEED, service_data, context=context, blocking=True ) async def async_reproduce_states( hass: HomeAssistantType, states: Iterable[State], context: Optional[Context] = None ) -> None: """Reproduce Vacuum states.""" # Reproduce states in parallel. await asyncio.gather( *(_async_reproduce_state(hass, state, context) for state in states) )
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/vacuum/reproduce_state.py
"""Support for HomeMatic covers.""" import logging from homeassistant.components.cover import ( ATTR_POSITION, ATTR_TILT_POSITION, CoverDevice, ) from .const import ATTR_DISCOVER_DEVICES from .entity import HMDevice _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the platform.""" if discovery_info is None: return devices = [] for conf in discovery_info[ATTR_DISCOVER_DEVICES]: new_device = HMCover(conf) devices.append(new_device) add_entities(devices, True) class HMCover(HMDevice, CoverDevice): """Representation a HomeMatic Cover.""" @property def current_cover_position(self): """ Return current position of cover. None is unknown, 0 is closed, 100 is fully open. """ return int(self._hm_get_state() * 100) def set_cover_position(self, **kwargs): """Move the cover to a specific position.""" if ATTR_POSITION in kwargs: position = float(kwargs[ATTR_POSITION]) position = min(100, max(0, position)) level = position / 100.0 self._hmdevice.set_level(level, self._channel) @property def is_closed(self): """Return if the cover is closed.""" if self.current_cover_position is not None: return self.current_cover_position == 0 return None def open_cover(self, **kwargs): """Open the cover.""" self._hmdevice.move_up(self._channel) def close_cover(self, **kwargs): """Close the cover.""" self._hmdevice.move_down(self._channel) def stop_cover(self, **kwargs): """Stop the device if in motion.""" self._hmdevice.stop(self._channel) def _init_data_struct(self): """Generate a data dictionary (self._data) from metadata.""" self._state = "LEVEL" self._data.update({self._state: None}) if "LEVEL_2" in self._hmdevice.WRITENODE: self._data.update({"LEVEL_2": None}) @property def current_cover_tilt_position(self): """Return current position of cover tilt. None is unknown, 0 is closed, 100 is fully open. """ if "LEVEL_2" not in self._data: return None return int(self._data.get("LEVEL_2", 0) * 100) def set_cover_tilt_position(self, **kwargs): """Move the cover tilt to a specific position.""" if "LEVEL_2" in self._data and ATTR_TILT_POSITION in kwargs: position = float(kwargs[ATTR_TILT_POSITION]) position = min(100, max(0, position)) level = position / 100.0 self._hmdevice.set_cover_tilt_position(level, self._channel) def open_cover_tilt(self, **kwargs): """Open the cover tilt.""" if "LEVEL_2" in self._data: self._hmdevice.open_slats() def close_cover_tilt(self, **kwargs): """Close the cover tilt.""" if "LEVEL_2" in self._data: self._hmdevice.close_slats() def stop_cover_tilt(self, **kwargs): """Stop cover tilt.""" if "LEVEL_2" in self._data: self.stop_cover(**kwargs)
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/homematic/cover.py
"""Support for Switchbot.""" import logging from typing import Any, Dict # pylint: disable=import-error, no-member import switchbot import voluptuous as vol from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice from homeassistant.const import CONF_MAC, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.restore_state import RestoreEntity _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Switchbot" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_MAC): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Perform the setup for Switchbot devices.""" name = config.get(CONF_NAME) mac_addr = config[CONF_MAC] add_entities([SwitchBot(mac_addr, name)]) class SwitchBot(SwitchDevice, RestoreEntity): """Representation of a Switchbot.""" def __init__(self, mac, name) -> None: """Initialize the Switchbot.""" self._state = None self._last_run_success = None self._name = name self._mac = mac self._device = switchbot.Switchbot(mac=mac) async def async_added_to_hass(self): """Run when entity about to be added.""" await super().async_added_to_hass() state = await self.async_get_last_state() if not state: return self._state = state.state == "on" def turn_on(self, **kwargs) -> None: """Turn device on.""" if self._device.turn_on(): self._state = True self._last_run_success = True else: self._last_run_success = False def turn_off(self, **kwargs) -> None: """Turn device off.""" if self._device.turn_off(): self._state = False self._last_run_success = True else: self._last_run_success = False @property def assumed_state(self) -> bool: """Return true if unable to access real state of entity.""" return True @property def is_on(self) -> bool: """Return true if device is on.""" return self._state @property def unique_id(self) -> str: """Return a unique, Home Assistant friendly identifier for this entity.""" return self._mac.replace(":", "") @property def name(self) -> str: """Return the name of the switch.""" return self._name @property def device_state_attributes(self) -> Dict[str, Any]: """Return the state attributes.""" return {"last_run_success": self._last_run_success}
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
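The configuration tests in this file hinge on assert_setup_component from tests.common, which checks how many platform configs survive setup inside the with-block. As a rough, self-contained sketch of that counting pattern (the toy_setup_component and toy_assert_setup_component names and the registry list are invented for illustration; this is not Home Assistant's actual helper):

from contextlib import contextmanager

_registered = []  # toy registry standing in for Home Assistant's setup bookkeeping


def toy_setup_component(domain, config):
    """Pretend to set up a platform; only configs with a 'platform' key validate."""
    platform_config = config.get(domain)
    if isinstance(platform_config, dict) and "platform" in platform_config:
        _registered.append(domain)
        return True
    return False


@contextmanager
def toy_assert_setup_component(count, domain):
    """Fail unless the wrapped block registers exactly `count` configs for `domain`."""
    before = len(_registered)
    yield
    registered = len(_registered) - before
    assert registered == count, f"expected {count} {domain!r} setups, got {registered}"


# Mirrors the shape of the configuration tests above.
with toy_assert_setup_component(1, "fan"):
    assert toy_setup_component("fan", {"fan": {"platform": "template"}})

with toy_assert_setup_component(0, "fan"):
    toy_setup_component("fan", {"fan": {}})  # invalid config: nothing registered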
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/switchbot/switch.py
"""Weather information for air and road temperature (by Trafikverket).""" import asyncio from datetime import timedelta import logging import aiohttp from pytrafikverket.trafikverket_weather import TrafikverketWeather import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_API_KEY, CONF_MONITORED_CONDITIONS, CONF_NAME, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, ) from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ATTRIBUTION = "Data provided by Trafikverket" ATTR_MEASURE_TIME = "measure_time" ATTR_ACTIVE = "active" CONF_STATION = "station" MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10) SCAN_INTERVAL = timedelta(seconds=300) SENSOR_TYPES = { "air_temp": [ "Air temperature", TEMP_CELSIUS, "air_temp", "mdi:thermometer", DEVICE_CLASS_TEMPERATURE, ], "road_temp": [ "Road temperature", TEMP_CELSIUS, "road_temp", "mdi:thermometer", DEVICE_CLASS_TEMPERATURE, ], "precipitation": [ "Precipitation type", None, "precipitationtype", "mdi:weather-snowy-rainy", None, ], "wind_direction": [ "Wind direction", "°", "winddirection", "mdi:flag-triangle", None, ], "wind_direction_text": [ "Wind direction text", None, "winddirectiontext", "mdi:flag-triangle", None, ], "wind_speed": ["Wind speed", "m/s", "windforce", "mdi:weather-windy", None], "humidity": [ "Humidity", "%", "humidity", "mdi:water-percent", DEVICE_CLASS_HUMIDITY, ], "precipitation_amount": [ "Precipitation amount", "mm", "precipitation_amount", "mdi:cup-water", None, ], "precipitation_amountname": [ "Precipitation name", None, "precipitation_amountname", "mdi:weather-pouring", None, ], } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_NAME): cv.string, vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_STATION): cv.string, vol.Required(CONF_MONITORED_CONDITIONS, default=[]): [vol.In(SENSOR_TYPES)], } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Trafikverket sensor platform.""" sensor_name = config[CONF_NAME] sensor_api = config[CONF_API_KEY] sensor_station = config[CONF_STATION] web_session = async_get_clientsession(hass) weather_api = TrafikverketWeather(web_session, sensor_api) dev = [] for condition in config[CONF_MONITORED_CONDITIONS]: dev.append( TrafikverketWeatherStation( weather_api, sensor_name, condition, sensor_station ) ) if dev: async_add_entities(dev, True) class TrafikverketWeatherStation(Entity): """Representation of a Trafikverket sensor.""" def __init__(self, weather_api, name, sensor_type, sensor_station): """Initialize the sensor.""" self._client = name self._name = SENSOR_TYPES[sensor_type][0] self._type = sensor_type self._state = None self._unit = SENSOR_TYPES[sensor_type][1] self._station = sensor_station self._weather_api = weather_api self._icon = SENSOR_TYPES[sensor_type][3] self._device_class = SENSOR_TYPES[sensor_type][4] self._weather = None @property def name(self): """Return the name of the sensor.""" return f"{self._client} {self._name}" @property def icon(self): """Icon to use in the frontend.""" return self._icon @property def device_state_attributes(self): """Return the state attributes of Trafikverket Weatherstation.""" return { ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_ACTIVE: self._weather.active, ATTR_MEASURE_TIME: 
self._weather.measure_time, } @property def device_class(self): """Return the device class of the sensor.""" return self._device_class @property def state(self): """Return the state of the device.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit @Throttle(MIN_TIME_BETWEEN_UPDATES) async def async_update(self): """Get the latest data from Trafikverket and updates the states.""" try: self._weather = await self._weather_api.async_get_weather(self._station) self._state = getattr(self._weather, SENSOR_TYPES[self._type][2]) except (asyncio.TimeoutError, aiohttp.ClientError, ValueError) as error: _LOGGER.error("Could not fetch weather data: %s", error)
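async_update above is wrapped in Throttle(MIN_TIME_BETWEEN_UPDATES), so even though SCAN_INTERVAL polls every five minutes, calls that land inside the ten-minute window are dropped and the entity keeps its last state. A minimal sketch of that rate-limiting idea, assuming nothing beyond the standard library (simple_throttle is my simplified stand-in, not homeassistant.util.Throttle):

import asyncio
from datetime import datetime, timedelta


def simple_throttle(min_interval: timedelta):
    """Return a decorator that suppresses calls arriving sooner than min_interval."""

    def decorator(func):
        last_run = None

        async def wrapper(*args, **kwargs):
            nonlocal last_run
            now = datetime.now()
            if last_run is not None and now - last_run < min_interval:
                return None  # throttled; the caller keeps the previous state
            last_run = now
            return await func(*args, **kwargs)

        return wrapper

    return decorator


@simple_throttle(timedelta(minutes=10))
async def fetch_weather():
    return "fresh data"


async def main():
    print(await fetch_weather())  # "fresh data"
    print(await fetch_weather())  # None -- second call falls inside the window


asyncio.run(main())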
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/trafikverket_weatherstation/sensor.py
"""Support for Lupusec Security System switches.""" from datetime import timedelta import logging import lupupy.constants as CONST from homeassistant.components.switch import SwitchDevice from . import DOMAIN as LUPUSEC_DOMAIN, LupusecDevice SCAN_INTERVAL = timedelta(seconds=2) _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up Lupusec switch devices.""" if discovery_info is None: return data = hass.data[LUPUSEC_DOMAIN] devices = [] for device in data.lupusec.get_devices(generic_type=CONST.TYPE_SWITCH): devices.append(LupusecSwitch(data, device)) add_entities(devices) class LupusecSwitch(LupusecDevice, SwitchDevice): """Representation of a Lupusec switch.""" def turn_on(self, **kwargs): """Turn on the device.""" self._device.switch_on() def turn_off(self, **kwargs): """Turn off the device.""" self._device.switch_off() @property def is_on(self): """Return true if device is on.""" return self._device.is_on
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/lupusec/switch.py
"""Support for OASA Telematics from telematics.oasa.gr.""" from datetime import timedelta import logging from operator import itemgetter import oasatelematics import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, DEVICE_CLASS_TIMESTAMP import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import dt as dt_util _LOGGER = logging.getLogger(__name__) ATTR_STOP_ID = "stop_id" ATTR_STOP_NAME = "stop_name" ATTR_ROUTE_ID = "route_id" ATTR_ROUTE_NAME = "route_name" ATTR_NEXT_ARRIVAL = "next_arrival" ATTR_SECOND_NEXT_ARRIVAL = "second_next_arrival" ATTR_NEXT_DEPARTURE = "next_departure" ATTRIBUTION = "Data retrieved from telematics.oasa.gr" CONF_STOP_ID = "stop_id" CONF_ROUTE_ID = "route_id" DEFAULT_NAME = "OASA Telematics" ICON = "mdi:bus" SCAN_INTERVAL = timedelta(seconds=60) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_STOP_ID): cv.string, vol.Required(CONF_ROUTE_ID): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the OASA Telematics sensor.""" name = config[CONF_NAME] stop_id = config[CONF_STOP_ID] route_id = config.get(CONF_ROUTE_ID) data = OASATelematicsData(stop_id, route_id) add_entities([OASATelematicsSensor(data, stop_id, route_id, name)], True) class OASATelematicsSensor(Entity): """Implementation of the OASA Telematics sensor.""" def __init__(self, data, stop_id, route_id, name): """Initialize the sensor.""" self.data = data self._name = name self._stop_id = stop_id self._route_id = route_id self._name_data = self._times = self._state = None @property def name(self): """Return the name of the sensor.""" return self._name @property def device_class(self): """Return the class of this sensor.""" return DEVICE_CLASS_TIMESTAMP @property def state(self): """Return the state of the sensor.""" return self._state @property def device_state_attributes(self): """Return the state attributes.""" params = {} if self._times is not None: next_arrival_data = self._times[0] if ATTR_NEXT_ARRIVAL in next_arrival_data: next_arrival = next_arrival_data[ATTR_NEXT_ARRIVAL] params.update({ATTR_NEXT_ARRIVAL: next_arrival.isoformat()}) if len(self._times) > 1: second_next_arrival_time = self._times[1][ATTR_NEXT_ARRIVAL] if second_next_arrival_time is not None: second_arrival = second_next_arrival_time params.update( {ATTR_SECOND_NEXT_ARRIVAL: second_arrival.isoformat()} ) params.update( { ATTR_ROUTE_ID: self._times[0][ATTR_ROUTE_ID], ATTR_STOP_ID: self._stop_id, ATTR_ATTRIBUTION: ATTRIBUTION, } ) params.update( { ATTR_ROUTE_NAME: self._name_data[ATTR_ROUTE_NAME], ATTR_STOP_NAME: self._name_data[ATTR_STOP_NAME], } ) return {k: v for k, v in params.items() if v} @property def icon(self): """Icon to use in the frontend, if any.""" return ICON def update(self): """Get the latest data from OASA API and update the states.""" self.data.update() self._times = self.data.info self._name_data = self.data.name_data next_arrival_data = self._times[0] if ATTR_NEXT_ARRIVAL in next_arrival_data: self._state = next_arrival_data[ATTR_NEXT_ARRIVAL].isoformat() class OASATelematicsData: """The class for handling data retrieval.""" def __init__(self, stop_id, route_id): """Initialize the data object.""" self.stop_id = stop_id self.route_id = route_id self.info = self.empty_result() self.oasa_api = oasatelematics self.name_data = { ATTR_ROUTE_NAME: 
self.get_route_name(), ATTR_STOP_NAME: self.get_stop_name(), } def empty_result(self): """Object returned when no arrivals are found.""" return [{ATTR_ROUTE_ID: self.route_id}] def get_route_name(self): """Get the route name from the API.""" try: route = self.oasa_api.getRouteName(self.route_id) if route: return route[0].get("route_departure_eng") except TypeError: _LOGGER.error("Cannot get route name from OASA API") return None def get_stop_name(self): """Get the stop name from the API.""" try: name_data = self.oasa_api.getStopNameAndXY(self.stop_id) if name_data: return name_data[0].get("stop_descr_matrix_eng") except TypeError: _LOGGER.error("Cannot get stop name from OASA API") return None def update(self): """Get the latest arrival data from telematics.oasa.gr API.""" self.info = [] results = self.oasa_api.getStopArrivals(self.stop_id) if not results: self.info = self.empty_result() return # Parse results results = [r for r in results if r.get("route_code") in self.route_id] current_time = dt_util.utcnow() for result in results: btime2 = result.get("btime2") if btime2 is not None: arrival_min = int(btime2) timestamp = current_time + timedelta(minutes=arrival_min) arrival_data = { ATTR_NEXT_ARRIVAL: timestamp, ATTR_ROUTE_ID: self.route_id, } self.info.append(arrival_data) if not self.info: _LOGGER.debug("No arrivals with given parameters") self.info = self.empty_result() return # Sort the data by time sort = sorted(self.info, key=itemgetter(ATTR_NEXT_ARRIVAL)) self.info = sort
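In OASATelematicsData.update, each btime2 value (minutes until arrival) becomes an absolute UTC timestamp, and the rows are then sorted soonest-first with itemgetter. The same arithmetic in isolation, on made-up sample rows (the btime2 values and route id below are illustrative, not real API output):

from datetime import datetime, timedelta, timezone
from operator import itemgetter

ATTR_NEXT_ARRIVAL = "next_arrival"
ATTR_ROUTE_ID = "route_id"

now = datetime.now(timezone.utc)
results = [{"btime2": "7"}, {"btime2": None}, {"btime2": "2"}]  # sample API rows

info = [
    {
        ATTR_NEXT_ARRIVAL: now + timedelta(minutes=int(r["btime2"])),
        ATTR_ROUTE_ID: "X14",
    }
    for r in results
    if r["btime2"] is not None  # rows without an estimate are skipped
]
info.sort(key=itemgetter(ATTR_NEXT_ARRIVAL))  # soonest arrival first

assert info[0][ATTR_NEXT_ARRIVAL] < info[1][ATTR_NEXT_ARRIVAL]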
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
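
Not part of the original suite: a minimal sketch of how _register_components, the common helpers, and _verify compose into one more function test. The expected values are inferred from test_set_direction above.

async def test_on_with_direction_sketch(hass, calls):
    """Sketch: turn on, then reverse direction, reusing the helpers above."""
    await _register_components(hass)

    await common.async_turn_on(hass, _TEST_FAN)
    await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE)

    # Mirrors the assertions test_set_direction makes for DIRECTION_FORWARD.
    assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE
    _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE)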
repo: Teagan42/home-assistant
test file: tests/components/template/test_fan.py
source file: homeassistant/components/oasa_telematics/sensor.py
"""Support for FRITZ!Box routers.""" import logging from fritzconnection.lib.fritzhosts import FritzHosts import voluptuous as vol from homeassistant.components.device_tracker import ( DOMAIN, PLATFORM_SCHEMA, DeviceScanner, ) from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_DEFAULT_IP = "169.254.1.1" # This IP is valid for all FRITZ!Box routers. PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_HOST, default=CONF_DEFAULT_IP): cv.string, vol.Optional(CONF_PASSWORD, default="admin"): cv.string, vol.Optional(CONF_USERNAME, default=""): cv.string, } ) def get_scanner(hass, config): """Validate the configuration and return FritzBoxScanner.""" scanner = FritzBoxScanner(config[DOMAIN]) return scanner if scanner.success_init else None class FritzBoxScanner(DeviceScanner): """This class queries a FRITZ!Box router.""" def __init__(self, config): """Initialize the scanner.""" self.last_results = [] self.host = config[CONF_HOST] self.username = config[CONF_USERNAME] self.password = config[CONF_PASSWORD] self.success_init = True # Establish a connection to the FRITZ!Box. try: self.fritz_box = FritzHosts( address=self.host, user=self.username, password=self.password ) except (ValueError, TypeError): self.fritz_box = None # At this point it is difficult to tell if a connection is established. # So just check for null objects. if self.fritz_box is None or not self.fritz_box.modelname: self.success_init = False if self.success_init: _LOGGER.info("Successfully connected to %s", self.fritz_box.modelname) self._update_info() else: _LOGGER.error( "Failed to establish connection to FRITZ!Box with IP: %s", self.host ) def scan_devices(self): """Scan for new devices and return a list of found device ids.""" self._update_info() active_hosts = [] for known_host in self.last_results: if known_host["status"] and known_host.get("mac"): active_hosts.append(known_host["mac"]) return active_hosts def get_device_name(self, device): """Return the name of the given device or None if is not known.""" ret = self.fritz_box.get_specific_host_entry(device).get("NewHostName") if ret == {}: return None return ret def get_extra_attributes(self, device): """Return the attributes (ip, mac) of the given device or None if is not known.""" ip_device = self.fritz_box.get_specific_host_entry(device).get("NewIPAddress") if not ip_device: return {} return {"ip": ip_device, "mac": device} def _update_info(self): """Retrieve latest information from the FRITZ!Box.""" if not self.success_init: return False _LOGGER.debug("Scanning") self.last_results = self.fritz_box.get_hosts_info() return True
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
repo: Teagan42/home-assistant
test file: tests/components/template/test_fan.py
source file: homeassistant/components/fritz/device_tracker.py
"""Support for tracking consumption over given periods of time.""" from datetime import timedelta import logging import voluptuous as vol from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN from homeassistant.const import CONF_NAME from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import RestoreEntity from .const import ( ATTR_TARIFF, CONF_METER, CONF_METER_NET_CONSUMPTION, CONF_METER_OFFSET, CONF_METER_TYPE, CONF_SOURCE_SENSOR, CONF_TARIFF, CONF_TARIFF_ENTITY, CONF_TARIFFS, DATA_UTILITY, DOMAIN, METER_TYPES, SERVICE_RESET, SERVICE_SELECT_NEXT_TARIFF, SERVICE_SELECT_TARIFF, SIGNAL_RESET_METER, ) _LOGGER = logging.getLogger(__name__) TARIFF_ICON = "mdi:clock-outline" ATTR_TARIFFS = "tariffs" DEFAULT_OFFSET = timedelta(hours=0) METER_CONFIG_SCHEMA = vol.Schema( { vol.Required(CONF_SOURCE_SENSOR): cv.entity_id, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_METER_TYPE): vol.In(METER_TYPES), vol.Optional(CONF_METER_OFFSET, default=DEFAULT_OFFSET): vol.All( cv.time_period, cv.positive_timedelta ), vol.Optional(CONF_METER_NET_CONSUMPTION, default=False): cv.boolean, vol.Optional(CONF_TARIFFS, default=[]): vol.All(cv.ensure_list, [cv.string]), } ) CONFIG_SCHEMA = vol.Schema( {DOMAIN: vol.Schema({cv.slug: METER_CONFIG_SCHEMA})}, extra=vol.ALLOW_EXTRA ) async def async_setup(hass, config): """Set up an Utility Meter.""" component = EntityComponent(_LOGGER, DOMAIN, hass) hass.data[DATA_UTILITY] = {} register_services = False for meter, conf in config.get(DOMAIN).items(): _LOGGER.debug("Setup %s.%s", DOMAIN, meter) hass.data[DATA_UTILITY][meter] = conf if not conf[CONF_TARIFFS]: # only one entity is required hass.async_create_task( discovery.async_load_platform( hass, SENSOR_DOMAIN, DOMAIN, [{CONF_METER: meter, CONF_NAME: meter}], config, ) ) else: # create tariff selection await component.async_add_entities( [TariffSelect(meter, list(conf[CONF_TARIFFS]))] ) hass.data[DATA_UTILITY][meter][CONF_TARIFF_ENTITY] = "{}.{}".format( DOMAIN, meter ) # add one meter for each tariff tariff_confs = [] for tariff in conf[CONF_TARIFFS]: tariff_confs.append( { CONF_METER: meter, CONF_NAME: f"{meter} {tariff}", CONF_TARIFF: tariff, } ) hass.async_create_task( discovery.async_load_platform( hass, SENSOR_DOMAIN, DOMAIN, tariff_confs, config ) ) register_services = True if register_services: component.async_register_entity_service(SERVICE_RESET, {}, "async_reset_meters") component.async_register_entity_service( SERVICE_SELECT_TARIFF, {vol.Required(ATTR_TARIFF): cv.string}, "async_select_tariff", ) component.async_register_entity_service( SERVICE_SELECT_NEXT_TARIFF, {}, "async_next_tariff" ) return True class TariffSelect(RestoreEntity): """Representation of a Tariff selector.""" def __init__(self, name, tariffs): """Initialize a tariff selector.""" self._name = name self._current_tariff = None self._tariffs = tariffs self._icon = TARIFF_ICON async def async_added_to_hass(self): """Run when entity about to be added.""" await super().async_added_to_hass() if self._current_tariff is not None: return state = await self.async_get_last_state() if not state or state.state not in self._tariffs: self._current_tariff = self._tariffs[0] else: self._current_tariff = state.state @property def should_poll(self): """If entity should be polled.""" return False @property def name(self): 
"""Return the name of the select input.""" return self._name @property def icon(self): """Return the icon to be used for this entity.""" return self._icon @property def state(self): """Return the state of the component.""" return self._current_tariff @property def state_attributes(self): """Return the state attributes.""" return {ATTR_TARIFFS: self._tariffs} async def async_reset_meters(self): """Reset all sensors of this meter.""" _LOGGER.debug("reset meter %s", self.entity_id) async_dispatcher_send(self.hass, SIGNAL_RESET_METER, self.entity_id) async def async_select_tariff(self, tariff): """Select new option.""" if tariff not in self._tariffs: _LOGGER.warning( "Invalid tariff: %s (possible tariffs: %s)", tariff, ", ".join(self._tariffs), ) return self._current_tariff = tariff await self.async_update_ha_state() async def async_next_tariff(self): """Offset current index.""" current_index = self._tariffs.index(self._current_tariff) new_index = (current_index + 1) % len(self._tariffs) self._current_tariff = self._tariffs[new_index] await self.async_update_ha_state()
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
repo: Teagan42/home-assistant
test file: tests/components/template/test_fan.py
source file: homeassistant/components/utility_meter/__init__.py
"""Support for Ombi.""" from datetime import timedelta import logging from pyombi import OmbiError from homeassistant.helpers.entity import Entity from .const import DOMAIN, SENSOR_TYPES _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=60) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Ombi sensor platform.""" if discovery_info is None: return sensors = [] ombi = hass.data[DOMAIN]["instance"] for sensor in SENSOR_TYPES: sensor_label = sensor sensor_type = SENSOR_TYPES[sensor]["type"] sensor_icon = SENSOR_TYPES[sensor]["icon"] sensors.append(OmbiSensor(sensor_label, sensor_type, ombi, sensor_icon)) add_entities(sensors, True) class OmbiSensor(Entity): """Representation of an Ombi sensor.""" def __init__(self, label, sensor_type, ombi, icon): """Initialize the sensor.""" self._state = None self._label = label self._type = sensor_type self._ombi = ombi self._icon = icon @property def name(self): """Return the name of the sensor.""" return f"Ombi {self._type}" @property def icon(self): """Return the icon to use in the frontend.""" return self._icon @property def state(self): """Return the state of the sensor.""" return self._state def update(self): """Update the sensor.""" try: if self._label == "movies": self._state = self._ombi.movie_requests elif self._label == "tv": self._state = self._ombi.tv_requests elif self._label == "music": self._state = self._ombi.music_requests elif self._label == "pending": self._state = self._ombi.total_requests["pending"] elif self._label == "approved": self._state = self._ombi.total_requests["approved"] elif self._label == "available": self._state = self._ombi.total_requests["available"] except OmbiError as err: _LOGGER.warning("Unable to update Ombi sensor: %s", err) self._state = None
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
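The function tests above drive the fan through thin helpers from tests.components.fan.common rather than calling services directly. A minimal sketch of what one of those helpers presumably looks like, assuming the standard Home Assistant blocking service-call pattern (the actual helper in the repo may differ in detail):

# Sketch of a fan test helper, assuming the usual blocking service-call
# pattern; the real async_set_speed lives in tests/components/fan/common.py.
from homeassistant.components.fan import ATTR_SPEED, DOMAIN, SERVICE_SET_SPEED
from homeassistant.const import ATTR_ENTITY_ID


async def async_set_speed(hass, entity_id, speed):
    """Call fan.set_speed for entity_id and wait until it is processed."""
    data = {ATTR_ENTITY_ID: entity_id, ATTR_SPEED: speed}
    await hass.services.async_call(DOMAIN, SERVICE_SET_SPEED, data, blocking=True)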
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/ombi/sensor.py
"""Util for Conversation.""" import re def create_matcher(utterance): """Create a regex that matches the utterance.""" # Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL # Pattern matches (GROUP|OPTIONAL): Change light to [the color] {name} parts = re.split(r"({\w+}|\[[\w\s]+\] *)", utterance) # Pattern to extract name from GROUP part. Matches {name} group_matcher = re.compile(r"{(\w+)}") # Pattern to extract text from OPTIONAL part. Matches [the color] optional_matcher = re.compile(r"\[([\w ]+)\] *") pattern = ["^"] for part in parts: group_match = group_matcher.match(part) optional_match = optional_matcher.match(part) # Normal part if group_match is None and optional_match is None: pattern.append(part) continue # Group part if group_match is not None: pattern.append(r"(?P<{}>[\w ]+?)\s*".format(group_match.groups()[0])) # Optional part elif optional_match is not None: pattern.append(r"(?:{} *)?".format(optional_match.groups()[0])) pattern.append("$") return re.compile("".join(pattern), re.I)
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/conversation/util.py
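As a quick illustration of the pattern create_matcher builds, here is a hypothetical utterance with one optional part and one named slot (the utterance and values are made up for the example):

# Illustrative only: create_matcher turns the utterance template into
# ^Change light to (?:the color *)?(?P<name>[\w ]+?)\s*$ (case-insensitive).
matcher = create_matcher("Change light to [the color] {name}")

assert matcher.match("Change light to the color red").group("name") == "red"
# The optional part may be omitted entirely.
assert matcher.match("Change light to red").group("name") == "red"
# Non-matching utterances return None.
assert matcher.match("Turn off the light") is None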
"""Support for Repetier-Server sensors.""" from datetime import timedelta import logging import pyrepetier import voluptuous as vol from homeassistant.const import ( CONF_API_KEY, CONF_HOST, CONF_MONITORED_CONDITIONS, CONF_NAME, CONF_PORT, CONF_SENSORS, TEMP_CELSIUS, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import load_platform from homeassistant.helpers.dispatcher import dispatcher_send from homeassistant.helpers.event import track_time_interval from homeassistant.util import slugify as util_slugify _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "RepetierServer" DOMAIN = "repetier" REPETIER_API = "repetier_api" SCAN_INTERVAL = timedelta(seconds=10) UPDATE_SIGNAL = "repetier_update_signal" TEMP_DATA = {"tempset": "temp_set", "tempread": "state", "output": "output"} API_PRINTER_METHODS = { "bed_temperature": { "offline": {"heatedbeds": None, "state": "off"}, "state": {"heatedbeds": "temp_data"}, "temp_data": TEMP_DATA, "attribute": "heatedbeds", }, "extruder_temperature": { "offline": {"extruder": None, "state": "off"}, "state": {"extruder": "temp_data"}, "temp_data": TEMP_DATA, "attribute": "extruder", }, "chamber_temperature": { "offline": {"heatedchambers": None, "state": "off"}, "state": {"heatedchambers": "temp_data"}, "temp_data": TEMP_DATA, "attribute": "heatedchambers", }, "current_state": { "offline": {"state": None}, "state": { "state": "state", "activeextruder": "active_extruder", "hasxhome": "x_homed", "hasyhome": "y_homed", "haszhome": "z_homed", "firmware": "firmware", "firmwareurl": "firmware_url", }, }, "current_job": { "offline": {"job": None, "state": "off"}, "state": { "done": "state", "job": "job_name", "jobid": "job_id", "totallines": "total_lines", "linessent": "lines_sent", "oflayer": "total_layers", "layer": "current_layer", "speedmultiply": "feed_rate", "flowmultiply": "flow", "x": "x", "y": "y", "z": "z", }, }, "job_end": { "offline": {"job": None, "state": "off", "start": None, "printtime": None}, "state": { "job": "job_name", "start": "start", "printtime": "print_time", "printedtimecomp": "from_start", }, }, "job_start": { "offline": { "job": None, "state": "off", "start": None, "printedtimecomp": None, }, "state": {"job": "job_name", "start": "start", "printedtimecomp": "from_start"}, }, } def has_all_unique_names(value): """Validate that printers have an unique name.""" names = [util_slugify(printer[CONF_NAME]) for printer in value] vol.Schema(vol.Unique())(names) return value SENSOR_TYPES = { # Type, Unit, Icon, post "bed_temperature": ["temperature", TEMP_CELSIUS, "mdi:thermometer", "_bed_"], "extruder_temperature": [ "temperature", TEMP_CELSIUS, "mdi:thermometer", "_extruder_", ], "chamber_temperature": [ "temperature", TEMP_CELSIUS, "mdi:thermometer", "_chamber_", ], "current_state": ["state", None, "mdi:printer-3d", ""], "current_job": ["progress", "%", "mdi:file-percent", "_current_job"], "job_end": ["progress", None, "mdi:clock-end", "_job_end"], "job_start": ["progress", None, "mdi:clock-start", "_job_start"], } SENSOR_SCHEMA = vol.Schema( { vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All( cv.ensure_list, [vol.In(SENSOR_TYPES)] ), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=3344): cv.port, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, 
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA, } ) ], has_all_unique_names, ) }, extra=vol.ALLOW_EXTRA, ) def setup(hass, config): """Set up the Repetier Server component.""" hass.data[REPETIER_API] = {} for repetier in config[DOMAIN]: _LOGGER.debug("Repetier server config %s", repetier[CONF_HOST]) url = "http://{}".format(repetier[CONF_HOST]) port = repetier[CONF_PORT] api_key = repetier[CONF_API_KEY] client = pyrepetier.Repetier(url=url, port=port, apikey=api_key) printers = client.getprinters() if not printers: return False sensors = repetier[CONF_SENSORS][CONF_MONITORED_CONDITIONS] api = PrinterAPI(hass, client, printers, sensors, repetier[CONF_NAME], config) api.update() track_time_interval(hass, api.update, SCAN_INTERVAL) hass.data[REPETIER_API][repetier[CONF_NAME]] = api return True class PrinterAPI: """Handle the printer API.""" def __init__(self, hass, client, printers, sensors, conf_name, config): """Set up instance.""" self._hass = hass self._client = client self.printers = printers self.sensors = sensors self.conf_name = conf_name self.config = config self._known_entities = set() def get_data(self, printer_id, sensor_type, temp_id): """Get data from the state cache.""" printer = self.printers[printer_id] methods = API_PRINTER_METHODS[sensor_type] for prop, offline in methods["offline"].items(): state = getattr(printer, prop) if state == offline: # if state matches offline, sensor is offline return None data = {} for prop, attr in methods["state"].items(): prop_data = getattr(printer, prop) if attr == "temp_data": temp_methods = methods["temp_data"] for temp_prop, temp_attr in temp_methods.items(): data[temp_attr] = getattr(prop_data[temp_id], temp_prop) else: data[attr] = prop_data return data def update(self, now=None): """Update the state cache from the printer API.""" for printer in self.printers: printer.get_data() self._load_entities() dispatcher_send(self._hass, UPDATE_SIGNAL) def _load_entities(self): sensor_info = [] for pidx, printer in enumerate(self.printers): for sensor_type in self.sensors: info = {} info["sensor_type"] = sensor_type info["printer_id"] = pidx info["name"] = printer.slug info["printer_name"] = self.conf_name known = f"{printer.slug}-{sensor_type}" if known in self._known_entities: continue methods = API_PRINTER_METHODS[sensor_type] if "temp_data" in methods["state"].values(): prop_data = getattr(printer, methods["attribute"]) if prop_data is None: continue for idx, _ in enumerate(prop_data): prop_info = info.copy() prop_info["temp_id"] = idx sensor_info.append(prop_info) else: info["temp_id"] = None sensor_info.append(info) self._known_entities.add(known) if not sensor_info: return load_platform(self._hass, "sensor", DOMAIN, sensor_info, self.config)
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/repetier/__init__.py
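For orientation, a sketch of what CONFIG_SCHEMA above accepts; the host and API key are made-up placeholders, and the assertions assume voluptuous fills in the declared defaults:

# Illustrative only: validating a minimal repetier config entry.
conf = CONFIG_SCHEMA(
    {DOMAIN: [{CONF_API_KEY: "abc123", CONF_HOST: "192.168.1.50"}]}
)
assert conf[DOMAIN][0][CONF_PORT] == 3344  # vol.Optional default
assert conf[DOMAIN][0][CONF_NAME] == DEFAULT_NAME

# has_all_unique_names rejects two printers whose names slugify to the same
# value, e.g. "My Printer" and "my-printer" both become "my_printer".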
"""Support for Ambient Weather Station binary sensors.""" import logging from homeassistant.components.binary_sensor import BinarySensorDevice from homeassistant.const import ATTR_NAME from . import ( SENSOR_TYPES, TYPE_BATT1, TYPE_BATT2, TYPE_BATT3, TYPE_BATT4, TYPE_BATT5, TYPE_BATT6, TYPE_BATT7, TYPE_BATT8, TYPE_BATT9, TYPE_BATT10, TYPE_BATTOUT, AmbientWeatherEntity, ) from .const import ( ATTR_LAST_DATA, ATTR_MONITORED_CONDITIONS, DATA_CLIENT, DOMAIN, TYPE_BINARY_SENSOR, ) _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, entry, async_add_entities): """Set up Ambient PWS binary sensors based on a config entry.""" ambient = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id] binary_sensor_list = [] for mac_address, station in ambient.stations.items(): for condition in station[ATTR_MONITORED_CONDITIONS]: name, _, kind, device_class = SENSOR_TYPES[condition] if kind == TYPE_BINARY_SENSOR: binary_sensor_list.append( AmbientWeatherBinarySensor( ambient, mac_address, station[ATTR_NAME], condition, name, device_class, ) ) async_add_entities(binary_sensor_list, True) class AmbientWeatherBinarySensor(AmbientWeatherEntity, BinarySensorDevice): """Define an Ambient binary sensor.""" @property def is_on(self): """Return the status of the sensor.""" if self._sensor_type in ( TYPE_BATT1, TYPE_BATT10, TYPE_BATT2, TYPE_BATT3, TYPE_BATT4, TYPE_BATT5, TYPE_BATT6, TYPE_BATT7, TYPE_BATT8, TYPE_BATT9, TYPE_BATTOUT, ): return self._state == 0 return self._state == 1 async def async_update(self): """Fetch new state data for the entity.""" self._state = self._ambient.stations[self._mac_address][ATTR_LAST_DATA].get( self._sensor_type )
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
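Taken together, _register_components and _verify define the pattern every behaviour test above follows: wire input_boolean/input_select entities up as the fan's mock backend, drive the fan through the service helpers in tests.components.fan.common, then assert on both the backing entity and the rendered fan state. A minimal sketch of a further test in that style (the test itself is hypothetical and not part of the file; SPEED_LOW is already imported at the top):

async def test_set_speed_low(hass, calls):
    """Hypothetical sketch: a SPEED_LOW round trip in the same style."""
    await _register_components(hass)

    # Drive the fan through the shared service helpers
    await common.async_turn_on(hass, _TEST_FAN)
    await common.async_set_speed(hass, _TEST_FAN, SPEED_LOW)

    # The template fan forwards the speed to its input_select backend...
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_LOW
    # ...and speed_template renders it back into the fan's state attributes
    _verify(hass, STATE_ON, SPEED_LOW, None, None)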
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/ambient_station/binary_sensor.py
"""Support for ESPHome covers.""" import logging from typing import Optional from aioesphomeapi import CoverInfo, CoverOperation, CoverState from homeassistant.components.cover import ( ATTR_POSITION, ATTR_TILT_POSITION, SUPPORT_CLOSE, SUPPORT_CLOSE_TILT, SUPPORT_OPEN, SUPPORT_OPEN_TILT, SUPPORT_SET_POSITION, SUPPORT_SET_TILT_POSITION, SUPPORT_STOP, CoverDevice, ) from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from . import EsphomeEntity, esphome_state_property, platform_async_setup_entry _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up ESPHome covers based on a config entry.""" await platform_async_setup_entry( hass, entry, async_add_entities, component_key="cover", info_type=CoverInfo, entity_type=EsphomeCover, state_type=CoverState, ) class EsphomeCover(EsphomeEntity, CoverDevice): """A cover implementation for ESPHome.""" @property def _static_info(self) -> CoverInfo: return super()._static_info @property def supported_features(self) -> int: """Flag supported features.""" flags = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP if self._static_info.supports_position: flags |= SUPPORT_SET_POSITION if self._static_info.supports_tilt: flags |= SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_SET_TILT_POSITION return flags @property def device_class(self) -> str: """Return the class of this device, from component DEVICE_CLASSES.""" return self._static_info.device_class @property def assumed_state(self) -> bool: """Return true if we do optimistic updates.""" return self._static_info.assumed_state @property def _state(self) -> Optional[CoverState]: return super()._state # https://github.com/PyCQA/pylint/issues/3150 for all @esphome_state_property # pylint: disable=invalid-overridden-method @esphome_state_property def is_closed(self) -> Optional[bool]: """Return if the cover is closed or not.""" # Check closed state with api version due to a protocol change return self._state.is_closed(self._client.api_version) @esphome_state_property def is_opening(self) -> bool: """Return if the cover is opening or not.""" return self._state.current_operation == CoverOperation.IS_OPENING @esphome_state_property def is_closing(self) -> bool: """Return if the cover is closing or not.""" return self._state.current_operation == CoverOperation.IS_CLOSING @esphome_state_property def current_cover_position(self) -> Optional[int]: """Return current position of cover. 0 is closed, 100 is open.""" if not self._static_info.supports_position: return None return round(self._state.position * 100.0) @esphome_state_property def current_cover_tilt_position(self) -> Optional[float]: """Return current position of cover tilt. 
0 is closed, 100 is open.""" if not self._static_info.supports_tilt: return None return self._state.tilt * 100.0 async def async_open_cover(self, **kwargs) -> None: """Open the cover.""" await self._client.cover_command(key=self._static_info.key, position=1.0) async def async_close_cover(self, **kwargs) -> None: """Close cover.""" await self._client.cover_command(key=self._static_info.key, position=0.0) async def async_stop_cover(self, **kwargs) -> None: """Stop the cover.""" await self._client.cover_command(key=self._static_info.key, stop=True) async def async_set_cover_position(self, **kwargs) -> None: """Move the cover to a specific position.""" await self._client.cover_command( key=self._static_info.key, position=kwargs[ATTR_POSITION] / 100 ) async def async_open_cover_tilt(self, **kwargs) -> None: """Open the cover tilt.""" await self._client.cover_command(key=self._static_info.key, tilt=1.0) async def async_close_cover_tilt(self, **kwargs) -> None: """Close the cover tilt.""" await self._client.cover_command(key=self._static_info.key, tilt=0.0) async def async_set_cover_tilt_position(self, **kwargs) -> None: """Move the cover tilt to a specific position.""" await self._client.cover_command( key=self._static_info.key, tilt=kwargs[ATTR_TILT_POSITION] / 100 )
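One detail of EsphomeCover worth calling out: Home Assistant expresses cover and tilt positions as integers from 0 to 100, while the ESPHome native API works in floats from 0.0 to 1.0, so every command method divides by 100 and every state property multiplies by 100. A standalone sketch of that round trip (plain Python; the helper names are invented for illustration):

def ha_to_esphome(position: int) -> float:
    """Map Home Assistant's 0-100 position scale to ESPHome's 0.0-1.0."""
    return position / 100


def esphome_to_ha(position: float) -> int:
    """Map ESPHome's 0.0-1.0 position scale back to Home Assistant's 0-100."""
    return round(position * 100.0)


assert ha_to_esphome(100) == 1.0  # fully open, as async_open_cover sends
assert esphome_to_ha(0.0) == 0  # fully closed, as current_cover_position reports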
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/esphome/cover.py
"""Support to graphs card in the UI.""" import logging import voluptuous as vol from homeassistant.const import ATTR_ENTITY_ID, CONF_ENTITIES, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity_component import EntityComponent _LOGGER = logging.getLogger(__name__) DOMAIN = "history_graph" CONF_HOURS_TO_SHOW = "hours_to_show" CONF_REFRESH = "refresh" ATTR_HOURS_TO_SHOW = CONF_HOURS_TO_SHOW ATTR_REFRESH = CONF_REFRESH GRAPH_SCHEMA = vol.Schema( { vol.Required(CONF_ENTITIES): cv.entity_ids, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_HOURS_TO_SHOW, default=24): vol.Range(min=1), vol.Optional(CONF_REFRESH, default=0): vol.Range(min=0), } ) CONFIG_SCHEMA = vol.Schema( {DOMAIN: cv.schema_with_slug_keys(GRAPH_SCHEMA)}, extra=vol.ALLOW_EXTRA ) async def async_setup(hass, config): """Load graph configurations.""" _LOGGER.warning( "The history_graph integration has been deprecated and is pending for removal " "in Home Assistant 0.107.0." ) component = EntityComponent(_LOGGER, DOMAIN, hass) graphs = [] for object_id, cfg in config[DOMAIN].items(): name = cfg.get(CONF_NAME, object_id) graph = HistoryGraphEntity(name, cfg) graphs.append(graph) await component.async_add_entities(graphs) return True class HistoryGraphEntity(Entity): """Representation of a graph entity.""" def __init__(self, name, cfg): """Initialize the graph.""" self._name = name self._hours = cfg[CONF_HOURS_TO_SHOW] self._refresh = cfg[CONF_REFRESH] self._entities = cfg[CONF_ENTITIES] @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return the name of the entity.""" return self._name @property def state_attributes(self): """Return the state attributes.""" attrs = { ATTR_HOURS_TO_SHOW: self._hours, ATTR_REFRESH: self._refresh, ATTR_ENTITY_ID: self._entities, } return attrs
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/history_graph/__init__.py
"""Provides device automations for Lock.""" from typing import List import voluptuous as vol from homeassistant.components.automation import AutomationActionType, state from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA from homeassistant.const import ( CONF_DEVICE_ID, CONF_DOMAIN, CONF_ENTITY_ID, CONF_PLATFORM, CONF_TYPE, STATE_LOCKED, STATE_UNLOCKED, ) from homeassistant.core import CALLBACK_TYPE, HomeAssistant from homeassistant.helpers import config_validation as cv, entity_registry from homeassistant.helpers.typing import ConfigType from . import DOMAIN TRIGGER_TYPES = {"locked", "unlocked"} TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend( { vol.Required(CONF_ENTITY_ID): cv.entity_id, vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES), } ) async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]: """List device triggers for Lock devices.""" registry = await entity_registry.async_get_registry(hass) triggers = [] # Get all the integrations entities for this device for entry in entity_registry.async_entries_for_device(registry, device_id): if entry.domain != DOMAIN: continue # Add triggers for each entity that belongs to this integration triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "locked", } ) triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "unlocked", } ) return triggers async def async_attach_trigger( hass: HomeAssistant, config: ConfigType, action: AutomationActionType, automation_info: dict, ) -> CALLBACK_TYPE: """Attach a trigger.""" config = TRIGGER_SCHEMA(config) if config[CONF_TYPE] == "locked": from_state = STATE_UNLOCKED to_state = STATE_LOCKED else: from_state = STATE_LOCKED to_state = STATE_UNLOCKED state_config = { state.CONF_PLATFORM: "state", CONF_ENTITY_ID: config[CONF_ENTITY_ID], state.CONF_FROM: from_state, state.CONF_TO: to_state, } state_config = state.TRIGGER_SCHEMA(state_config) return await state.async_attach_trigger( hass, state_config, action, automation_info, platform_type="device" )
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}",
                        "availability_template": "{{ x - 12 }}",
                        "speed_template": "{{ states('input_select.speed') }}",
                        "oscillating_template": "{{ states('input_select.osc') }}",
                        "direction_template": "{{ states('input_select.direction') }}",
                        "turn_on": {"service": "script.fan_on"},
                        "turn_off": {"service": "script.fan_off"},
                    }
                },
            }
        },
    )

    await hass.async_start()
    await hass.async_block_till_done()

    assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE
    assert ("Could not render availability_template template") in caplog.text
    assert ("UndefinedError: 'x' is undefined") in caplog.text


# End of template tests #


# Function tests #
async def test_on_off(hass, calls):
    """Test turn on and turn off."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # verify
    assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON
    _verify(hass, STATE_ON, None, None, None)

    # Turn off fan
    await common.async_turn_off(hass, _TEST_FAN)

    # verify
    assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF
    _verify(hass, STATE_OFF, None, None, None)


async def test_on_with_speed(hass, calls):
    """Test turn on with speed."""
    await _register_components(hass)

    # Turn on fan with high speed
    await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH)

    # verify
    assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)


async def test_set_speed(hass, calls):
    """Test set valid speed."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's speed to high
    await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH)

    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)

    # Set fan's speed to medium
    await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM)

    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM
    _verify(hass, STATE_ON, SPEED_MEDIUM, None, None)


async def test_set_invalid_speed_from_initial_stage(hass, calls):
    """Test set invalid speed when fan is in initial state."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's speed to 'invalid'
    await common.async_set_speed(hass, _TEST_FAN, "invalid")

    # verify speed is unchanged
    assert hass.states.get(_SPEED_INPUT_SELECT).state == ""
    _verify(hass, STATE_ON, None, None, None)


async def test_set_invalid_speed(hass, calls):
    """Test set invalid speed when fan has valid speed."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's speed to high
    await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH)

    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)

    # Set fan's speed to 'invalid'
    await common.async_set_speed(hass, _TEST_FAN, "invalid")

    # verify speed is unchanged
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)


async def test_custom_speed_list(hass, calls):
    """Test set custom speed list."""
    await _register_components(hass, ["1", "2", "3"])

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's speed to '1'
    await common.async_set_speed(hass, _TEST_FAN, "1")

    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == "1"
    _verify(hass, STATE_ON, "1", None, None)

    # Set fan's speed to 'medium' which is invalid
    await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM)

    # verify that speed is unchanged
    assert hass.states.get(_SPEED_INPUT_SELECT).state == "1"
    _verify(hass, STATE_ON, "1", None, None)


async def test_set_osc(hass, calls):
    """Test set oscillating."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's osc to True
    await common.async_oscillate(hass, _TEST_FAN, True)

    # verify
    assert hass.states.get(_OSC_INPUT).state == "True"
    _verify(hass, STATE_ON, None, True, None)

    # Set fan's osc to False
    await common.async_oscillate(hass, _TEST_FAN, False)

    # verify
    assert hass.states.get(_OSC_INPUT).state == "False"
    _verify(hass, STATE_ON, None, False, None)


async def test_set_invalid_osc_from_initial_state(hass, calls):
    """Test set invalid oscillating when fan is in initial state."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's osc to 'invalid'
    with pytest.raises(vol.Invalid):
        await common.async_oscillate(hass, _TEST_FAN, "invalid")

    # verify
    assert hass.states.get(_OSC_INPUT).state == ""
    _verify(hass, STATE_ON, None, None, None)


async def test_set_invalid_osc(hass, calls):
    """Test set invalid oscillating when fan has valid osc."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's osc to True
    await common.async_oscillate(hass, _TEST_FAN, True)

    # verify
    assert hass.states.get(_OSC_INPUT).state == "True"
    _verify(hass, STATE_ON, None, True, None)

    # Set fan's osc to None
    with pytest.raises(vol.Invalid):
        await common.async_oscillate(hass, _TEST_FAN, None)

    # verify osc is unchanged
    assert hass.states.get(_OSC_INPUT).state == "True"
    _verify(hass, STATE_ON, None, True, None)


async def test_set_direction(hass, calls):
    """Test set valid direction."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's direction to forward
    await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD)

    # verify
    assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD
    _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD)

    # Set fan's direction to reverse
    await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE)

    # verify
    assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE
    _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE)


async def test_set_invalid_direction_from_initial_stage(hass, calls):
    """Test set invalid direction when fan is in initial state."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's direction to 'invalid'
    await common.async_set_direction(hass, _TEST_FAN, "invalid")

    # verify direction is unchanged
    assert hass.states.get(_DIRECTION_INPUT_SELECT).state == ""
    _verify(hass, STATE_ON, None, None, None)


async def test_set_invalid_direction(hass, calls):
    """Test set invalid direction when fan has valid direction."""
    await _register_components(hass)

    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)

    # Set fan's direction to forward
    await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD)

    # verify
    assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD
    _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD)

    # Set fan's direction to 'invalid'
    await common.async_set_direction(hass, _TEST_FAN, "invalid")

    # verify direction is unchanged
    assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD
    _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD)


def _verify(
    hass, expected_state, expected_speed, expected_oscillating, expected_direction
):
    """Verify fan's state, speed and osc."""
    state = hass.states.get(_TEST_FAN)
    attributes = state.attributes
    assert state.state == expected_state
    assert attributes.get(ATTR_SPEED, None) == expected_speed
    assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating
    assert attributes.get(ATTR_DIRECTION, None) == expected_direction


async def _register_components(hass, speed_list=None):
    """Register basic components for testing."""
    with assert_setup_component(1, "input_boolean"):
        assert await setup.async_setup_component(
            hass, "input_boolean", {"input_boolean": {"state": None}}
        )

    with assert_setup_component(3, "input_select"):
        assert await setup.async_setup_component(
            hass,
            "input_select",
            {
                "input_select": {
                    "speed": {
                        "name": "Speed",
                        "options": ["", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3"],
                    },
                    "osc": {"name": "oscillating", "options": ["", "True", "False"]},
                    "direction": {
                        "name": "Direction",
                        "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE],
                    },
                }
            },
        )

    with assert_setup_component(1, "fan"):
        value_template = """
        {% if is_state('input_boolean.state', 'on') %}
            {{ 'on' }}
        {% else %}
            {{ 'off' }}
        {% endif %}
        """

        test_fan_config = {
            "value_template": value_template,
            "speed_template": "{{ states('input_select.speed') }}",
            "oscillating_template": "{{ states('input_select.osc') }}",
            "direction_template": "{{ states('input_select.direction') }}",
            "turn_on": {
                "service": "input_boolean.turn_on",
                "entity_id": _STATE_INPUT_BOOLEAN,
            },
            "turn_off": {
                "service": "input_boolean.turn_off",
                "entity_id": _STATE_INPUT_BOOLEAN,
            },
            "set_speed": {
                "service": "input_select.select_option",
                "data_template": {
                    "entity_id": _SPEED_INPUT_SELECT,
                    "option": "{{ speed }}",
                },
            },
            "set_oscillating": {
                "service": "input_select.select_option",
                "data_template": {
                    "entity_id": _OSC_INPUT,
                    "option": "{{ oscillating }}",
                },
            },
            "set_direction": {
                "service": "input_select.select_option",
                "data_template": {
                    "entity_id": _DIRECTION_INPUT_SELECT,
                    "option": "{{ direction }}",
                },
            },
        }

        if speed_list:
            test_fan_config["speeds"] = speed_list

        assert await setup.async_setup_component(
            hass,
            "fan",
            {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}},
        )

    await hass.async_start()
    await hass.async_block_till_done()
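Usage note: every case in this module follows the same three-beat pattern: register the helper entities, act through tests.components.fan.common, then check state with _verify. A minimal sketch of a new case in that style (a hypothetical test, not part of the original module):

# Hypothetical extra case, shown only to illustrate the helper pattern above.
async def test_direction_roundtrip(hass, calls):
    """Sketch: set direction forward then reverse, verifying each step."""
    await _register_components(hass)
    await common.async_turn_on(hass, _TEST_FAN)

    await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD)
    _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD)

    await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE)
    _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE)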
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/lock/device_trigger.py
"""Sensor platform support for yeelight.""" import logging from homeassistant.components.binary_sensor import BinarySensorDevice from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from . import DATA_UPDATED, DATA_YEELIGHT _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Yeelight sensors.""" if not discovery_info: return device = hass.data[DATA_YEELIGHT][discovery_info["host"]] if device.is_nightlight_supported: _LOGGER.debug("Adding nightlight mode sensor for %s", device.name) add_entities([YeelightNightlightModeSensor(device)]) class YeelightNightlightModeSensor(BinarySensorDevice): """Representation of a Yeelight nightlight mode sensor.""" def __init__(self, device): """Initialize nightlight mode sensor.""" self._device = device @callback def _schedule_immediate_update(self): self.async_schedule_update_ha_state() async def async_added_to_hass(self): """Handle entity which will be added.""" async_dispatcher_connect( self.hass, DATA_UPDATED.format(self._device.ipaddr), self._schedule_immediate_update, ) @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return the name of the sensor.""" return f"{self._device.name} nightlight" @property def is_on(self): """Return true if nightlight mode is on.""" return self._device.is_nightlight_enabled
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/yeelight/binary_sensor.py
"""Provides device automations for Fan.""" from typing import List import voluptuous as vol from homeassistant.components.automation import AutomationActionType, state from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA from homeassistant.const import ( CONF_DEVICE_ID, CONF_DOMAIN, CONF_ENTITY_ID, CONF_PLATFORM, CONF_TYPE, STATE_OFF, STATE_ON, ) from homeassistant.core import CALLBACK_TYPE, HomeAssistant from homeassistant.helpers import config_validation as cv, entity_registry from homeassistant.helpers.typing import ConfigType from . import DOMAIN TRIGGER_TYPES = {"turned_on", "turned_off"} TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend( { vol.Required(CONF_ENTITY_ID): cv.entity_id, vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES), } ) async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]: """List device triggers for Fan devices.""" registry = await entity_registry.async_get_registry(hass) triggers = [] # Get all the integrations entities for this device for entry in entity_registry.async_entries_for_device(registry, device_id): if entry.domain != DOMAIN: continue # Add triggers for each entity that belongs to this integration triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "turned_on", } ) triggers.append( { CONF_PLATFORM: "device", CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "turned_off", } ) return triggers async def async_attach_trigger( hass: HomeAssistant, config: ConfigType, action: AutomationActionType, automation_info: dict, ) -> CALLBACK_TYPE: """Attach a trigger.""" config = TRIGGER_SCHEMA(config) if config[CONF_TYPE] == "turned_on": from_state = STATE_OFF to_state = STATE_ON else: from_state = STATE_ON to_state = STATE_OFF state_config = { state.CONF_PLATFORM: "state", CONF_ENTITY_ID: config[CONF_ENTITY_ID], state.CONF_FROM: from_state, state.CONF_TO: to_state, } state_config = state.TRIGGER_SCHEMA(state_config) return await state.async_attach_trigger( hass, state_config, action, automation_info, platform_type="device" )
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
Teagan42/home-assistant
tests/components/template/test_fan.py
homeassistant/components/fan/device_trigger.py
"""Support for interacting with Vultr subscriptions.""" import logging import voluptuous as vol from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice from homeassistant.const import CONF_NAME import homeassistant.helpers.config_validation as cv from . import ( ATTR_ALLOWED_BANDWIDTH, ATTR_AUTO_BACKUPS, ATTR_COST_PER_MONTH, ATTR_CREATED_AT, ATTR_DISK, ATTR_IPV4_ADDRESS, ATTR_IPV6_ADDRESS, ATTR_MEMORY, ATTR_OS, ATTR_REGION, ATTR_SUBSCRIPTION_ID, ATTR_SUBSCRIPTION_NAME, ATTR_VCPUS, CONF_SUBSCRIPTION, DATA_VULTR, ) _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Vultr {}" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_SUBSCRIPTION): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Vultr subscription switch.""" vultr = hass.data[DATA_VULTR] subscription = config.get(CONF_SUBSCRIPTION) name = config.get(CONF_NAME) if subscription not in vultr.data: _LOGGER.error("Subscription %s not found", subscription) return False add_entities([VultrSwitch(vultr, subscription, name)], True) class VultrSwitch(SwitchDevice): """Representation of a Vultr subscription switch.""" def __init__(self, vultr, subscription, name): """Initialize a new Vultr switch.""" self._vultr = vultr self._name = name self.subscription = subscription self.data = None @property def name(self): """Return the name of the switch.""" try: return self._name.format(self.data["label"]) except (TypeError, KeyError): return self._name @property def is_on(self): """Return true if switch is on.""" return self.data["power_status"] == "running" @property def icon(self): """Return the icon of this server.""" return "mdi:server" if self.is_on else "mdi:server-off" @property def device_state_attributes(self): """Return the state attributes of the Vultr subscription.""" return { ATTR_ALLOWED_BANDWIDTH: self.data.get("allowed_bandwidth_gb"), ATTR_AUTO_BACKUPS: self.data.get("auto_backups"), ATTR_COST_PER_MONTH: self.data.get("cost_per_month"), ATTR_CREATED_AT: self.data.get("date_created"), ATTR_DISK: self.data.get("disk"), ATTR_IPV4_ADDRESS: self.data.get("main_ip"), ATTR_IPV6_ADDRESS: self.data.get("v6_main_ip"), ATTR_MEMORY: self.data.get("ram"), ATTR_OS: self.data.get("os"), ATTR_REGION: self.data.get("location"), ATTR_SUBSCRIPTION_ID: self.data.get("SUBID"), ATTR_SUBSCRIPTION_NAME: self.data.get("label"), ATTR_VCPUS: self.data.get("vcpu_count"), } def turn_on(self, **kwargs): """Boot-up the subscription.""" if self.data["power_status"] != "running": self._vultr.start(self.subscription) def turn_off(self, **kwargs): """Halt the subscription.""" if self.data["power_status"] == "running": self._vultr.halt(self.subscription) def update(self): """Get the latest data from the device and update the data.""" self._vultr.update() self.data = self._vultr.data[self.subscription]
"""The tests for the Template fan platform.""" import logging import pytest import voluptuous as vol from homeassistant import setup from homeassistant.components.fan import ( ATTR_DIRECTION, ATTR_OSCILLATING, ATTR_SPEED, DIRECTION_FORWARD, DIRECTION_REVERSE, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, ) from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from tests.common import assert_setup_component, async_mock_service from tests.components.fan import common _LOGGER = logging.getLogger(__name__) _TEST_FAN = "fan.test_fan" # Represent for fan's state _STATE_INPUT_BOOLEAN = "input_boolean.state" # Represent for fan's state _STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state" # Represent for fan's speed _SPEED_INPUT_SELECT = "input_select.speed" # Represent for fan's oscillating _OSC_INPUT = "input_select.osc" # Represent for fan's direction _DIRECTION_INPUT_SELECT = "input_select.direction" @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") # Configuration tests # async def test_missing_optional_config(hass, calls): """Test: missing optional template is ok.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, None, None, None) async def test_missing_value_template_config(hass, calls): """Test: missing 'value_template' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_on_config(hass, calls): """Test: missing 'turn_on' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_missing_turn_off_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] async def test_invalid_config(hass, calls): """Test: missing 'turn_off' will fail.""" with assert_setup_component(0, "fan"): assert await setup.async_setup_component( hass, "fan", { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "turn_on": {"service": "script.fan_on"}, } }, }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.async_all() == [] # End of configuration tests # # Template tests # async def test_templates_with_entities(hass, calls): """Test tempalates with values from other entities.""" value_template = """ {% if is_state('input_boolean.state', 'True') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ 
with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) hass.states.async_set(_STATE_INPUT_BOOLEAN, True) hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM) hass.states.async_set(_OSC_INPUT, "True") hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD) await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_availability_template_with_entities(hass, calls): """Test availability tempalates with values from other entities.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "availability_template": "{{ is_state('availability_boolean.state', 'on') }}", "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() # When template returns true.. hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE async def test_templates_with_valid_values(hass, calls): """Test templates with valid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'on' }}", "speed_template": "{{ 'medium' }}", "oscillating_template": "{{ 1 == 1 }}", "direction_template": "{{ 'forward' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD) async def test_templates_invalid_values(hass, calls): """Test templates with invalid values.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 'abc' }}", "speed_template": "{{ '0' }}", "oscillating_template": "{{ 'xyz' }}", "direction_template": "{{ 'right' }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() _verify(hass, STATE_OFF, None, None, None) async def test_invalid_availability_template_keeps_component_available(hass, caplog): """Test that an invalid availability keeps the device available.""" with assert_setup_component(1, "fan"): assert await setup.async_setup_component( hass, "fan", { "fan": { "platform": "template", "fans": { "test_fan": { "value_template": "{{ 
'on' }}", "availability_template": "{{ x - 12 }}", "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": {"service": "script.fan_on"}, "turn_off": {"service": "script.fan_off"}, } }, } }, ) await hass.async_start() await hass.async_block_till_done() assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE assert ("Could not render availability_template template") in caplog.text assert ("UndefinedError: 'x' is undefined") in caplog.text # End of template tests # # Function tests # async def test_on_off(hass, calls): """Test turn on and turn off.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON _verify(hass, STATE_ON, None, None, None) # Turn off fan await common.async_turn_off(hass, _TEST_FAN) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF _verify(hass, STATE_OFF, None, None, None) async def test_on_with_speed(hass, calls): """Test turn on with speed.""" await _register_components(hass) # Turn on fan with high speed await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_set_speed(hass, calls): """Test set valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to medium await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM _verify(hass, STATE_ON, SPEED_MEDIUM, None, None) async def test_set_invalid_speed_from_initial_stage(hass, calls): """Test set invalid speed when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_speed(hass, calls): """Test set invalid speed when fan has valid speed.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to high await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH) # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) # Set fan's speed to 'invalid' await common.async_set_speed(hass, _TEST_FAN, "invalid") # verify speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH _verify(hass, STATE_ON, SPEED_HIGH, None, None) async def test_custom_speed_list(hass, calls): """Test set custom speed list.""" await _register_components(hass, ["1", "2", "3"]) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's speed to '1' await common.async_set_speed(hass, _TEST_FAN, "1") # verify assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) # Set fan's speed to 'medium' which is invalid await common.async_set_speed(hass, _TEST_FAN, 
SPEED_MEDIUM) # verify that speed is unchanged assert hass.states.get(_SPEED_INPUT_SELECT).state == "1" _verify(hass, STATE_ON, "1", None, None) async def test_set_osc(hass, calls): """Test set oscillating.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to False await common.async_oscillate(hass, _TEST_FAN, False) # verify assert hass.states.get(_OSC_INPUT).state == "False" _verify(hass, STATE_ON, None, False, None) async def test_set_invalid_osc_from_initial_state(hass, calls): """Test set invalid oscillating when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to 'invalid' with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, "invalid") # verify assert hass.states.get(_OSC_INPUT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_osc(hass, calls): """Test set invalid oscillating when fan has valid osc.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's osc to True await common.async_oscillate(hass, _TEST_FAN, True) # verify assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) # Set fan's osc to None with pytest.raises(vol.Invalid): await common.async_oscillate(hass, _TEST_FAN, None) # verify osc is unchanged assert hass.states.get(_OSC_INPUT).state == "True" _verify(hass, STATE_ON, None, True, None) async def test_set_direction(hass, calls): """Test set valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to reverse await common.async_set_direction(hass, _TEST_FAN, DIRECTION_REVERSE) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_REVERSE _verify(hass, STATE_ON, None, None, DIRECTION_REVERSE) async def test_set_invalid_direction_from_initial_stage(hass, calls): """Test set invalid direction when fan is in initial state.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == "" _verify(hass, STATE_ON, None, None, None) async def test_set_invalid_direction(hass, calls): """Test set invalid direction when fan has valid direction.""" await _register_components(hass) # Turn on fan await common.async_turn_on(hass, _TEST_FAN) # Set fan's direction to forward await common.async_set_direction(hass, _TEST_FAN, DIRECTION_FORWARD) # verify assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) # Set fan's direction to 'invalid' await common.async_set_direction(hass, _TEST_FAN, "invalid") # verify direction is unchanged assert hass.states.get(_DIRECTION_INPUT_SELECT).state == DIRECTION_FORWARD _verify(hass, STATE_ON, None, None, DIRECTION_FORWARD) def _verify( hass, expected_state, 
expected_speed, expected_oscillating, expected_direction ): """Verify fan's state, speed and osc.""" state = hass.states.get(_TEST_FAN) attributes = state.attributes assert state.state == expected_state assert attributes.get(ATTR_SPEED, None) == expected_speed assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating assert attributes.get(ATTR_DIRECTION, None) == expected_direction async def _register_components(hass, speed_list=None): """Register basic components for testing.""" with assert_setup_component(1, "input_boolean"): assert await setup.async_setup_component( hass, "input_boolean", {"input_boolean": {"state": None}} ) with assert_setup_component(3, "input_select"): assert await setup.async_setup_component( hass, "input_select", { "input_select": { "speed": { "name": "Speed", "options": [ "", SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, "1", "2", "3", ], }, "osc": {"name": "oscillating", "options": ["", "True", "False"]}, "direction": { "name": "Direction", "options": ["", DIRECTION_FORWARD, DIRECTION_REVERSE], }, } }, ) with assert_setup_component(1, "fan"): value_template = """ {% if is_state('input_boolean.state', 'on') %} {{ 'on' }} {% else %} {{ 'off' }} {% endif %} """ test_fan_config = { "value_template": value_template, "speed_template": "{{ states('input_select.speed') }}", "oscillating_template": "{{ states('input_select.osc') }}", "direction_template": "{{ states('input_select.direction') }}", "turn_on": { "service": "input_boolean.turn_on", "entity_id": _STATE_INPUT_BOOLEAN, }, "turn_off": { "service": "input_boolean.turn_off", "entity_id": _STATE_INPUT_BOOLEAN, }, "set_speed": { "service": "input_select.select_option", "data_template": { "entity_id": _SPEED_INPUT_SELECT, "option": "{{ speed }}", }, }, "set_oscillating": { "service": "input_select.select_option", "data_template": { "entity_id": _OSC_INPUT, "option": "{{ oscillating }}", }, }, "set_direction": { "service": "input_select.select_option", "data_template": { "entity_id": _DIRECTION_INPUT_SELECT, "option": "{{ direction }}", }, }, } if speed_list: test_fan_config["speeds"] = speed_list assert await setup.async_setup_component( hass, "fan", {"fan": {"platform": "template", "fans": {"test_fan": test_fan_config}}}, ) await hass.async_start() await hass.async_block_till_done()
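# A possible consolidation (illustrative sketch, not part of the original
# suite): the invalid-speed and invalid-direction tests above share one
# shape, so they could be parametrized. The test name and the
# setter/select/valid parameter names are ours; everything else reuses the
# module's existing helpers and imports (pytest, common, _register_components).
@pytest.mark.parametrize(
    "setter, select, valid",
    [
        (common.async_set_speed, _SPEED_INPUT_SELECT, SPEED_HIGH),
        (common.async_set_direction, _DIRECTION_INPUT_SELECT, DIRECTION_FORWARD),
    ],
)
async def test_invalid_value_keeps_previous(hass, calls, setter, select, valid):
    """Setting an invalid value must leave the previous valid value intact."""
    await _register_components(hass)
    await common.async_turn_on(hass, _TEST_FAN)

    # Establish a known-good value first.
    await setter(hass, _TEST_FAN, valid)
    assert hass.states.get(select).state == valid

    # The invalid value is rejected; the backing input_select is unchanged.
    await setter(hass, _TEST_FAN, "invalid")
    assert hass.states.get(select).state == valid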
# Source: Teagan42/home-assistant
#   test: tests/components/template/test_fan.py
#   code: homeassistant/components/vultr/switch.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains functions for reading and writing HDF5 tables that are not meant to be used directly, but instead are available as readers/writers in `astropy.table`. See :ref:`table_io` for more details. """ import os import warnings import numpy as np # NOTE: Do not import anything from astropy.table here. # https://github.com/astropy/astropy/issues/6604 from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n' META_KEY = '__table_column_meta__' __all__ = ['read_table_hdf5', 'write_table_hdf5'] def meta_path(path): return path + '.' + META_KEY def _find_all_structured_arrays(handle): """ Find all structured arrays in an HDF5 file """ import h5py structured_arrays = [] def append_structured_arrays(name, obj): if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V': structured_arrays.append(name) handle.visititems(append_structured_arrays) return structured_arrays def is_hdf5(origin, filepath, fileobj, *args, **kwargs): if fileobj is not None: loc = fileobj.tell() try: signature = fileobj.read(8) finally: fileobj.seek(loc) return signature == HDF5_SIGNATURE elif filepath is not None: return filepath.endswith(('.hdf5', '.h5')) try: import h5py except ImportError: return False else: return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset)) def read_table_hdf5(input, path=None, character_as_bytes=True): """ Read a Table object from an HDF5 file This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one table is present in the HDF5 file or group, the first table is read in and a warning is displayed. Parameters ---------- input : str or :class:`h5py:File` or :class:`h5py:Group` or :class:`h5py:Dataset` If a string, the filename to read the table from. If an h5py object, either the file or the group object to read the table from. path : str The path from which to read the table inside the HDF5 file. This should be relative to the input file or group. character_as_bytes: boolean If `True` then Table columns are left as bytes. If `False` then Table columns are converted to unicode. """ try: import h5py except ImportError: raise Exception("h5py is required to read and write HDF5 files") # This function is iterative, and only gets to writing the file when # the input is an hdf5 Group. Moreover, the input variable is changed in # place. # Here, we save its value to be used at the end when the conditions are # right. input_save = input if isinstance(input, (h5py.File, h5py.Group)): # If a path was specified, follow the path if path is not None: try: input = input[path] except (KeyError, ValueError): raise OSError(f"Path {path} does not exist") # `input` is now either a group or a dataset. If it is a group, we # will search for all structured arrays inside the group, and if there # is one we can proceed otherwise an error is raised. If it is a # dataset, we just proceed with the reading. if isinstance(input, h5py.Group): # Find all structured arrays in group arrays = _find_all_structured_arrays(input) if len(arrays) == 0: raise ValueError("no table found in HDF5 group {}". 
format(path)) elif len(arrays) > 0: path = arrays[0] if path is None else path + '/' + arrays[0] if len(arrays) > 1: warnings.warn("path= was not specified but multiple tables" " are present, reading in first available" " table (path={})".format(path), AstropyUserWarning) return read_table_hdf5(input, path=path) elif not isinstance(input, h5py.Dataset): # If a file object was passed, then we need to extract the filename # because h5py cannot properly read in file objects. if hasattr(input, 'read'): try: input = input.name except AttributeError: raise TypeError("h5py can only open regular files") # Open the file for reading, and recursively call read_table_hdf5 with # the file object and the path. f = h5py.File(input, 'r') try: return read_table_hdf5(f, path=path, character_as_bytes=character_as_bytes) finally: f.close() # If we are here, `input` should be a Dataset object, which we can now # convert to a Table. # Create a Table object from astropy.table import Table, meta, serialize table = Table(np.array(input)) # Read the meta-data from the file. For back-compatibility, we can read # the old file format where the serialized metadata were saved in the # attributes of the HDF5 dataset. # In the new format, instead, metadata are stored in a new dataset in the # same file. This is introduced in Astropy 3.0 old_version_meta = META_KEY in input.attrs new_version_meta = path is not None and meta_path(path) in input_save if old_version_meta or new_version_meta: if new_version_meta: header = meta.get_header_from_yaml( h.decode('utf-8') for h in input_save[meta_path(path)]) elif old_version_meta: header = meta.get_header_from_yaml( h.decode('utf-8') for h in input.attrs[META_KEY]) if 'meta' in list(header.keys()): table.meta = header['meta'] header_cols = dict((x['name'], x) for x in header['datatype']) for col in table.columns.values(): for attr in ('description', 'format', 'unit', 'meta'): if attr in header_cols[col.name]: setattr(col, attr, header_cols[col.name][attr]) # Construct new table with mixins, using tbl.meta['__serialized_columns__'] # as guidance. table = serialize._construct_mixins_from_columns(table) else: # Read the meta-data from the file table.meta.update(input.attrs) if not character_as_bytes: table.convert_bytestring_to_unicode() return table def _encode_mixins(tbl): """Encode a Table ``tbl`` that may have mixin columns to a Table with only astropy Columns + appropriate meta-data to allow subsequent decoding. """ from astropy.table import serialize from astropy.table.table import has_info_class from astropy import units as u from astropy.utils.data_info import MixinInfo, serialize_context_as # If PyYAML is not available then check to see if there are any mixin cols # that *require* YAML serialization. HDF5 already has support for # Quantity, so if those are the only mixins the proceed without doing the # YAML bit, for backward compatibility (i.e. not requiring YAML to write # Quantity). try: import yaml except ImportError: for col in tbl.itercols(): if (has_info_class(col, MixinInfo) and col.__class__ is not u.Quantity): raise TypeError("cannot write type {} column '{}' " "to HDF5 without PyYAML installed." .format(col.__class__.__name__, col.info.name)) # Convert the table to one with no mixins, only Column objects. This adds # meta data which is extracted with meta.get_yaml_from_table. 
with serialize_context_as('hdf5'): encode_tbl = serialize.represent_mixins_as_columns(tbl) return encode_tbl def write_table_hdf5(table, output, path=None, compression=False, append=False, overwrite=False, serialize_meta=False): """ Write a Table object to an HDF5 file This requires `h5py <http://www.h5py.org/>`_ to be installed. Parameters ---------- table : `~astropy.table.Table` Data table that is to be written to file. output : str or :class:`h5py:File` or :class:`h5py:Group` If a string, the filename to write the table to. If an h5py object, either the file or the group object to write the table to. path : str The path to which to write the table inside the HDF5 file. This should be relative to the input file or group. If not specified, defaults to ``__astropy_table__``. compression : bool or str or int Whether to compress the table inside the HDF5 file. If set to `True`, ``'gzip'`` compression is used. If a string is specified, it should be one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is specified (in the range 0-9), ``'gzip'`` compression is used, and the integer denotes the compression level. append : bool Whether to append the table to an existing HDF5 file. overwrite : bool Whether to overwrite any existing file without warning. If ``append=True`` and ``overwrite=True`` then only the dataset will be replaced; the file/group will not be overwritten. """ from astropy.table import meta try: import h5py except ImportError: raise Exception("h5py is required to read and write HDF5 files") if path is None: # table is just an arbitrary, hardcoded string here. path = '__astropy_table__' elif path.endswith('/'): raise ValueError("table path should end with table name, not /") if '/' in path: group, name = path.rsplit('/', 1) else: group, name = None, path if isinstance(output, (h5py.File, h5py.Group)): if len(list(output.keys())) > 0 and name == '__astropy_table__': raise ValueError("table path should always be set via the " "path= argument when writing to existing " "files") elif name == '__astropy_table__': warnings.warn("table path was not set via the path= argument; " "using default path {}".format(path)) if group: try: output_group = output[group] except (KeyError, ValueError): output_group = output.create_group(group) else: output_group = output elif isinstance(output, str): if os.path.exists(output) and not append: if overwrite and not append: os.remove(output) else: raise OSError(f"File exists: {output}") # Open the file for appending or writing f = h5py.File(output, 'a' if append else 'w') # Recursively call the write function try: return write_table_hdf5(table, f, path=path, compression=compression, append=append, overwrite=overwrite, serialize_meta=serialize_meta) finally: f.close() else: raise TypeError('output should be a string or an h5py File or ' 'Group object') # Check whether table already exists if name in output_group: if append and overwrite: # Delete only the dataset itself del output_group[name] else: raise OSError(f"Table {path} already exists") # Encode any mixin columns as plain columns + appropriate metadata table = _encode_mixins(table) # Table with numpy unicode strings can't be written in HDF5 so # to write such a table a copy of table is made containing columns as # bytestrings. Now this copy of the table can be written in HDF5. if any(col.info.dtype.kind == 'U' for col in table.itercols()): table = table.copy(copy_data=False) table.convert_unicode_to_bytestring() # Warn if information will be lost when serialize_meta=False. 
This is # hardcoded to the set difference between column info attributes and what # HDF5 can store natively (name, dtype) with no meta. if serialize_meta is False: for col in table.itercols(): for attr in ('unit', 'format', 'description', 'meta'): if getattr(col.info, attr, None) not in (None, {}): warnings.warn("table contains column(s) with defined 'unit', 'format'," " 'description', or 'meta' info attributes. These will" " be dropped since serialize_meta=False.", AstropyUserWarning) # Write the table to the file if compression: if compression is True: compression = 'gzip' dset = output_group.create_dataset(name, data=table.as_array(), compression=compression) else: dset = output_group.create_dataset(name, data=table.as_array()) if serialize_meta: header_yaml = meta.get_yaml_from_table(table) header_encoded = [h.encode('utf-8') for h in header_yaml] output_group.create_dataset(meta_path(name), data=header_encoded) else: # Write the Table meta dict key:value pairs to the file as HDF5 # attributes. This works only for a limited set of scalar data types # like numbers, strings, etc., but not any complex types. This path # also ignores column meta like unit or format. for key in table.meta: val = table.meta[key] try: dset.attrs[key] = val except TypeError: warnings.warn("Attribute `{}` of type {} cannot be written to " "HDF5 files - skipping. (Consider specifying " "serialize_meta=True to write all meta data)".format(key, type(val)), AstropyUserWarning) def register_hdf5(): """ Register HDF5 with Unified I/O. """ from astropy.io import registry as io_registry from astropy.table import Table io_registry.register_reader('hdf5', Table, read_table_hdf5) io_registry.register_writer('hdf5', Table, write_table_hdf5) io_registry.register_identifier('hdf5', Table, is_hdf5)
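# Usage sketch (illustrative, not part of the module): round-trip a table
# through the reader/writer above. The filename and HDF5 path are
# placeholders; h5py is required, and serialize_meta=True additionally needs
# PyYAML. Table is imported inside the function, matching the module's note
# about not importing astropy.table at module level.
def _example_roundtrip():  # pragma: no cover - illustrative only
    from astropy.table import Table
    t = Table({'a': [1.0, 2.0], 'b': ['x', 'y']})
    t['a'].unit = 'm'
    write_table_hdf5(t, 'example.h5', path='obs/table',
                     serialize_meta=True, overwrite=True)
    t2 = read_table_hdf5('example.h5', path='obs/table')
    # With serialize_meta=True the unit survives the round trip.
    assert (t2['a'] == t['a']).all()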
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import re from io import BytesIO, open from collections import OrderedDict import locale import platform from io import StringIO import pathlib import pytest import numpy as np from astropy.io import ascii from astropy.table import Table from astropy import table from astropy.units import Unit from astropy.table.table_helpers import simple_table from .common import (raises, assert_equal, assert_almost_equal, assert_true) from astropy.io.ascii import core from astropy.io.ascii.ui import _probably_html, get_read_trace, cparser from astropy.utils.exceptions import AstropyWarning # setup/teardown function to have the tests run in the correct directory from .common import setup_function, teardown_function try: import bz2 # pylint: disable=W0611 except ImportError: HAS_BZ2 = False else: HAS_BZ2 = True asciiIO = lambda x: BytesIO(x.encode('ascii')) @pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False}, {'use_fast_converter': True}, 'force']) def test_convert_overflow(fast_reader): """ Test reading an extremely large integer, which falls through to string due to an overflow error (#2234). The C parsers used to return inf (kind 'f') for this. """ expected_kind = 'U' with pytest.warns(AstropyWarning, match="OverflowError converting to IntType in column a"): dat = ascii.read(['a', '1' * 10000], format='basic', fast_reader=fast_reader, guess=False) assert dat['a'].dtype.kind == expected_kind def test_guess_with_names_arg(): """ Make sure reading a table with guess=True gives the expected result when the names arg is specified. """ # This is a NoHeader format table and so `names` should replace # the default col0, col1 names. It fails as a Basic format # table when guessing because the column names would be '1', '2'. dat = ascii.read(['1,2', '3,4'], names=('a', 'b')) assert len(dat) == 2 assert dat.colnames == ['a', 'b'] # This is a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c,d', '3,4'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # This is also a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c d', 'e f'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_format_arg(): """ When the format or Reader is explicitly given then disable the strict column name checking in guessing. """ dat = ascii.read(['1,2', '3,4'], format='basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic') assert len(dat) == 1 assert dat.colnames == ['a', 'b'] dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # For good measure check the same in the unified I/O interface dat = Table.read(['1,2', '3,4'], format='ascii.basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_delimiter_arg(): """ When the delimiter is explicitly given then do not try others in guessing. 
""" fields = ['10.1E+19', '3.14', '2048', '-23'] values = [1.01e20, 3.14, 2048, -23] # Default guess should recognise CSV with optional spaces t0 = ascii.read(asciiIO(', '.join(fields)), guess=True) for n, v in zip(t0.colnames, values): assert t0[n][0] == v # Forcing space as delimiter produces type str columns ('10.1E+19,') t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ') for n, v in zip(t1.colnames[:-1], fields[:-1]): assert t1[n][0] == v+',' def test_reading_mixed_delimiter_tabs_spaces(): # Regression test for https://github.com/astropy/astropy/issues/6770 dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc')) assert len(dat) == 2 Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header', names=['a', 'b', 'c']) assert len(dat) == 2 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_with_names_arg(fast_reader): """ Test that a bad value of `names` raises an exception. """ # CParser only uses columns in `names` and thus reports mismach in num_col with pytest.raises(ascii.InconsistentTableError): ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if ('Reader' in test_opts and 'fast_{}'.format(test_opts['Reader']._format_name) in core.FAST_CLASSES): # has fast version if 'Inputter' not in test_opts: # fast reader doesn't allow this test_opts['fast_reader'] = fast_reader table = ascii.read(testfile['name'], **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files_via_table(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if 'Reader' in test_opts: format = 'ascii.{}'.format(test_opts['Reader']._format_name) del test_opts['Reader'] else: format = 'ascii' if f'fast_{format}' in core.FAST_CLASSES: test_opts['fast_reader'] = fast_reader table = Table.read(testfile['name'], format=format, **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_guess_all_files(): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue if not testfile['opts'].get('guess', True): continue print('\n\n******** READING {}'.format(testfile['name'])) for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []): # Copy read options except for those in filter_read_opts guess_opts = dict((k, v) for k, v in testfile['opts'].items() if k not in filter_read_opts) table = ascii.read(testfile['name'], guess=True, **guess_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_daophot_indef(): """Test that INDEF is correctly 
interpreted as a missing value""" table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) for col in table.itercols(): # Four columns have all INDEF values and are masked, rest are normal Column if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'): assert np.all(col.mask) else: assert not hasattr(col, 'mask') def test_daophot_types(): """ Test specific data types which are different from what would be inferred automatically based only data values. DAOphot reader uses the header information to assign types. """ table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) assert table['LID'].dtype.char in 'fd' # float or double assert table['MAG'].dtype.char in 'fd' # even without any data values assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int) assert table['ID'].dtype.char in 'il' # int or long def test_daophot_header_keywords(): table = ascii.read('data/daophot.dat', Reader=ascii.Daophot) expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'), ('REJFILE', '"hello world"', 'filename', '%-23s'), ('SCALE', '1.', 'units/pix', '%-23.7g'),) keywords = table.meta['keywords'] # Ordered dict of keyword structures for name, value, units, format_ in expected_keywords: keyword = keywords[name] assert_equal(keyword['value'], value) assert_equal(keyword['units'], units) assert_equal(keyword['format'], format_) def test_daophot_multiple_aperture(): table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot) assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file assert table['MERR2'][0] == 1.171 assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3 def test_daophot_multiple_aperture2(): table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot) assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file assert table['MERR2'][0] == 0.049 assert np.all(table['RAPERT5'] == 5.) 
# assert all the 5th apertures are same 5.0 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_empty_table_no_header(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader, guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_wrong_quote(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/bad.txt', fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col2(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader) @raises(OSError) def test_missing_file(): ascii.read('does_not_exist') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') data = ascii.read('data/simple3.txt', names=names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_include_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') include_names = ('c1', 'c3') data = ascii.read('data/simple3.txt', names=names, include_names=include_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, include_names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_exclude_names(fast_reader): exclude_names = ('Y', 'object') data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad')) def test_include_names_daophot(): include_names = ('ID', 'MAG', 'PIER') data = ascii.read('data/daophot.dat', include_names=include_names) assert_equal(data.dtype.names, include_names) def test_exclude_names_daophot(): exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR') data = ascii.read('data/daophot.dat', exclude_names=exclude_names) assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER')) def test_custom_process_lines(): def process_lines(lines): bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE) striplines = (x.strip() for x in lines) return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0] reader = ascii.get_reader(delimiter='|') reader.inputter.process_lines = process_lines data = reader.read('data/bars_at_ends.txt') assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad')) assert_equal(len(data), 3) def test_custom_process_line(): def process_line(line): line_out = re.sub(r'^\|\s*', '', line.strip()) return line_out reader = ascii.get_reader(data_start=2, delimiter='|') reader.header.splitter.process_line = process_line reader.data.splitter.process_line = process_line data = reader.read('data/nls1_stackinfo.dbout') cols = get_testfiles('data/nls1_stackinfo.dbout')['cols'] assert_equal(data.dtype.names, cols[1:]) def test_custom_splitters(): reader = ascii.get_reader() reader.header.splitter = ascii.BaseSplitter() reader.data.splitter = ascii.BaseSplitter() f = 'data/test4.dat' data = reader.read(f) testfile = get_testfiles(f) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), 
testfile['nrows']) assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091) assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704) assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148) assert_equal(data.field('statname')[2], 'chi2modvar') assert_almost_equal(data.field('statval')[2], 497.56468441) def test_start_end(): data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5) assert_equal(len(data), 13) assert_equal(data.field('statname')[0], 'chi2xspecvar') assert_equal(data.field('statname')[-1], 'chi2gehrels') def test_set_converters(): converters = {'zabs1.nh': [ascii.convert_numpy('int32'), ascii.convert_numpy('float32')], 'p1.gamma': [ascii.convert_numpy('str')] } data = ascii.read('data/test4.dat', converters=converters) assert_equal(str(data['zabs1.nh'].dtype), 'float32') assert_equal(data['p1.gamma'][0], '1.26764500000') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_string(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.read() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_filelike(fast_reader): f = 'data/simple.txt' testfile = get_testfiles(f) with open(f, 'rb') as fd: data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_lines(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.readlines() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) def test_comment_lines(): table = ascii.get_reader(Reader=ascii.Rdb) data = table.read('data/apostrophe.rdb') assert_equal(table.comment_lines, ['# first comment', ' # second comment']) assert_equal(data.meta['comments'], ['first comment', 'second comment']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, **testfile['opts']) assert_true((data['a'].mask == [False, True]).all()) assert_true((data['a'] == [1, 1]).all()) assert_true((data['b'].mask == [False, True]).all()) assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_col(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader, **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_include_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_include_names=['b'], **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_exclude_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_exclude_names=['a'], **testfile['opts']) check_fill_values(data) def check_fill_values(data): """compare 
array column by column with expectation """ assert not hasattr(data['a'], 'mask') assert_true((data['a'] == ['1', 'a']).all()) assert_true((data['b'].mask == [False, True]).all()) # Check that masked value is "do not care" in comparison assert_true((data['b'] == [2, -999]).all()) data['b'].mask = False # explicitly unmask for comparison assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_list(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')], fast_reader=fast_reader, **testfile['opts']) data['a'].mask = False # explicitly unmask for comparison assert_true((data['a'] == [42, 42]).all()) def test_masking_Cds(): f = 'data/cds.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) assert_true(data['AK'].mask[0]) assert not hasattr(data['Fit'], 'mask') def test_null_Ipac(): f = 'data/ipac.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) mask = np.array([(True, False, True, False, True), (False, False, False, False, False)], dtype=[('ra', '|b1'), ('dec', '|b1'), ('sai', '|b1'), ('v2', '|b1'), ('sptype', '|b1')]) assert np.all(data.mask == mask) def test_Ipac_meta(): keywords = OrderedDict((('intval', 1), ('floatval', 2.3e3), ('date', "Wed Sp 20 09:48:36 1995"), ('key_continue', 'IPAC keywords can continue across lines'))) comments = ['This is an example of a valid comment'] f = 'data/ipac.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) assert data.meta['keywords'].keys() == keywords.keys() for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()): assert data_kv['value'] == kv assert data.meta['comments'] == comments def test_set_guess_kwarg(): """Read a file using guess with one of the typical guess_kwargs explicitly set.""" data = ascii.read('data/space_delim_no_header.dat', delimiter=',', guess=True) assert(data.dtype.names == ('1 3.4 hello',)) assert(len(data) == 1) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_rdb_wrong_type(fast_reader): """Read RDB data with inconstent data type (except failure)""" table = """col1\tcol2 N\tN 1\tHello""" with pytest.raises(ValueError): ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_default_missing(fast_reader): """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,,', '2, , 4.0 , ss ']) dat = ascii.read(table, fast_reader=fast_reader) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss'] # Single row table with a single missing element table = """ a \n "" """ dat = ascii.read(table, fast_reader=fast_reader) assert dat.pformat() == [' a ', '---', ' --'] assert dat['a'].dtype.kind == 'i' # Same test with a fixed width reader table = '\n'.join([' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss']) dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss'] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[]) assert dat.masked is False assert dat.pformat() == [' a b c d ', 
'--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] def get_testfiles(name=None): """Set up information about the columns, number of rows, and reader params to read a bunch of test files and verify columns and number of rows.""" testfiles = [ {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.rdb', 'nrows': 2, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.tab', 'nrows': 2, 'opts': {'Reader': ascii.Tab}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds}}, # Test malformed CDS file (issues #2241 #467) {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds_malformed.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header2.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5'), 'name': 'data/continuation.dat', 'nrows': 2, 'opts': {'Inputter': ascii.ContinuationLinesInputter, 'Reader': ascii.NoHeader}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/daophot.dat', 'nrows': 2, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALU-ES', 'VALU-ES_1', 'FLAG'), 'name': 'data/sextractor.dat', 'nrows': 3, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('col0', 'objID', 'osrcid', 'xsrcid', 'SpecObjID', 'ra', 'dec', 'obsid', 'ccdid', 'z', 'modelMag_i', 'modelMagErr_i', 'modelMag_r', 'modelMagErr_r', 'expo', 'theta', 'rad_ecf_39', 'detlim90', 'fBlim90'), 'name': 'data/nls1_stackinfo.dbout', 'nrows': 58, 'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/no_data_cds.dat', 'nrows': 0, 'opts': {'Reader': ascii.Cds}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/no_data_daophot.dat', 'nrows': 0, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALUES', 'VALUES_1', 'FLAG'), 'name': 'data/no_data_sextractor.dat', 'nrows': 0, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/no_data_ipac.dat', 'nrows': 0, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('ra', 'v2'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}}, {'cols': ('a', 'b', 'c'), 'name': 'data/no_data_with_header.dat', 'nrows': 0, 'opts': {}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.rdb', 'nrows': 7, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.tab', 'nrows': 7, 'opts': {'Reader': ascii.Tab}}, {'cols': ('test 1a', 'test2', 'test3', 'test4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'"}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 1, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 
1, 'opts': {'quotechar': "'", 'header_start': 1}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple2.txt', 'nrows': 3, 'opts': {'delimiter': '|'}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple3.txt', 'nrows': 2, 'opts': {'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'), 'name': 'data/simple4.txt', 'nrows': 3, 'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader, 'header_start': None}}, {'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'), 'name': 'data/space_delim_blank_lines.txt', 'nrows': 3, 'opts': {}}, {'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'), 'name': 'data/test4.dat', 'nrows': 9, 'opts': {}}, {'cols': ('a', 'b', 'c'), 'name': 'data/fill_values.txt', 'nrows': 2, 'opts': {'delimiter': ','}}, {'name': 'data/whitespace.dat', 'cols': ('quoted colname with tab\tinside', 'col2', 'col3'), 'nrows': 2, 'opts': {'delimiter': r'\s'}}, {'name': 'data/simple_csv.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'opts': {'Reader': ascii.Csv}}, {'name': 'data/simple_csv_missing.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'skip': True, 'opts': {'Reader': ascii.Csv}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex1.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Facility', 'Id', 'exposure', 'date'), 'name': 'data/latex2.tex', 'nrows': 3, 'opts': {'Reader': ascii.AASTex}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex3.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Col1', 'Col2', 'Col3', 'Col4'), 'name': 'data/fixed_width_2_line.txt', 'nrows': 2, 'opts': {'Reader': ascii.FixedWidthTwoLine}}, ] try: import bs4 # pylint: disable=W0611 testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'), 'name': 'data/html.html', 'nrows': 3, 'opts': {'Reader': ascii.HTML}}) except ImportError: pass if name is not None: return [x for x in testfiles if x['name'] == name][0] else: return testfiles def test_header_start_exception(): '''Check certain Readers throw an exception if ``header_start`` is set For certain Readers it does not make sense to set the ``header_start``, they throw an exception if you try. This was implemented in response to issue #885. ''' for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac, ascii.BaseReader, ascii.FixedWidthNoHeader, ascii.Cds, ascii.Daophot]: with pytest.raises(ValueError): reader = ascii.core._get_reader(readerclass, header_start=5) def test_csv_table_read(): """ Check for a regression introduced by #1935. Pseudo-CSV file with commented header line. """ lines = ['# a, b', '1, 2', '3, 4'] t = ascii.read(lines) assert t.colnames == ['a', 'b'] @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_overlapping_names(fast_reader): """ Check that the names argument list can overlap with the existing column names. This tests the issue in #1991. """ t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader) assert t.colnames == ['b', 'a'] def test_sextractor_units(): """ Make sure that the SExtractor reader correctly inputs descriptions and units. 
""" table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False) expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'), Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'), Unit('mag * arcsec**(-2)')] expected_descrs = ['Running object number', 'Windowed position estimate along x', 'Windowed position estimate along y', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', 'Extraction flags', None, 'Barycenter position along MAMA x axis', 'Peak surface brightness above background'] for i, colname in enumerate(table.colnames): assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_sextractor_last_column_array(): """ Make sure that the SExtractor reader handles the last column correctly when it is array-like. """ table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False) expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'MAG_AUTO', 'MAGERR_AUTO', 'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6', 'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6'] expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')] expected_descrs = ['Object position along x', None, 'Right ascension of barycenter (J2000)', 'Declination of barycenter (J2000)', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', ] + [ 'Fixed aperture magnitude vector'] * 7 + [ 'RMS error vector for fixed aperture mag.'] * 7 for i, colname in enumerate(table.colnames): assert table[colname].name == expected_columns[i] assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_list_with_newlines(): """ Check that lists of strings where some strings consist of just a newline ("\n") are parsed correctly. """ t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"]) assert t.colnames == ['abc'] assert len(t) == 2 assert t[0][0] == 123 assert t[1][0] == 456 def test_commented_csv(): """ Check that Csv reader does not have ignore lines with the # comment character which is defined for most Basic readers. """ t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv') assert t.colnames == ['#a', 'b'] assert len(t) == 2 assert t['#a'][1] == '#3' def test_meta_comments(): """ Make sure that line comments are included in the ``meta`` attribute of the output Table. 
""" t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3']) assert t.colnames == ['a', 'b', 'c'] assert t.meta['comments'] == ['comment1', 'comment2'] def test_guess_fail(): """ Check the error message when guess fails """ with pytest.raises(ascii.InconsistentTableError) as err: ascii.read('asfdasdf\n1 2 3', format='basic') assert "** To figure out why the table did not read, use guess=False and" in str(err.value) # Test the case with guessing enabled but for a format that has no free params with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='ipac') assert 'At least one header line beginning and ending with delimiter required' in str(err.value) # Test the case with guessing enabled but with all params specified with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False) assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value) @pytest.mark.xfail('not HAS_BZ2') def test_guessing_file_object(): """ Test guessing a file object. Fixes #3013 and similar issue noted in #3019. """ t = ascii.read(open('data/ipac.dat.bz2', 'rb')) assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype'] def test_pformat_roundtrip(): """Check that the screen output of ``print tab`` can be read. See #3025.""" """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,1.11,1', '2, 2, 4.0 , ss ']) dat = ascii.read(table) out = ascii.read(dat.pformat()) assert len(dat) == len(out) assert dat.colnames == out.colnames for c in dat.colnames: assert np.all(dat[c] == out[c]) def test_ipac_abbrev(): lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|', '| r | rE | rea | real | D | do | dou | f | i | l | da| c |', ' 1 2 3 4 5 6 7 8 9 10 11 12 '] dat = ascii.read(lines, format='ipac') for name in dat.columns[0:8]: assert dat[name].dtype.kind == 'f' for name in dat.columns[8:10]: assert dat[name].dtype.kind == 'i' for name in dat.columns[10:12]: assert dat[name].dtype.kind in ('U', 'S') def test_almost_but_not_quite_daophot(): '''Regression test for #3319. This tables looks so close to a daophot table, that the daophot reader gets quite far before it fails with an AttributeError. Note that this table will actually be read as Commented Header table with the columns ['some', 'header', 'info']. ''' lines = ["# some header info", "#F header info beginning with 'F'", "1 2 3", "4 5 6", "7 8 9"] dat = ascii.read(lines) assert len(dat) == 3 @pytest.mark.parametrize('fast', [False, 'force']) def test_commented_header_comments(fast): """ Test that comments in commented_header are as expected with header_start at different positions, and that the table round-trips. 
""" comments = ['comment 1', 'comment 2', 'comment 3'] lines = ['# a b', '# comment 1', '# comment 2', '# comment 3', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] out = StringIO() ascii.write(dat, out, format='commented_header', fast_writer=fast) assert out.getvalue().splitlines() == lines lines.insert(1, lines.pop(0)) dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(2, lines.pop(1)) dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(3, lines.pop(2)) dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines = ['# a b', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert 'comments' not in dat.meta assert dat.colnames == ['a', 'b'] def test_probably_html(): """ Test the routine for guessing if a table input to ascii.read is probably HTML """ for table in ('data/html.html', 'http://blah.com/table.html', 'https://blah.com/table.html', 'file://blah/table.htm', 'ftp://blah.com/table.html', 'file://blah.com/table.htm', ' <! doctype html > hello world', 'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype html > ', ' hello world'), ): assert _probably_html(table) is True for table in ('data/html.htms', 'Xhttp://blah.com/table.html', ' https://blah.com/table.htm', 'fole://blah/table.htm', ' < doctype html > hello world', 'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype htm > ', ' hello world'), [[1, 2, 3]], ): assert _probably_html(table) is False @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_data_header_start(fast_reader): tests = [(['# comment', '', ' ', 'skip this line', # line 0 'a b', # line 1 '1 2'], # line 2 [{'header_start': 1}, {'header_start': 1, 'data_start': 2} ] ), (['# comment', '', ' \t', 'skip this line', # line 0 'a b', # line 1 '', ' \t', 'skip this line', # line 2 '1 2'], # line 3 [{'header_start': 1, 'data_start': 3}]), (['# comment', '', ' ', 'a b', # line 0 '', ' ', 'skip this line', # line 1 '1 2'], # line 2 [{'header_start': 0, 'data_start': 2}, {'data_start': 2}])] for lines, kwargs_list in tests: for kwargs in kwargs_list: t = ascii.read(lines, format='basic', fast_reader=fast_reader, guess=True, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 1 assert np.all(t['a'] == [1]) # Sanity check that the expected Reader is being used assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Basic if (fast_reader is False) else ascii.FastBasic) def test_table_with_no_newline(): """ Test that an input file which is completely empty fails in the expected way. Test that an input file with one line but no newline succeeds. 
""" # With guessing table = BytesIO() with pytest.raises(ascii.InconsistentTableError): ascii.read(table) # Without guessing table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=False, format='basic') assert 'No header line found' in str(err.value) table = BytesIO() t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic') assert not t and t.as_array().size == 0 # Put a single line of column names but with no newline for kwargs in [dict(), dict(guess=False, fast_reader=False, format='basic'), dict(guess=False, fast_reader=True, format='fast_basic')]: table = BytesIO() table.write(b'a b') t = ascii.read(table, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 0 def test_path_object(): fpath = pathlib.Path('data/simple.txt') data = ascii.read(fpath) assert len(data) == 2 assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4'] assert data['test2'][1] == 'hat2' def test_column_conversion_error(): """ Test that context information (upstream exception message) from column conversion error is provided. """ ipac = """\ | col0 | | double | 1 2 """ with pytest.raises(ValueError) as err: ascii.read(ipac, guess=False, format='ipac') assert 'Column col0 failed to convert:' in str(err.value) with pytest.raises(ValueError) as err: ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []}) assert 'no converters' in str(err.value) def test_non_C_locale_with_fast_reader(): """Test code that forces "C" locale while calling fast reader (#4364)""" current = locale.setlocale(locale.LC_ALL) try: if platform.system() == 'Darwin': locale.setlocale(locale.LC_ALL, 'de_DE') else: locale.setlocale(locale.LC_ALL, 'de_DE.utf8') for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}): t = ascii.read(['a b', '1.5 2'], format='basic', guess=False, fast_reader=fast_reader) assert t['a'].dtype.kind == 'f' except locale.Error as e: pytest.skip(f'Locale error: {e}') finally: locale.setlocale(locale.LC_ALL, current) def test_no_units_for_char_columns(): '''Test that a char column of a Table is assigned no unit and not a dimensionless unit.''' t1 = Table([["A"]], names="B") out = StringIO() ascii.write(t1, out, format="ipac") t2 = ascii.read(out.getvalue(), format="ipac", guess=False) assert t2["B"].unit is None def test_initial_column_fill_values(): """Regression test for #5336, #5338.""" class TestHeader(ascii.BasicHeader): def _set_cols_from_names(self): self.cols = [ascii.Column(name=x) for x in self.names] # Set some initial fill values for col in self.cols: col.fill_values = {'--': '0'} class Tester(ascii.Basic): header_class = TestHeader reader = ascii.get_reader(Reader=Tester) assert reader.read("""# Column definition is the first uncommented line # Default delimiter is the space character. a b c # Data starts after the header column definition, blank lines ignored -- 2 3 4 5 6 """)['a'][0] is np.ma.masked def test_latex_no_trailing_backslash(): """ Test that latex/aastex file with no trailing backslash can be read. 
""" lines = r""" \begin{table} \begin{tabular}{ccc} a & b & c \\ 1 & 1.0 & c \\ % comment 3\% & 3.0 & e % comment \end{tabular} \end{table} """ dat = ascii.read(lines, format='latex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) def text_aastex_no_trailing_backslash(): lines = r""" \begin{deluxetable}{ccc} \tablehead{\colhead{a} & \colhead{b} & \colhead{c}} \startdata 1 & 1.0 & c \\ 2 & 2.0 & d \\ % comment 3\% & 3.0 & e % comment \enddata \end{deluxetable} """ dat = ascii.read(lines, format='aastex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) @pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252']) def test_read_with_encoding(tmpdir, encoding): data = { 'commented_header': '# à b è \n 1 2 héllo', 'csv': 'à,b,è\n1,2,héllo' } testfile = str(tmpdir.join('test.txt')) for fmt, content in data.items(): with open(testfile, 'w', encoding=encoding) as f: f.write(content) table = ascii.read(testfile, encoding=encoding) assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] for guess in (True, False): table = ascii.read(testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess) assert table['è'].dtype.kind == 'U' assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] def test_unsupported_read_with_encoding(tmpdir): # Fast reader is not supported, make sure it raises an exception with pytest.raises(ascii.ParameterError): ascii.read('data/simple3.txt', guess=False, fast_reader='force', encoding='latin1', format='fast_csv') def test_read_chunks_input_types(): """ Test chunked reading for different input types: file path, file object, and string input. """ fpath = 'data/test5.dat' t1 = ascii.read(fpath, header_start=1, data_start=3, ) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): t_gen = ascii.read(fp, header_start=1, data_start=3, guess=False, format='fast_basic', fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) == 4 t2 = table.vstack(ts) assert np.all(t1 == t2) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): # Now read the full table in chunks t3 = ascii.read(fp, header_start=1, data_start=3, fast_reader={'chunk_size': 300}) assert np.all(t1 == t3) @pytest.mark.parametrize('masked', [True, False]) def test_read_chunks_formats(masked): """ Test different supported formats for chunked reading. """ t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked) for i, name in enumerate(t1.colnames): t1.rename_column(name, 'col{}'.format(i + 1)) # TO DO commented_header does not currently work due to the special-cased # implementation of header parsing. 
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic': out = StringIO() ascii.write(t1, out, format=format) t_gen = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) > 4 t2 = table.vstack(ts) assert np.all(t1 == t2) # Now read the full table in chunks t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400}) assert np.all(t1 == t3) def test_read_chunks_chunk_size_too_small(): fpath = 'data/test5.dat' with pytest.raises(ValueError) as err: ascii.read(fpath, header_start=1, data_start=3, fast_reader={'chunk_size': 10}) assert 'no newline found in chunk (chunk_size too small?)' in str(err.value) def test_read_chunks_table_changes(): """Column changes type or size between chunks. This also tests the case with no final newline. """ col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50 table = '\n'.join(col) t1 = ascii.read(table, guess=False) t2 = ascii.read(table, fast_reader={'chunk_size': 100}) # This also confirms that the dtypes are exactly the same, i.e. # the string itemsizes are the same. assert np.all(t1 == t2) def test_read_non_ascii(): """Test that pure-Python reader is used in case the file contains non-ASCII characters in it. """ table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv') assert np.all(table['col1'] == ['\u2119', '1']) assert np.all(table['col2'] == ['\u01b4', '2']) @pytest.mark.parametrize('enable', [True, False, 'force']) def test_kwargs_dict_guess(enable): """Test that fast_reader dictionary is preserved through guessing sequence. """ # Fails for enable=(True, 'force') - #5578 ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable)) assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Tab if (enable is False) else ascii.FastTab) for k in get_read_trace(): if not k.get('status', 'Disabled').startswith('Disabled'): assert k.get('kwargs').get('fast_reader').get('enable') is enable
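# Usage sketch mirroring the chunked-reading tests above (illustrative; the
# filename is a placeholder): stream a large file through the fast reader in
# fixed-size chunks, then stack the pieces into one Table. Relies on the
# module-level `ascii` and `table` imports.
def _example_read_in_chunks(path='big_table.dat'):  # pragma: no cover
    t_gen = ascii.read(path, guess=False, format='fast_basic',
                       fast_reader={'chunk_size': 1_000_000,
                                    'chunk_generator': True})
    return table.vstack(list(t_gen))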
MSeifert04/astropy
astropy/io/ascii/tests/test_read.py
astropy/io/misc/hdf5.py
# Licensed under a 3-clause BSD style license - see PYFITS.rst import collections import copy import itertools import re import warnings from .card import Card, _pad, KEYWORD_LENGTH, UNDEFINED from .file import _File from .util import (encode_ascii, decode_ascii, fileobj_closed, fileobj_is_binary, path_like) from ._utils import parse_header from astropy.utils import isiterable from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.decorators import deprecated_renamed_argument BLOCK_SIZE = 2880 # the FITS block size # This regular expression can match a *valid* END card which just consists of # the string 'END' followed by all spaces, or an *invalid* end card which # consists of END, followed by any character that is *not* a valid character # for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which # starts with 'END' but is not 'END'), followed by any arbitrary bytes. An # invalid end card may also consist of just 'END' with no trailing bytes. HEADER_END_RE = re.compile(encode_ascii( r'(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])')) # According to the FITS standard the only characters that may appear in a # header record are the restricted ASCII chars from 0x20 through 0x7E. VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F))) END_CARD = 'END' + ' ' * 77 __doctest_skip__ = ['Header', 'Header.comments', 'Header.fromtextfile', 'Header.totextfile', 'Header.set', 'Header.update'] class Header: """ FITS header class. This class exposes both a dict-like interface and a list-like interface to FITS headers. The header may be indexed by keyword and, like a dict, the associated value will be returned. When the header contains cards with duplicate keywords, only the value of the first card with the given keyword will be returned. It is also possible to use a 2-tuple as the index in the form (keyword, n)--this returns the n-th value with that keyword, in the case where there are duplicate keywords. For example:: >>> header['NAXIS'] 0 >>> header[('FOO', 1)] # Return the value of the second FOO keyword 'foo' The header may also be indexed by card number:: >>> header[0] # Return the value of the first card in the header 'T' Commentary keywords such as HISTORY and COMMENT are special cases: When indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all the HISTORY/COMMENT values is returned:: >>> header['HISTORY'] This is the first history entry in this header. This is the second history entry in this header. ... See the Astropy documentation for more details on working with headers. """ def __init__(self, cards=[], copy=False): """ Construct a `Header` from an iterable and/or text file. Parameters ---------- cards : A list of `Card` objects, optional The cards to initialize the header with. Also allowed are other `Header` (or `dict`-like) objects. .. versionchanged:: 1.2 Allowed ``cards`` to be a `dict`-like object. copy : bool, optional If ``True`` copies the ``cards`` if they were another `Header` instance. Default is ``False``. .. 
versionadded:: 1.3 """ self.clear() if isinstance(cards, Header): if copy: cards = cards.copy() cards = cards.cards elif isinstance(cards, dict): cards = cards.items() for card in cards: self.append(card, end=True) self._modified = False def __len__(self): return len(self._cards) def __iter__(self): for card in self._cards: yield card.keyword def __contains__(self, keyword): if keyword in self._keyword_indices or keyword in self._rvkc_indices: # For the most common case (single, standard form keyword lookup) # this will work and is an O(1) check. If it fails that doesn't # guarantee absence, just that we have to perform the full set of # checks in self._cardindex return True try: self._cardindex(keyword) except (KeyError, IndexError): return False return True def __getitem__(self, key): if isinstance(key, slice): return self.__class__([copy.copy(c) for c in self._cards[key]]) elif self._haswildcard(key): return self.__class__([copy.copy(self._cards[idx]) for idx in self._wildcardmatch(key)]) elif (isinstance(key, str) and key.upper() in Card._commentary_keywords): key = key.upper() # Special case for commentary cards return _HeaderCommentaryCards(self, key) if isinstance(key, tuple): keyword = key[0] else: keyword = key card = self._cards[self._cardindex(key)] if card.field_specifier is not None and keyword == card.rawkeyword: # This is RVKC; if only the top-level keyword was specified return # the raw value, not the parsed out float value return card.rawvalue value = card.value if value == UNDEFINED: return None return value def __setitem__(self, key, value): if self._set_slice(key, value, self): return if isinstance(value, tuple): if not (0 < len(value) <= 2): raise ValueError( 'A Header item may be set with either a scalar value, ' 'a 1-tuple containing a scalar value, or a 2-tuple ' 'containing a scalar value and comment string.') if len(value) == 1: value, comment = value[0], None if value is None: value = UNDEFINED elif len(value) == 2: value, comment = value if value is None: value = UNDEFINED if comment is None: comment = '' else: comment = None card = None if isinstance(key, int): card = self._cards[key] elif isinstance(key, tuple): card = self._cards[self._cardindex(key)] if value is None: value = UNDEFINED if card: card.value = value if comment is not None: card.comment = comment if card._modified: self._modified = True else: # If we get an IndexError that should be raised; we don't allow # assignment to non-existing indices self._update((key, value, comment)) def __delitem__(self, key): if isinstance(key, slice) or self._haswildcard(key): # This is very inefficient but it's not a commonly used feature. # If someone out there complains that they make heavy use of slice # deletions and it's too slow, well, we can worry about it then # [the solution is not too complicated--it would be wait 'til all # the cards are deleted before updating _keyword_indices rather # than updating it once for each card that gets deleted] if isinstance(key, slice): indices = range(*key.indices(len(self))) # If the slice step is backwards we want to reverse it, because # it will be reversed in a few lines... 
if key.step and key.step < 0: indices = reversed(indices) else: indices = self._wildcardmatch(key) for idx in reversed(indices): del self[idx] return elif isinstance(key, str): # delete ALL cards with the same keyword name key = Card.normalize_keyword(key) indices = self._keyword_indices if key not in self._keyword_indices: indices = self._rvkc_indices if key not in indices: # if keyword is not present raise KeyError. # To delete keyword without caring if they were present, # Header.remove(Keyword) can be used with optional argument ignore_missing as True raise KeyError(f"Keyword '{key}' not found.") for idx in reversed(indices[key]): # Have to copy the indices list since it will be modified below del self[idx] return idx = self._cardindex(key) card = self._cards[idx] keyword = card.keyword del self._cards[idx] keyword = Card.normalize_keyword(keyword) indices = self._keyword_indices[keyword] indices.remove(idx) if not indices: del self._keyword_indices[keyword] # Also update RVKC indices if necessary :/ if card.field_specifier is not None: indices = self._rvkc_indices[card.rawkeyword] indices.remove(idx) if not indices: del self._rvkc_indices[card.rawkeyword] # We also need to update all other indices self._updateindices(idx, increment=False) self._modified = True def __repr__(self): return self.tostring(sep='\n', endcard=False, padding=False) def __str__(self): return self.tostring() def __eq__(self, other): """ Two Headers are equal only if they have the exact same string representation. """ return str(self) == str(other) def __add__(self, other): temp = self.copy(strip=False) temp.extend(other) return temp def __iadd__(self, other): self.extend(other) return self def _ipython_key_completions_(self): return self.__iter__() @property def cards(self): """ The underlying physical cards that make up this Header; it can be looked at, but it should not be modified directly. """ return _CardAccessor(self) @property def comments(self): """ View the comments associated with each keyword, if any. For example, to see the comment on the NAXIS keyword: >>> header.comments['NAXIS'] number of data axes Comments can also be updated through this interface: >>> header.comments['NAXIS'] = 'Number of data axes' """ return _HeaderComments(self) @property def _modified(self): """ Whether or not the header has been modified; this is a property so that it can also check each card for modifications--cards may have been modified directly without the header containing it otherwise knowing. """ modified_cards = any(c._modified for c in self._cards) if modified_cards: # If any cards were modified then by definition the header was # modified self.__dict__['_modified'] = True return self.__dict__['_modified'] @_modified.setter def _modified(self, val): self.__dict__['_modified'] = val @classmethod def fromstring(cls, data, sep=''): """ Creates an HDU header from a byte string containing the entire header data. Parameters ---------- data : str or bytes String or bytes containing the entire header. In the case of bytes they will be decoded using latin-1 (only plain ASCII characters are allowed in FITS headers but latin-1 allows us to retain any invalid bytes that might appear in malformatted FITS files). sep : str, optional The string separating cards from each other, such as a newline. By default there is no card separator (as is the case in a raw FITS file). In general this is only used in cases where a header was printed as text (e.g. 
with newlines after each card) and you want to create a new `Header` from it by copy/pasting. Examples -------- >>> from astropy.io.fits import Header >>> hdr = Header({'SIMPLE': True}) >>> Header.fromstring(hdr.tostring()) == hdr True If you want to create a `Header` from printed text it's not necessary to have the exact binary structure as it would appear in a FITS file, with the full 80 byte card length. Rather, each "card" can end in a newline and does not have to be padded out to a full card length as long as it "looks like" a FITS header: >>> hdr = Header.fromstring(\"\"\"\\ ... SIMPLE = T / conforms to FITS standard ... BITPIX = 8 / array data type ... NAXIS = 0 / number of array dimensions ... EXTEND = T ... \"\"\", sep='\\n') >>> hdr['SIMPLE'] True >>> hdr['BITPIX'] 8 >>> len(hdr) 4 Returns ------- header A new `Header` instance. """ cards = [] # If the card separator contains characters that may validly appear in # a card, the only way to unambiguously distinguish between cards is to # require that they be Card.length long. However, if the separator # contains non-valid characters (namely \n) the cards may be split # immediately at the separator require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS) if isinstance(data, bytes): # FITS supports only ASCII, but decode as latin1 and just take all # bytes for now; if it results in mojibake due to e.g. UTF-8 # encoded data in a FITS header that's OK because it shouldn't be # there in the first place--accepting it here still gives us the # opportunity to display warnings later during validation CONTINUE = b'CONTINUE' END = b'END' end_card = END_CARD.encode('ascii') sep = sep.encode('latin1') empty = b'' else: CONTINUE = 'CONTINUE' END = 'END' end_card = END_CARD empty = '' # Split the header into individual cards idx = 0 image = [] while idx < len(data): if require_full_cardlength: end_idx = idx + Card.length else: try: end_idx = data.index(sep, idx) except ValueError: end_idx = len(data) next_image = data[idx:end_idx] idx = end_idx + len(sep) if image: if next_image[:8] == CONTINUE: image.append(next_image) continue cards.append(Card.fromstring(empty.join(image))) if require_full_cardlength: if next_image == end_card: image = [] break else: if next_image.split(sep)[0].rstrip() == END: image = [] break image = [next_image] # Add the last image that was found before the end, if any if image: cards.append(Card.fromstring(empty.join(image))) return cls._fromcards(cards) @classmethod def fromfile(cls, fileobj, sep='', endcard=True, padding=True): """ Similar to :meth:`Header.fromstring`, but reads the header string from a given file-like object or filename. Parameters ---------- fileobj : str, file-like A filename or an open file-like object from which a FITS header is to be read. For open file handles the file pointer must be at the beginning of the header. sep : str, optional The string separating cards from each other, such as a newline. By default there is no card separator (as is the case in a raw FITS file). endcard : bool, optional If True (the default) the header must end with an END card in order to be considered valid. If an END card is not found an `OSError` is raised. padding : bool, optional If True (the default) the header will be required to be padded out to a multiple of 2880, the FITS header block size. Otherwise any padding, or lack thereof, is ignored. Returns ------- header A new `Header` instance. 
""" close_file = False if isinstance(fileobj, path_like): # If sep is non-empty we are trying to read a header printed to a # text file, so open in text mode by default to support newline # handling; if a binary-mode file object is passed in, the user is # then on their own w.r.t. newline handling. # # Otherwise assume we are reading from an actual FITS file and open # in binary mode. if sep: fileobj = open(fileobj, 'r', encoding='latin1') else: fileobj = open(fileobj, 'rb') close_file = True try: is_binary = fileobj_is_binary(fileobj) def block_iter(nbytes): while True: data = fileobj.read(nbytes) if data: yield data else: break return cls._from_blocks(block_iter, is_binary, sep, endcard, padding)[1] finally: if close_file: fileobj.close() @classmethod def _fromcards(cls, cards): header = cls() for idx, card in enumerate(cards): header._cards.append(card) keyword = Card.normalize_keyword(card.keyword) header._keyword_indices[keyword].append(idx) if card.field_specifier is not None: header._rvkc_indices[card.rawkeyword].append(idx) header._modified = False return header @classmethod def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding): """ The meat of `Header.fromfile`; in a separate method so that `Header.fromfile` itself is just responsible for wrapping file handling. Also used by `_BaseHDU.fromstring`. ``block_iter`` should be a callable which, given a block size n (typically 2880 bytes as used by the FITS standard) returns an iterator of byte strings of that block size. ``is_binary`` specifies whether the returned blocks are bytes or text Returns both the entire header *string*, and the `Header` object returned by Header.fromstring on that string. """ actual_block_size = _block_size(sep) clen = Card.length + len(sep) blocks = block_iter(actual_block_size) # Read the first header block. try: block = next(blocks) except StopIteration: raise EOFError() if not is_binary: # TODO: There needs to be error handling at *this* level for # non-ASCII characters; maybe at this stage decoding latin-1 might # be safer block = encode_ascii(block) read_blocks = [] is_eof = False end_found = False # continue reading header blocks until END card or EOF is reached while True: # find the END card end_found, block = cls._find_end_card(block, clen) read_blocks.append(decode_ascii(block)) if end_found: break try: block = next(blocks) except StopIteration: is_eof = True break if not block: is_eof = True break if not is_binary: block = encode_ascii(block) if not end_found and is_eof and endcard: # TODO: Pass this error to validation framework as an ERROR, # rather than raising an exception raise OSError('Header missing END card.') header_str = ''.join(read_blocks) _check_padding(header_str, actual_block_size, is_eof, check_block_size=padding) return header_str, cls.fromstring(header_str, sep=sep) @classmethod def _find_end_card(cls, block, card_len): """ Utility method to search a header block for the END card and handle invalid END cards. This method can also returned a modified copy of the input header block in case an invalid end card needs to be sanitized. 
""" for mo in HEADER_END_RE.finditer(block): # Ensure the END card was found, and it started on the # boundary of a new card (see ticket #142) if mo.start() % card_len != 0: continue # This must be the last header block, otherwise the # file is malformatted if mo.group('invalid'): offset = mo.start() trailing = block[offset + 3:offset + card_len - 3].rstrip() if trailing: trailing = repr(trailing).lstrip('ub') # TODO: Pass this warning up to the validation framework warnings.warn( 'Unexpected bytes trailing END keyword: {}; these ' 'bytes will be replaced with spaces on write.'.format( trailing), AstropyUserWarning) else: # TODO: Pass this warning up to the validation framework warnings.warn( 'Missing padding to end of the FITS block after the ' 'END keyword; additional spaces will be appended to ' 'the file upon writing to pad out to {} ' 'bytes.'.format(BLOCK_SIZE), AstropyUserWarning) # Sanitize out invalid END card now that the appropriate # warnings have been issued block = (block[:offset] + encode_ascii(END_CARD) + block[offset + len(END_CARD):]) return True, block return False, block def tostring(self, sep='', endcard=True, padding=True): r""" Returns a string representation of the header. By default this uses no separator between cards, adds the END card, and pads the string with spaces to the next multiple of 2880 bytes. That is, it returns the header exactly as it would appear in a FITS file. Parameters ---------- sep : str, optional The character or string with which to separate cards. By default there is no separator, but one could use ``'\\n'``, for example, to separate each card with a new line endcard : bool, optional If True (default) adds the END card to the end of the header string padding : bool, optional If True (default) pads the string with spaces out to the next multiple of 2880 characters Returns ------- s : str A string representing a FITS header. """ lines = [] for card in self._cards: s = str(card) # Cards with CONTINUE cards may be longer than 80 chars; so break # them into multiple lines while s: lines.append(s[:Card.length]) s = s[Card.length:] s = sep.join(lines) if endcard: s += sep + _pad('END') if padding: s += ' ' * _pad_length(len(s)) return s @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def tofile(self, fileobj, sep='', endcard=True, padding=True, overwrite=False): r""" Writes the header to file or file-like object. By default this writes the header exactly as it would be written to a FITS file, with the END card included and padding to the next multiple of 2880 bytes. However, aspects of this may be controlled. Parameters ---------- fileobj : str, file, optional Either the pathname of a file, or an open file handle or file-like object sep : str, optional The character or string with which to separate cards. By default there is no separator, but one could use ``'\\n'``, for example, to separate each card with a new line endcard : bool, optional If `True` (default) adds the END card to the end of the header string padding : bool, optional If `True` (default) pads the string with spaces out to the next multiple of 2880 characters overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. 
""" close_file = fileobj_closed(fileobj) if not isinstance(fileobj, _File): fileobj = _File(fileobj, mode='ostream', overwrite=overwrite) try: blocks = self.tostring(sep=sep, endcard=endcard, padding=padding) actual_block_size = _block_size(sep) if padding and len(blocks) % actual_block_size != 0: raise OSError( 'Header size ({}) is not a multiple of block ' 'size ({}).'.format( len(blocks) - actual_block_size + BLOCK_SIZE, BLOCK_SIZE)) if not fileobj.simulateonly: fileobj.flush() fileobj.write(blocks.encode('ascii')) fileobj.flush() finally: if close_file: fileobj.close() @classmethod def fromtextfile(cls, fileobj, endcard=False): """ Read a header from a simple text file or file-like object. Equivalent to:: >>> Header.fromfile(fileobj, sep='\\n', endcard=False, ... padding=False) See Also -------- fromfile """ return cls.fromfile(fileobj, sep='\n', endcard=endcard, padding=False) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def totextfile(self, fileobj, endcard=False, overwrite=False): """ Write the header as text to a file or a file-like object. Equivalent to:: >>> Header.tofile(fileobj, sep='\\n', endcard=False, ... padding=False, overwrite=overwrite) .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. See Also -------- tofile """ self.tofile(fileobj, sep='\n', endcard=endcard, padding=False, overwrite=overwrite) def clear(self): """ Remove all cards from the header. """ self._cards = [] self._keyword_indices = collections.defaultdict(list) self._rvkc_indices = collections.defaultdict(list) def copy(self, strip=False): """ Make a copy of the :class:`Header`. .. versionchanged:: 1.3 `copy.copy` and `copy.deepcopy` on a `Header` will call this method. Parameters ---------- strip : bool, optional If `True`, strip any headers that are specific to one of the standard HDU types, so that this header can be used in a different HDU. Returns ------- header A new :class:`Header` instance. """ tmp = self.__class__((copy.copy(card) for card in self._cards)) if strip: tmp._strip() return tmp def __copy__(self): return self.copy() def __deepcopy__(self, *args, **kwargs): return self.copy() @classmethod def fromkeys(cls, iterable, value=None): """ Similar to :meth:`dict.fromkeys`--creates a new `Header` from an iterable of keywords and an optional default value. This method is not likely to be particularly useful for creating real world FITS headers, but it is useful for testing. Parameters ---------- iterable Any iterable that returns strings representing FITS keywords. value : optional A default value to assign to each keyword; must be a valid type for FITS keywords. Returns ------- header A new `Header` instance. """ d = cls() if not isinstance(value, tuple): value = (value,) for key in iterable: d.append((key,) + value) return d def get(self, key, default=None): """ Similar to :meth:`dict.get`--returns the value associated with keyword in the header, or a default value if the keyword is not found. Parameters ---------- key : str A keyword that may or may not be in the header. default : optional A default value to return if the keyword is not found in the header. Returns ------- value The value associated with the given keyword, or the default value if the keyword is not in the header. """ try: return self[key] except (KeyError, IndexError): return default def set(self, keyword, value=None, comment=None, before=None, after=None): """ Set the value and/or comment and/or position of a specified keyword. 
If the keyword does not already exist in the header, a new keyword is created in the specified position, or appended to the end of the header if no position is specified. This method is similar to :meth:`Header.update` prior to Astropy v0.1. .. note:: It should be noted that ``header.set(keyword, value)`` and ``header.set(keyword, value, comment)`` are equivalent to ``header[keyword] = value`` and ``header[keyword] = (value, comment)`` respectively. New keywords can also be inserted relative to existing keywords using, for example:: >>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes')) to insert before an existing keyword, or:: >>> header.insert('NAXIS', ('NAXIS1', 4096), after=True) to insert after an existing keyword. The only advantage of using :meth:`Header.set` is that it easily replaces the old usage of :meth:`Header.update` both conceptually and in terms of function signature. Parameters ---------- keyword : str A header keyword value : str, optional The value to set for the given keyword; if None the existing value is kept, but '' may be used to set a blank value comment : str, optional The comment to set for the given keyword; if None the existing comment is kept, but ``''`` may be used to set a blank comment before : str, int, optional Name of the keyword, or index of the `Card` before which this card should be located in the header. The argument ``before`` takes precedence over ``after`` if both specified. after : str, int, optional Name of the keyword, or index of the `Card` after which this card should be located in the header. """ # Create a temporary card that looks like the one being set; if the # temporary card turns out to be a RVKC this will make it easier to # deal with the idiosyncrasies thereof # Don't try to make a temporary card though if they keyword looks like # it might be a HIERARCH card or is otherwise invalid--this step is # only for validating RVKCs. if (len(keyword) <= KEYWORD_LENGTH and Card._keywd_FSC_RE.match(keyword) and keyword not in self._keyword_indices): new_card = Card(keyword, value, comment) new_keyword = new_card.keyword else: new_keyword = keyword if (new_keyword not in Card._commentary_keywords and new_keyword in self): if comment is None: comment = self.comments[keyword] if value is None: value = self[keyword] self[keyword] = (value, comment) if before is not None or after is not None: card = self._cards[self._cardindex(keyword)] self._relativeinsert(card, before=before, after=after, replace=True) elif before is not None or after is not None: self._relativeinsert((keyword, value, comment), before=before, after=after) else: self[keyword] = (value, comment) def items(self): """Like :meth:`dict.items`.""" for card in self._cards: yield (card.keyword, card.value) def keys(self): """ Like :meth:`dict.keys`--iterating directly over the `Header` instance has the same behavior. """ for card in self._cards: yield card.keyword def values(self): """Like :meth:`dict.values`.""" for card in self._cards: yield card.value def pop(self, *args): """ Works like :meth:`list.pop` if no arguments or an index argument are supplied; otherwise works like :meth:`dict.pop`. 
""" if len(args) > 2: raise TypeError('Header.pop expected at most 2 arguments, got ' '{}'.format(len(args))) if len(args) == 0: key = -1 else: key = args[0] try: value = self[key] except (KeyError, IndexError): if len(args) == 2: return args[1] raise del self[key] return value def popitem(self): """Similar to :meth:`dict.popitem`.""" try: k, v = next(self.items()) except StopIteration: raise KeyError('Header is empty') del self[k] return k, v def setdefault(self, key, default=None): """Similar to :meth:`dict.setdefault`.""" try: return self[key] except (KeyError, IndexError): self[key] = default return default def update(self, *args, **kwargs): """ Update the Header with new keyword values, updating the values of existing keywords and appending new keywords otherwise; similar to `dict.update`. `update` accepts either a dict-like object or an iterable. In the former case the keys must be header keywords and the values may be either scalar values or (value, comment) tuples. In the case of an iterable the items must be (keyword, value) tuples or (keyword, value, comment) tuples. Arbitrary arguments are also accepted, in which case the update() is called again with the kwargs dict as its only argument. That is, :: >>> header.update(NAXIS1=100, NAXIS2=100) is equivalent to:: header.update({'NAXIS1': 100, 'NAXIS2': 100}) .. warning:: As this method works similarly to `dict.update` it is very different from the ``Header.update()`` method in Astropy v0.1. Use of the old API was **deprecated** for a long time and is now removed. Most uses of the old API can be replaced as follows: * Replace :: header.update(keyword, value) with :: header[keyword] = value * Replace :: header.update(keyword, value, comment=comment) with :: header[keyword] = (value, comment) * Replace :: header.update(keyword, value, before=before_keyword) with :: header.insert(before_keyword, (keyword, value)) * Replace :: header.update(keyword, value, after=after_keyword) with :: header.insert(after_keyword, (keyword, value), after=True) See also :meth:`Header.set` which is a new method that provides an interface similar to the old ``Header.update()`` and may help make transition a little easier. """ if args: other = args[0] else: other = None def update_from_dict(k, v): if not isinstance(v, tuple): card = Card(k, v) elif 0 < len(v) <= 2: card = Card(*((k,) + v)) else: raise ValueError( 'Header update value for key %r is invalid; the ' 'value must be either a scalar, a 1-tuple ' 'containing the scalar value, or a 2-tuple ' 'containing the value and a comment string.' % k) self._update(card) if other is None: pass elif isinstance(other, Header): for card in other.cards: self._update(card) elif hasattr(other, 'items'): for k, v in other.items(): update_from_dict(k, v) elif hasattr(other, 'keys'): for k in other.keys(): update_from_dict(k, other[k]) else: for idx, card in enumerate(other): if isinstance(card, Card): self._update(card) elif isinstance(card, tuple) and (1 < len(card) <= 3): self._update(Card(*card)) else: raise ValueError( 'Header update sequence item #{} is invalid; ' 'the item must either be a 2-tuple containing ' 'a keyword and value, or a 3-tuple containing ' 'a keyword, value, and comment string.'.format(idx)) if kwargs: self.update(kwargs) def append(self, card=None, useblanks=True, bottom=False, end=False): """ Appends a new keyword+value card to the end of the Header, similar to `list.append`. 
By default if the last cards in the Header have commentary keywords, this will append the new keyword before the commentary (unless the new keyword is also commentary). Also differs from `list.append` in that it can be called with no arguments: In this case a blank card is appended to the end of the Header. In the case all the keyword arguments are ignored. Parameters ---------- card : str, tuple A keyword or a (keyword, value, [comment]) tuple representing a single header card; the comment is optional in which case a 2-tuple may be used useblanks : bool, optional If there are blank cards at the end of the Header, replace the first blank card so that the total number of cards in the Header does not increase. Otherwise preserve the number of blank cards. bottom : bool, optional If True, instead of appending after the last non-commentary card, append after the last non-blank card. end : bool, optional If True, ignore the useblanks and bottom options, and append at the very end of the Header. """ if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif card is None: card = Card() elif not isinstance(card, Card): raise ValueError( 'The value appended to a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) if not end and card.is_blank: # Blank cards should always just be appended to the end end = True if end: self._cards.append(card) idx = len(self._cards) - 1 else: idx = len(self._cards) - 1 while idx >= 0 and self._cards[idx].is_blank: idx -= 1 if not bottom and card.keyword not in Card._commentary_keywords: while (idx >= 0 and self._cards[idx].keyword in Card._commentary_keywords): idx -= 1 idx += 1 self._cards.insert(idx, card) self._updateindices(idx) keyword = Card.normalize_keyword(card.keyword) self._keyword_indices[keyword].append(idx) if card.field_specifier is not None: self._rvkc_indices[card.rawkeyword].append(idx) if not end: # If the appended card was a commentary card, and it was appended # before existing cards with the same keyword, the indices for # cards with that keyword may have changed if not bottom and card.keyword in Card._commentary_keywords: self._keyword_indices[keyword].sort() # Finally, if useblanks, delete a blank cards from the end if useblanks and self._countblanks(): # Don't do this unless there is at least one blanks at the end # of the header; we need to convert the card to its string # image to see how long it is. In the vast majority of cases # this will just be 80 (Card.length) but it may be longer for # CONTINUE cards self._useblanks(len(str(card)) // Card.length) self._modified = True def extend(self, cards, strip=True, unique=False, update=False, update_first=False, useblanks=True, bottom=False, end=False): """ Appends multiple keyword+value cards to the end of the header, similar to `list.extend`. Parameters ---------- cards : iterable An iterable of (keyword, value, [comment]) tuples; see `Header.append`. strip : bool, optional Remove any keywords that have meaning only to specific types of HDUs, so that only more general keywords are added from extension Header or Card list (default: `True`). unique : bool, optional If `True`, ensures that no duplicate keywords are appended; keywords already in this header are simply discarded. The exception is commentary keywords (COMMENT, HISTORY, etc.): they are only treated as duplicates if their values match. 
update : bool, optional If `True`, update the current header with the values and comments from duplicate keywords in the input header. This supersedes the ``unique`` argument. Commentary keywords are treated the same as if ``unique=True``. update_first : bool, optional If the first keyword in the header is 'SIMPLE', and the first keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is replaced by the 'XTENSION' keyword. Likewise if the first keyword in the header is 'XTENSION' and the first keyword in the input header is 'SIMPLE', the 'XTENSION' keyword is replaced by the 'SIMPLE' keyword. This behavior is otherwise dumb as to whether or not the resulting header is a valid primary or extension header. This is mostly provided to support backwards compatibility with the old ``Header.fromTxtFile`` method, and only applies if ``update=True``. useblanks, bottom, end : bool, optional These arguments are passed to :meth:`Header.append` while appending new cards to the header. """ temp = self.__class__(cards) if strip: temp._strip() if len(self): first = self._cards[0].keyword else: first = None # We don't immediately modify the header, because first we need to sift # out any duplicates in the new header prior to adding them to the # existing header, but while *allowing* duplicates from the header # being extended from (see ticket #156) extend_cards = [] for idx, card in enumerate(temp.cards): keyword = card.keyword if keyword not in Card._commentary_keywords: if unique and not update and keyword in self: continue elif update: if idx == 0 and update_first: # Dumbly update the first keyword to either SIMPLE or # XTENSION as the case may be, as was in the case in # Header.fromTxtFile if ((keyword == 'SIMPLE' and first == 'XTENSION') or (keyword == 'XTENSION' and first == 'SIMPLE')): del self[0] self.insert(0, card) else: self[keyword] = (card.value, card.comment) elif keyword in self: self[keyword] = (card.value, card.comment) else: extend_cards.append(card) else: extend_cards.append(card) else: if (unique or update) and keyword in self: if card.is_blank: extend_cards.append(card) continue for value in self[keyword]: if value == card.value: break else: extend_cards.append(card) else: extend_cards.append(card) for card in extend_cards: self.append(card, useblanks=useblanks, bottom=bottom, end=end) def count(self, keyword): """ Returns the count of the given keyword in the header, similar to `list.count` if the Header object is treated as a list of keywords. Parameters ---------- keyword : str The keyword to count instances of in the header """ keyword = Card.normalize_keyword(keyword) # We have to look before we leap, since otherwise _keyword_indices, # being a defaultdict, will create an entry for the nonexistent keyword if keyword not in self._keyword_indices: raise KeyError(f"Keyword {keyword!r} not found.") return len(self._keyword_indices[keyword]) def index(self, keyword, start=None, stop=None): """ Returns the index if the first instance of the given keyword in the header, similar to `list.index` if the Header object is treated as a list of keywords. 
Parameters ---------- keyword : str The keyword to look up in the list of all keywords in the header start : int, optional The lower bound for the index stop : int, optional The upper bound for the index """ if start is None: start = 0 if stop is None: stop = len(self._cards) if stop < start: step = -1 else: step = 1 norm_keyword = Card.normalize_keyword(keyword) for idx in range(start, stop, step): if self._cards[idx].keyword.upper() == norm_keyword: return idx else: raise ValueError('The keyword {!r} is not in the ' ' header.'.format(keyword)) def insert(self, key, card, useblanks=True, after=False): """ Inserts a new keyword+value card into the Header at a given location, similar to `list.insert`. Parameters ---------- key : int, str, or tuple The index into the list of header keywords before which the new keyword should be inserted, or the name of a keyword before which the new keyword should be inserted. Can also accept a (keyword, index) tuple for inserting around duplicate keywords. card : str, tuple A keyword or a (keyword, value, [comment]) tuple; see `Header.append` useblanks : bool, optional If there are blank cards at the end of the Header, replace the first blank card so that the total number of cards in the Header does not increase. Otherwise preserve the number of blank cards. after : bool, optional If set to `True`, insert *after* the specified index or keyword, rather than before it. Defaults to `False`. """ if not isinstance(key, int): # Don't pass through ints to _cardindex because it will not take # kindly to indices outside the existing number of cards in the # header, which insert needs to be able to support (for example # when inserting into empty headers) idx = self._cardindex(key) else: idx = key if after: if idx == -1: idx = len(self._cards) else: idx += 1 if idx >= len(self._cards): # This is just an append (Though it must be an append absolutely to # the bottom, ignoring blanks, etc.--the point of the insert method # is that you get exactly what you asked for with no surprises) self.append(card, end=True) return if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif not isinstance(card, Card): raise ValueError( 'The value inserted into a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) self._cards.insert(idx, card) keyword = card.keyword # If idx was < 0, determine the actual index according to the rules # used by list.insert() if idx < 0: idx += len(self._cards) - 1 if idx < 0: idx = 0 # All the keyword indices above the insertion point must be updated self._updateindices(idx) keyword = Card.normalize_keyword(keyword) self._keyword_indices[keyword].append(idx) count = len(self._keyword_indices[keyword]) if count > 1: # There were already keywords with this same name if keyword not in Card._commentary_keywords: warnings.warn( 'A {!r} keyword already exists in this header. Inserting ' 'duplicate keyword.'.format(keyword), AstropyUserWarning) self._keyword_indices[keyword].sort() if card.field_specifier is not None: # Update the index of RVKC as well rvkc_indices = self._rvkc_indices[card.rawkeyword] rvkc_indices.append(idx) rvkc_indices.sort() if useblanks: self._useblanks(len(str(card)) // Card.length) self._modified = True def remove(self, keyword, ignore_missing=False, remove_all=False): """ Removes the first instance of the given keyword from the header similar to `list.remove` if the Header object is treated as a list of keywords. 
Parameters ---------- keyword : str The keyword of which to remove the first instance in the header. ignore_missing : bool, optional When True, ignores missing keywords. Otherwise, if the keyword is not present in the header a KeyError is raised. remove_all : bool, optional When True, all instances of keyword will be removed. Otherwise only the first instance of the given keyword is removed. """ keyword = Card.normalize_keyword(keyword) if keyword in self._keyword_indices: del self[self._keyword_indices[keyword][0]] if remove_all: while keyword in self._keyword_indices: del self[self._keyword_indices[keyword][0]] elif not ignore_missing: raise KeyError(f"Keyword '{keyword}' not found.") def rename_keyword(self, oldkeyword, newkeyword, force=False): """ Rename a card's keyword in the header. Parameters ---------- oldkeyword : str or int Old keyword or card index newkeyword : str New keyword force : bool, optional When `True`, if the new keyword already exists in the header, force the creation of a duplicate keyword. Otherwise a `ValueError` is raised. """ oldkeyword = Card.normalize_keyword(oldkeyword) newkeyword = Card.normalize_keyword(newkeyword) if newkeyword == 'CONTINUE': raise ValueError('Can not rename to CONTINUE') if (newkeyword in Card._commentary_keywords or oldkeyword in Card._commentary_keywords): if not (newkeyword in Card._commentary_keywords and oldkeyword in Card._commentary_keywords): raise ValueError('Regular and commentary keys can not be ' 'renamed to each other.') elif not force and newkeyword in self: raise ValueError('Intended keyword {} already exists in header.' .format(newkeyword)) idx = self.index(oldkeyword) card = self._cards[idx] del self[idx] self.insert(idx, (newkeyword, card.value, card.comment)) def add_history(self, value, before=None, after=None): """ Add a ``HISTORY`` card. Parameters ---------- value : str History text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('HISTORY', value, before=before, after=after) def add_comment(self, value, before=None, after=None): """ Add a ``COMMENT`` card. Parameters ---------- value : str Text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('COMMENT', value, before=before, after=after) def add_blank(self, value='', before=None, after=None): """ Add a blank card. Parameters ---------- value : str, optional Text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('', value, before=before, after=after) def _update(self, card): """ The real update code. If keyword already exists, its value and/or comment will be updated. Otherwise a new card will be appended. This will not create a duplicate keyword except in the case of commentary cards. The only other way to force creation of a duplicate is to use the insert(), append(), or extend() methods. 
""" keyword, value, comment = card # Lookups for existing/known keywords are case-insensitive keyword = keyword.upper() if keyword.startswith('HIERARCH '): keyword = keyword[9:] if (keyword not in Card._commentary_keywords and keyword in self._keyword_indices): # Easy; just update the value/comment idx = self._keyword_indices[keyword][0] existing_card = self._cards[idx] existing_card.value = value if comment is not None: # '' should be used to explicitly blank a comment existing_card.comment = comment if existing_card._modified: self._modified = True elif keyword in Card._commentary_keywords: cards = self._splitcommentary(keyword, value) if keyword in self._keyword_indices: # Append after the last keyword of the same type idx = self.index(keyword, start=len(self) - 1, stop=-1) isblank = not (keyword or value or comment) for c in reversed(cards): self.insert(idx + 1, c, useblanks=(not isblank)) else: for c in cards: self.append(c, bottom=True) else: # A new keyword! self.append() will handle updating _modified self.append(card) def _cardindex(self, key): """Returns an index into the ._cards list given a valid lookup key.""" # This used to just set key = (key, 0) and then go on to act as if the # user passed in a tuple, but it's much more common to just be given a # string as the key, so optimize more for that case if isinstance(key, str): keyword = key n = 0 elif isinstance(key, int): # If < 0, determine the actual index if key < 0: key += len(self._cards) if key < 0 or key >= len(self._cards): raise IndexError('Header index out of range.') return key elif isinstance(key, slice): return key elif isinstance(key, tuple): if (len(key) != 2 or not isinstance(key[0], str) or not isinstance(key[1], int)): raise ValueError( 'Tuple indices must be 2-tuples consisting of a ' 'keyword string and an integer index.') keyword, n = key else: raise ValueError( 'Header indices must be either a string, a 2-tuple, or ' 'an integer.') keyword = Card.normalize_keyword(keyword) # Returns the index into _cards for the n-th card with the given # keyword (where n is 0-based) indices = self._keyword_indices.get(keyword, None) if keyword and not indices: if len(keyword) > KEYWORD_LENGTH or '.' in keyword: raise KeyError(f"Keyword {keyword!r} not found.") else: # Maybe it's a RVKC? indices = self._rvkc_indices.get(keyword, None) if not indices: raise KeyError(f"Keyword {keyword!r} not found.") try: return indices[n] except IndexError: raise IndexError('There are only {} {!r} cards in the ' 'header.'.format(len(indices), keyword)) def _keyword_from_index(self, idx): """ Given an integer index, return the (keyword, repeat) tuple that index refers to. For most keywords the repeat will always be zero, but it may be greater than zero for keywords that are duplicated (especially commentary keywords). In a sense this is the inverse of self.index, except that it also supports duplicates. """ if idx < 0: idx += len(self._cards) keyword = self._cards[idx].keyword keyword = Card.normalize_keyword(keyword) repeat = self._keyword_indices[keyword].index(idx) return keyword, repeat def _relativeinsert(self, card, before=None, after=None, replace=False): """ Inserts a new card before or after an existing card; used to implement support for the legacy before/after keyword arguments to Header.update(). If replace=True, move an existing card with the same keyword. 
""" if before is None: insertionkey = after else: insertionkey = before def get_insertion_idx(): if not (isinstance(insertionkey, int) and insertionkey >= len(self._cards)): idx = self._cardindex(insertionkey) else: idx = insertionkey if before is None: idx += 1 return idx if replace: # The card presumably already exists somewhere in the header. # Check whether or not we actually have to move it; if it does need # to be moved we just delete it and then it will be reinserted # below old_idx = self._cardindex(card.keyword) insertion_idx = get_insertion_idx() if (insertion_idx >= len(self._cards) and old_idx == len(self._cards) - 1): # The card would be appended to the end, but it's already at # the end return if before is not None: if old_idx == insertion_idx - 1: return elif after is not None and old_idx == insertion_idx: return del self[old_idx] # Even if replace=True, the insertion idx may have changed since the # old card was deleted idx = get_insertion_idx() if card[0] in Card._commentary_keywords: cards = reversed(self._splitcommentary(card[0], card[1])) else: cards = [card] for c in cards: self.insert(idx, c) def _updateindices(self, idx, increment=True): """ For all cards with index above idx, increment or decrement its index value in the keyword_indices dict. """ if idx > len(self._cards): # Save us some effort return increment = 1 if increment else -1 for index_sets in (self._keyword_indices, self._rvkc_indices): for indices in index_sets.values(): for jdx, keyword_index in enumerate(indices): if keyword_index >= idx: indices[jdx] += increment def _countblanks(self): """Returns the number of blank cards at the end of the Header.""" for idx in range(1, len(self._cards)): if not self._cards[-idx].is_blank: return idx - 1 return 0 def _useblanks(self, count): for _ in range(count): if self._cards[-1].is_blank: del self[-1] else: break def _haswildcard(self, keyword): """Return `True` if the input keyword contains a wildcard pattern.""" return (isinstance(keyword, str) and (keyword.endswith('...') or '*' in keyword or '?' in keyword)) def _wildcardmatch(self, pattern): """ Returns a list of indices of the cards matching the given wildcard pattern. * '*' matches 0 or more characters * '?' matches a single character * '...' matches 0 or more of any non-whitespace character """ pattern = pattern.replace('*', r'.*').replace('?', r'.') pattern = pattern.replace('...', r'\S*') + '$' pattern_re = re.compile(pattern, re.I) return [idx for idx, card in enumerate(self._cards) if pattern_re.match(card.keyword)] def _set_slice(self, key, value, target): """ Used to implement Header.__setitem__ and CardAccessor.__setitem__. """ if isinstance(key, slice) or self._haswildcard(key): if isinstance(key, slice): indices = range(*key.indices(len(target))) else: indices = self._wildcardmatch(key) if isinstance(value, str) or not isiterable(value): value = itertools.repeat(value, len(indices)) for idx, val in zip(indices, value): target[idx] = val return True return False def _splitcommentary(self, keyword, value): """ Given a commentary keyword and value, returns a list of the one or more cards needed to represent the full value. This is primarily used to create the multiple commentary cards needed to represent a long value that won't fit into a single commentary card. 
""" # The maximum value in each card can be the maximum card length minus # the maximum key length (which can include spaces if they key length # less than 8 maxlen = Card.length - KEYWORD_LENGTH valuestr = str(value) if len(valuestr) <= maxlen: # The value can fit in a single card cards = [Card(keyword, value)] else: # The value must be split across multiple consecutive commentary # cards idx = 0 cards = [] while idx < len(valuestr): cards.append(Card(keyword, valuestr[idx:idx + maxlen])) idx += maxlen return cards def _strip(self): """ Strip cards specific to a certain kind of header. Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of the header can be used to reconstruct another kind of header. """ # TODO: Previously this only deleted some cards specific to an HDU if # _hdutype matched that type. But it seemed simple enough to just # delete all desired cards anyways, and just ignore the KeyErrors if # they don't exist. # However, it might be desirable to make this extendable somehow--have # a way for HDU classes to specify some headers that are specific only # to that type, and should be removed otherwise. if 'NAXIS' in self: naxis = self['NAXIS'] else: naxis = 0 if 'TFIELDS' in self: tfields = self['TFIELDS'] else: tfields = 0 for idx in range(naxis): try: del self['NAXIS' + str(idx + 1)] except KeyError: pass for name in ('TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT', 'TDISP', 'TDIM', 'THEAP', 'TBCOL'): for idx in range(tfields): try: del self[name + str(idx + 1)] except KeyError: pass for name in ('SIMPLE', 'XTENSION', 'BITPIX', 'NAXIS', 'EXTEND', 'PCOUNT', 'GCOUNT', 'GROUPS', 'BSCALE', 'BZERO', 'TFIELDS'): try: del self[name] except KeyError: pass def _add_commentary(self, key, value, before=None, after=None): """ Add a commentary card. If ``before`` and ``after`` are `None`, add to the last occurrence of cards of the same name (except blank card). If there is no card (or blank card), append at the end. """ if before is not None or after is not None: self._relativeinsert((key, value), before=before, after=after) else: self[key] = value collections.abc.MutableSequence.register(Header) collections.abc.MutableMapping.register(Header) class _DelayedHeader: """ Descriptor used to create the Header object from the header string that was stored in HDU._header_str when parsing the file. """ def __get__(self, obj, owner=None): try: return obj.__dict__['_header'] except KeyError: if obj._header_str is not None: hdr = Header.fromstring(obj._header_str) obj._header_str = None else: raise AttributeError("'{}' object has no attribute '_header'" .format(obj.__class__.__name__)) obj.__dict__['_header'] = hdr return hdr def __set__(self, obj, val): obj.__dict__['_header'] = val def __delete__(self, obj): del obj.__dict__['_header'] class _BasicHeaderCards: """ This class allows to access cards with the _BasicHeader.cards attribute. This is needed because during the HDU class detection, some HDUs uses the .cards interface. Cards cannot be modified here as the _BasicHeader object will be deleted once the HDU object is created. """ def __init__(self, header): self.header = header def __getitem__(self, key): # .cards is a list of cards, so key here is an integer. # get the keyword name from its index. key = self.header._keys[key] # then we get the card from the _BasicHeader._cards list, or parse it # if needed. 
try: return self.header._cards[key] except KeyError: cardstr = self.header._raw_cards[key] card = Card.fromstring(cardstr) self.header._cards[key] = card return card class _BasicHeader(collections.abc.Mapping): """This class provides a fast header parsing, without all the additional features of the Header class. Here only standard keywords are parsed, no support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc. The raw card images are stored and parsed only if needed. The idea is that to create the HDU objects, only a small subset of standard cards is needed. Once a card is parsed, which is deferred to the Card class, the Card object is kept in a cache. This is useful because a small subset of cards is used a lot in the HDU creation process (NAXIS, XTENSION, ...). """ def __init__(self, cards): # dict of (keywords, card images) self._raw_cards = cards self._keys = list(cards.keys()) # dict of (keyword, Card object) storing the parsed cards self._cards = {} # the _BasicHeaderCards object allows to access Card objects from # keyword indices self.cards = _BasicHeaderCards(self) self._modified = False def __getitem__(self, key): if isinstance(key, int): key = self._keys[key] try: return self._cards[key].value except KeyError: # parse the Card and store it cardstr = self._raw_cards[key] self._cards[key] = card = Card.fromstring(cardstr) return card.value def __len__(self): return len(self._raw_cards) def __iter__(self): return iter(self._raw_cards) def index(self, keyword): return self._keys.index(keyword) @classmethod def fromfile(cls, fileobj): """The main method to parse a FITS header from a file. The parsing is done with the parse_header function implemented in Cython.""" close_file = False if isinstance(fileobj, str): fileobj = open(fileobj, 'rb') close_file = True try: header_str, cards = parse_header(fileobj) _check_padding(header_str, BLOCK_SIZE, False) return header_str, cls(cards) finally: if close_file: fileobj.close() class _CardAccessor: """ This is a generic class for wrapping a Header in such a way that you can use the header's slice/filtering capabilities to return a subset of cards and do something with them. This is sort of the opposite notion of the old CardList class--whereas Header used to use CardList to get lists of cards, this uses Header to get lists of cards. """ # TODO: Consider giving this dict/list methods like Header itself def __init__(self, header): self._header = header def __repr__(self): return '\n'.join(repr(c) for c in self._header._cards) def __len__(self): return len(self._header._cards) def __iter__(self): return iter(self._header._cards) def __eq__(self, other): # If the `other` item is a scalar we will still treat it as equal if # this _CardAccessor only contains one item if not isiterable(other) or isinstance(other, str): if len(self) == 1: other = [other] else: return False for a, b in itertools.zip_longest(self, other): if a != b: return False else: return True def __ne__(self, other): return not (self == other) def __getitem__(self, item): if isinstance(item, slice) or self._header._haswildcard(item): return self.__class__(self._header[item]) idx = self._header._cardindex(item) return self._header._cards[idx] def _setslice(self, item, value): """ Helper for implementing __setitem__ on _CardAccessor subclasses; slices should always be handled in this same way. 
""" if isinstance(item, slice) or self._header._haswildcard(item): if isinstance(item, slice): indices = range(*item.indices(len(self))) else: indices = self._header._wildcardmatch(item) if isinstance(value, str) or not isiterable(value): value = itertools.repeat(value, len(indices)) for idx, val in zip(indices, value): self[idx] = val return True return False collections.abc.Mapping.register(_CardAccessor) collections.abc.Sequence.register(_CardAccessor) class _HeaderComments(_CardAccessor): """ A class used internally by the Header class for the Header.comments attribute access. This object can be used to display all the keyword comments in the Header, or look up the comments on specific keywords. It allows all the same forms of keyword lookup as the Header class itself, but returns comments instead of values. """ def __iter__(self): for card in self._header._cards: yield card.comment def __repr__(self): """Returns a simple list of all keywords and their comments.""" keyword_length = KEYWORD_LENGTH for card in self._header._cards: keyword_length = max(keyword_length, len(card.keyword)) return '\n'.join('{:>{len}} {}'.format(c.keyword, c.comment, len=keyword_length) for c in self._header._cards) def __getitem__(self, item): """ Slices and filter strings return a new _HeaderComments containing the returned cards. Otherwise the comment of a single card is returned. """ item = super().__getitem__(item) if isinstance(item, _HeaderComments): # The item key was a slice return item return item.comment def __setitem__(self, item, comment): """ Set/update the comment on specified card or cards. Slice/filter updates work similarly to how Header.__setitem__ works. """ if self._header._set_slice(item, comment, self): return # In this case, key/index errors should be raised; don't update # comments of nonexistent cards idx = self._header._cardindex(item) value = self._header[idx] self._header[idx] = (value, comment) class _HeaderCommentaryCards(_CardAccessor): """ This is used to return a list-like sequence over all the values in the header for a given commentary keyword, such as HISTORY. """ def __init__(self, header, keyword=''): super().__init__(header) self._keyword = keyword self._count = self._header.count(self._keyword) self._indices = slice(self._count).indices(self._count) # __len__ and __iter__ need to be overridden from the base class due to the # different approach this class has to take for slicing def __len__(self): return len(range(*self._indices)) def __iter__(self): for idx in range(*self._indices): yield self._header[(self._keyword, idx)] def __repr__(self): return '\n'.join(self) def __getitem__(self, idx): if isinstance(idx, slice): n = self.__class__(self._header, self._keyword) n._indices = idx.indices(self._count) return n elif not isinstance(idx, int): raise ValueError(f'{self._keyword} index must be an integer') idx = list(range(*self._indices))[idx] return self._header[(self._keyword, idx)] def __setitem__(self, item, value): """ Set the value of a specified commentary card or cards. Slice/filter updates work similarly to how Header.__setitem__ works. """ if self._header._set_slice(item, value, self): return # In this case, key/index errors should be raised; don't update # comments of nonexistent cards self._header[(self._keyword, item)] = value def _block_size(sep): """ Determine the size of a FITS header block if a non-blank separator is used between cards. 
""" return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1)) def _pad_length(stringlen): """Bytes needed to pad the input stringlen to the next FITS block.""" return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE def _check_padding(header_str, block_size, is_eof, check_block_size=True): # Strip any zero-padding (see ticket #106) if header_str and header_str[-1] == '\0': if is_eof and header_str.strip('\0') == '': # TODO: Pass this warning to validation framework warnings.warn( 'Unexpected extra padding at the end of the file. This ' 'padding may not be preserved when saving changes.', AstropyUserWarning) raise EOFError() else: # Replace the illegal null bytes with spaces as required by # the FITS standard, and issue a nasty warning # TODO: Pass this warning to validation framework warnings.warn( 'Header block contains null bytes instead of spaces for ' 'padding, and is not FITS-compliant. Nulls may be ' 'replaced with spaces upon writing.', AstropyUserWarning) header_str.replace('\0', ' ') if check_block_size and (len(header_str) % block_size) != 0: # This error message ignores the length of the separator for # now, but maybe it shouldn't? actual_len = len(header_str) - block_size + BLOCK_SIZE # TODO: Pass this error to validation framework raise ValueError('Header size is not multiple of {}: {}' .format(BLOCK_SIZE, actual_len))
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import re from io import BytesIO, open from collections import OrderedDict import locale import platform from io import StringIO import pathlib import pytest import numpy as np from astropy.io import ascii from astropy.table import Table from astropy import table from astropy.units import Unit from astropy.table.table_helpers import simple_table from .common import (raises, assert_equal, assert_almost_equal, assert_true) from astropy.io.ascii import core from astropy.io.ascii.ui import _probably_html, get_read_trace, cparser from astropy.utils.exceptions import AstropyWarning # setup/teardown function to have the tests run in the correct directory from .common import setup_function, teardown_function try: import bz2 # pylint: disable=W0611 except ImportError: HAS_BZ2 = False else: HAS_BZ2 = True asciiIO = lambda x: BytesIO(x.encode('ascii')) @pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False}, {'use_fast_converter': True}, 'force']) def test_convert_overflow(fast_reader): """ Test reading an extremely large integer, which falls through to string due to an overflow error (#2234). The C parsers used to return inf (kind 'f') for this. """ expected_kind = 'U' with pytest.warns(AstropyWarning, match="OverflowError converting to IntType in column a"): dat = ascii.read(['a', '1' * 10000], format='basic', fast_reader=fast_reader, guess=False) assert dat['a'].dtype.kind == expected_kind def test_guess_with_names_arg(): """ Make sure reading a table with guess=True gives the expected result when the names arg is specified. """ # This is a NoHeader format table and so `names` should replace # the default col0, col1 names. It fails as a Basic format # table when guessing because the column names would be '1', '2'. dat = ascii.read(['1,2', '3,4'], names=('a', 'b')) assert len(dat) == 2 assert dat.colnames == ['a', 'b'] # This is a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c,d', '3,4'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # This is also a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c d', 'e f'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_format_arg(): """ When the format or Reader is explicitly given then disable the strict column name checking in guessing. """ dat = ascii.read(['1,2', '3,4'], format='basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic') assert len(dat) == 1 assert dat.colnames == ['a', 'b'] dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # For good measure check the same in the unified I/O interface dat = Table.read(['1,2', '3,4'], format='ascii.basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_delimiter_arg(): """ When the delimiter is explicitly given then do not try others in guessing. 
""" fields = ['10.1E+19', '3.14', '2048', '-23'] values = [1.01e20, 3.14, 2048, -23] # Default guess should recognise CSV with optional spaces t0 = ascii.read(asciiIO(', '.join(fields)), guess=True) for n, v in zip(t0.colnames, values): assert t0[n][0] == v # Forcing space as delimiter produces type str columns ('10.1E+19,') t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ') for n, v in zip(t1.colnames[:-1], fields[:-1]): assert t1[n][0] == v+',' def test_reading_mixed_delimiter_tabs_spaces(): # Regression test for https://github.com/astropy/astropy/issues/6770 dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc')) assert len(dat) == 2 Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header', names=['a', 'b', 'c']) assert len(dat) == 2 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_with_names_arg(fast_reader): """ Test that a bad value of `names` raises an exception. """ # CParser only uses columns in `names` and thus reports mismach in num_col with pytest.raises(ascii.InconsistentTableError): ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if ('Reader' in test_opts and 'fast_{}'.format(test_opts['Reader']._format_name) in core.FAST_CLASSES): # has fast version if 'Inputter' not in test_opts: # fast reader doesn't allow this test_opts['fast_reader'] = fast_reader table = ascii.read(testfile['name'], **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files_via_table(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if 'Reader' in test_opts: format = 'ascii.{}'.format(test_opts['Reader']._format_name) del test_opts['Reader'] else: format = 'ascii' if f'fast_{format}' in core.FAST_CLASSES: test_opts['fast_reader'] = fast_reader table = Table.read(testfile['name'], format=format, **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_guess_all_files(): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue if not testfile['opts'].get('guess', True): continue print('\n\n******** READING {}'.format(testfile['name'])) for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []): # Copy read options except for those in filter_read_opts guess_opts = dict((k, v) for k, v in testfile['opts'].items() if k not in filter_read_opts) table = ascii.read(testfile['name'], guess=True, **guess_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_daophot_indef(): """Test that INDEF is correctly 
interpreted as a missing value""" table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) for col in table.itercols(): # Four columns have all INDEF values and are masked, rest are normal Column if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'): assert np.all(col.mask) else: assert not hasattr(col, 'mask') def test_daophot_types(): """ Test specific data types which are different from what would be inferred automatically based only data values. DAOphot reader uses the header information to assign types. """ table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) assert table['LID'].dtype.char in 'fd' # float or double assert table['MAG'].dtype.char in 'fd' # even without any data values assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int) assert table['ID'].dtype.char in 'il' # int or long def test_daophot_header_keywords(): table = ascii.read('data/daophot.dat', Reader=ascii.Daophot) expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'), ('REJFILE', '"hello world"', 'filename', '%-23s'), ('SCALE', '1.', 'units/pix', '%-23.7g'),) keywords = table.meta['keywords'] # Ordered dict of keyword structures for name, value, units, format_ in expected_keywords: keyword = keywords[name] assert_equal(keyword['value'], value) assert_equal(keyword['units'], units) assert_equal(keyword['format'], format_) def test_daophot_multiple_aperture(): table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot) assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file assert table['MERR2'][0] == 1.171 assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3 def test_daophot_multiple_aperture2(): table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot) assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file assert table['MERR2'][0] == 0.049 assert np.all(table['RAPERT5'] == 5.) 
# assert all the 5th apertures are same 5.0 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_empty_table_no_header(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader, guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_wrong_quote(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/bad.txt', fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col2(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader) @raises(OSError) def test_missing_file(): ascii.read('does_not_exist') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') data = ascii.read('data/simple3.txt', names=names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_include_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') include_names = ('c1', 'c3') data = ascii.read('data/simple3.txt', names=names, include_names=include_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, include_names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_exclude_names(fast_reader): exclude_names = ('Y', 'object') data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad')) def test_include_names_daophot(): include_names = ('ID', 'MAG', 'PIER') data = ascii.read('data/daophot.dat', include_names=include_names) assert_equal(data.dtype.names, include_names) def test_exclude_names_daophot(): exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR') data = ascii.read('data/daophot.dat', exclude_names=exclude_names) assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER')) def test_custom_process_lines(): def process_lines(lines): bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE) striplines = (x.strip() for x in lines) return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0] reader = ascii.get_reader(delimiter='|') reader.inputter.process_lines = process_lines data = reader.read('data/bars_at_ends.txt') assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad')) assert_equal(len(data), 3) def test_custom_process_line(): def process_line(line): line_out = re.sub(r'^\|\s*', '', line.strip()) return line_out reader = ascii.get_reader(data_start=2, delimiter='|') reader.header.splitter.process_line = process_line reader.data.splitter.process_line = process_line data = reader.read('data/nls1_stackinfo.dbout') cols = get_testfiles('data/nls1_stackinfo.dbout')['cols'] assert_equal(data.dtype.names, cols[1:]) def test_custom_splitters(): reader = ascii.get_reader() reader.header.splitter = ascii.BaseSplitter() reader.data.splitter = ascii.BaseSplitter() f = 'data/test4.dat' data = reader.read(f) testfile = get_testfiles(f) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), 
testfile['nrows']) assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091) assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704) assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148) assert_equal(data.field('statname')[2], 'chi2modvar') assert_almost_equal(data.field('statval')[2], 497.56468441) def test_start_end(): data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5) assert_equal(len(data), 13) assert_equal(data.field('statname')[0], 'chi2xspecvar') assert_equal(data.field('statname')[-1], 'chi2gehrels') def test_set_converters(): converters = {'zabs1.nh': [ascii.convert_numpy('int32'), ascii.convert_numpy('float32')], 'p1.gamma': [ascii.convert_numpy('str')] } data = ascii.read('data/test4.dat', converters=converters) assert_equal(str(data['zabs1.nh'].dtype), 'float32') assert_equal(data['p1.gamma'][0], '1.26764500000') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_string(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.read() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_filelike(fast_reader): f = 'data/simple.txt' testfile = get_testfiles(f) with open(f, 'rb') as fd: data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_lines(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.readlines() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) def test_comment_lines(): table = ascii.get_reader(Reader=ascii.Rdb) data = table.read('data/apostrophe.rdb') assert_equal(table.comment_lines, ['# first comment', ' # second comment']) assert_equal(data.meta['comments'], ['first comment', 'second comment']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, **testfile['opts']) assert_true((data['a'].mask == [False, True]).all()) assert_true((data['a'] == [1, 1]).all()) assert_true((data['b'].mask == [False, True]).all()) assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_col(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader, **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_include_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_include_names=['b'], **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_exclude_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_exclude_names=['a'], **testfile['opts']) check_fill_values(data) def check_fill_values(data): """compare 
    array column by column with expectation
    """
    assert not hasattr(data['a'], 'mask')
    assert_true((data['a'] == ['1', 'a']).all())
    assert_true((data['b'].mask == [False, True]).all())
    # Check that masked value is "do not care" in comparison
    assert_true((data['b'] == [2, -999]).all())
    data['b'].mask = False  # explicitly unmask for comparison
    assert_true((data['b'] == [2, 1]).all())


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_list(fast_reader):
    f = 'data/fill_values.txt'
    testfile = get_testfiles(f)
    data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],
                      fast_reader=fast_reader, **testfile['opts'])
    data['a'].mask = False  # explicitly unmask for comparison
    assert_true((data['a'] == [42, 42]).all())


def test_masking_Cds():
    f = 'data/cds.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    assert_true(data['AK'].mask[0])
    assert not hasattr(data['Fit'], 'mask')


def test_null_Ipac():
    f = 'data/ipac.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    mask = np.array([(True, False, True, False, True),
                     (False, False, False, False, False)],
                    dtype=[('ra', '|b1'), ('dec', '|b1'), ('sai', '|b1'),
                           ('v2', '|b1'), ('sptype', '|b1')])
    assert np.all(data.mask == mask)


def test_Ipac_meta():
    keywords = OrderedDict((('intval', 1),
                            ('floatval', 2.3e3),
                            ('date', "Wed Sp 20 09:48:36 1995"),
                            ('key_continue', 'IPAC keywords can continue across lines')))
    comments = ['This is an example of a valid comment']
    f = 'data/ipac.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    assert data.meta['keywords'].keys() == keywords.keys()
    for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):
        assert data_kv['value'] == kv
    assert data.meta['comments'] == comments


def test_set_guess_kwarg():
    """Read a file using guess with one of the typical guess_kwargs explicitly set."""
    data = ascii.read('data/space_delim_no_header.dat',
                      delimiter=',', guess=True)
    assert data.dtype.names == ('1 3.4 hello',)
    assert len(data) == 1


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_rdb_wrong_type(fast_reader):
    """Read RDB data with inconsistent data type (expect failure)"""
    table = """col1\tcol2
N\tN
1\tHello"""
    with pytest.raises(ValueError):
        ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_default_missing(fast_reader):
    """Read a table with empty values and ensure that corresponding entries are masked"""
    table = '\n'.join(['a,b,c,d',
                       '1,3,,',
                       '2, , 4.0 , ss '])
    dat = ascii.read(table, fast_reader=fast_reader)
    assert dat.masked is False
    assert dat.pformat() == [' a   b   c   d ',
                             '--- --- --- ---',
                             '  1   3  --  --',
                             '  2  -- 4.0  ss']

    # Single row table with a single missing element
    table = """ a \n "" """
    dat = ascii.read(table, fast_reader=fast_reader)
    assert dat.pformat() == [' a ', '---', ' --']
    assert dat['a'].dtype.kind == 'i'

    # Same test with a fixed width reader
    table = '\n'.join([' a   b   c   d ',
                       '--- --- --- ---',
                       '  1   3        ',
                       '  2      4.0 ss'])
    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
    assert dat.masked is False
    assert dat.pformat() == [' a   b   c   d ',
                             '--- --- --- ---',
                             '  1   3  --  --',
                             '  2  -- 4.0  ss']

    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
    assert dat.masked is False
    assert dat.pformat() == [' a   b   c   d ',
                             '--- --- --- ---',
                             '  1   3        ',
                             '  2      4.0 ss']

    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
    assert dat.masked is False
    assert dat.pformat() == [' a   b   c   d ',
'--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] def get_testfiles(name=None): """Set up information about the columns, number of rows, and reader params to read a bunch of test files and verify columns and number of rows.""" testfiles = [ {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.rdb', 'nrows': 2, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.tab', 'nrows': 2, 'opts': {'Reader': ascii.Tab}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds}}, # Test malformed CDS file (issues #2241 #467) {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds_malformed.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header2.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5'), 'name': 'data/continuation.dat', 'nrows': 2, 'opts': {'Inputter': ascii.ContinuationLinesInputter, 'Reader': ascii.NoHeader}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/daophot.dat', 'nrows': 2, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALU-ES', 'VALU-ES_1', 'FLAG'), 'name': 'data/sextractor.dat', 'nrows': 3, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('col0', 'objID', 'osrcid', 'xsrcid', 'SpecObjID', 'ra', 'dec', 'obsid', 'ccdid', 'z', 'modelMag_i', 'modelMagErr_i', 'modelMag_r', 'modelMagErr_r', 'expo', 'theta', 'rad_ecf_39', 'detlim90', 'fBlim90'), 'name': 'data/nls1_stackinfo.dbout', 'nrows': 58, 'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/no_data_cds.dat', 'nrows': 0, 'opts': {'Reader': ascii.Cds}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/no_data_daophot.dat', 'nrows': 0, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALUES', 'VALUES_1', 'FLAG'), 'name': 'data/no_data_sextractor.dat', 'nrows': 0, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/no_data_ipac.dat', 'nrows': 0, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('ra', 'v2'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}}, {'cols': ('a', 'b', 'c'), 'name': 'data/no_data_with_header.dat', 'nrows': 0, 'opts': {}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.rdb', 'nrows': 7, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.tab', 'nrows': 7, 'opts': {'Reader': ascii.Tab}}, {'cols': ('test 1a', 'test2', 'test3', 'test4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'"}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 1, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 
1, 'opts': {'quotechar': "'", 'header_start': 1}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple2.txt', 'nrows': 3, 'opts': {'delimiter': '|'}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple3.txt', 'nrows': 2, 'opts': {'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'), 'name': 'data/simple4.txt', 'nrows': 3, 'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader, 'header_start': None}}, {'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'), 'name': 'data/space_delim_blank_lines.txt', 'nrows': 3, 'opts': {}}, {'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'), 'name': 'data/test4.dat', 'nrows': 9, 'opts': {}}, {'cols': ('a', 'b', 'c'), 'name': 'data/fill_values.txt', 'nrows': 2, 'opts': {'delimiter': ','}}, {'name': 'data/whitespace.dat', 'cols': ('quoted colname with tab\tinside', 'col2', 'col3'), 'nrows': 2, 'opts': {'delimiter': r'\s'}}, {'name': 'data/simple_csv.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'opts': {'Reader': ascii.Csv}}, {'name': 'data/simple_csv_missing.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'skip': True, 'opts': {'Reader': ascii.Csv}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex1.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Facility', 'Id', 'exposure', 'date'), 'name': 'data/latex2.tex', 'nrows': 3, 'opts': {'Reader': ascii.AASTex}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex3.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Col1', 'Col2', 'Col3', 'Col4'), 'name': 'data/fixed_width_2_line.txt', 'nrows': 2, 'opts': {'Reader': ascii.FixedWidthTwoLine}}, ] try: import bs4 # pylint: disable=W0611 testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'), 'name': 'data/html.html', 'nrows': 3, 'opts': {'Reader': ascii.HTML}}) except ImportError: pass if name is not None: return [x for x in testfiles if x['name'] == name][0] else: return testfiles def test_header_start_exception(): '''Check certain Readers throw an exception if ``header_start`` is set For certain Readers it does not make sense to set the ``header_start``, they throw an exception if you try. This was implemented in response to issue #885. ''' for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac, ascii.BaseReader, ascii.FixedWidthNoHeader, ascii.Cds, ascii.Daophot]: with pytest.raises(ValueError): reader = ascii.core._get_reader(readerclass, header_start=5) def test_csv_table_read(): """ Check for a regression introduced by #1935. Pseudo-CSV file with commented header line. """ lines = ['# a, b', '1, 2', '3, 4'] t = ascii.read(lines) assert t.colnames == ['a', 'b'] @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_overlapping_names(fast_reader): """ Check that the names argument list can overlap with the existing column names. This tests the issue in #1991. """ t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader) assert t.colnames == ['b', 'a'] def test_sextractor_units(): """ Make sure that the SExtractor reader correctly inputs descriptions and units. 
""" table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False) expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'), Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'), Unit('mag * arcsec**(-2)')] expected_descrs = ['Running object number', 'Windowed position estimate along x', 'Windowed position estimate along y', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', 'Extraction flags', None, 'Barycenter position along MAMA x axis', 'Peak surface brightness above background'] for i, colname in enumerate(table.colnames): assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_sextractor_last_column_array(): """ Make sure that the SExtractor reader handles the last column correctly when it is array-like. """ table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False) expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'MAG_AUTO', 'MAGERR_AUTO', 'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6', 'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6'] expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')] expected_descrs = ['Object position along x', None, 'Right ascension of barycenter (J2000)', 'Declination of barycenter (J2000)', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', ] + [ 'Fixed aperture magnitude vector'] * 7 + [ 'RMS error vector for fixed aperture mag.'] * 7 for i, colname in enumerate(table.colnames): assert table[colname].name == expected_columns[i] assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_list_with_newlines(): """ Check that lists of strings where some strings consist of just a newline ("\n") are parsed correctly. """ t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"]) assert t.colnames == ['abc'] assert len(t) == 2 assert t[0][0] == 123 assert t[1][0] == 456 def test_commented_csv(): """ Check that Csv reader does not have ignore lines with the # comment character which is defined for most Basic readers. """ t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv') assert t.colnames == ['#a', 'b'] assert len(t) == 2 assert t['#a'][1] == '#3' def test_meta_comments(): """ Make sure that line comments are included in the ``meta`` attribute of the output Table. 
""" t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3']) assert t.colnames == ['a', 'b', 'c'] assert t.meta['comments'] == ['comment1', 'comment2'] def test_guess_fail(): """ Check the error message when guess fails """ with pytest.raises(ascii.InconsistentTableError) as err: ascii.read('asfdasdf\n1 2 3', format='basic') assert "** To figure out why the table did not read, use guess=False and" in str(err.value) # Test the case with guessing enabled but for a format that has no free params with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='ipac') assert 'At least one header line beginning and ending with delimiter required' in str(err.value) # Test the case with guessing enabled but with all params specified with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False) assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value) @pytest.mark.xfail('not HAS_BZ2') def test_guessing_file_object(): """ Test guessing a file object. Fixes #3013 and similar issue noted in #3019. """ t = ascii.read(open('data/ipac.dat.bz2', 'rb')) assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype'] def test_pformat_roundtrip(): """Check that the screen output of ``print tab`` can be read. See #3025.""" """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,1.11,1', '2, 2, 4.0 , ss ']) dat = ascii.read(table) out = ascii.read(dat.pformat()) assert len(dat) == len(out) assert dat.colnames == out.colnames for c in dat.colnames: assert np.all(dat[c] == out[c]) def test_ipac_abbrev(): lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|', '| r | rE | rea | real | D | do | dou | f | i | l | da| c |', ' 1 2 3 4 5 6 7 8 9 10 11 12 '] dat = ascii.read(lines, format='ipac') for name in dat.columns[0:8]: assert dat[name].dtype.kind == 'f' for name in dat.columns[8:10]: assert dat[name].dtype.kind == 'i' for name in dat.columns[10:12]: assert dat[name].dtype.kind in ('U', 'S') def test_almost_but_not_quite_daophot(): '''Regression test for #3319. This tables looks so close to a daophot table, that the daophot reader gets quite far before it fails with an AttributeError. Note that this table will actually be read as Commented Header table with the columns ['some', 'header', 'info']. ''' lines = ["# some header info", "#F header info beginning with 'F'", "1 2 3", "4 5 6", "7 8 9"] dat = ascii.read(lines) assert len(dat) == 3 @pytest.mark.parametrize('fast', [False, 'force']) def test_commented_header_comments(fast): """ Test that comments in commented_header are as expected with header_start at different positions, and that the table round-trips. 
""" comments = ['comment 1', 'comment 2', 'comment 3'] lines = ['# a b', '# comment 1', '# comment 2', '# comment 3', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] out = StringIO() ascii.write(dat, out, format='commented_header', fast_writer=fast) assert out.getvalue().splitlines() == lines lines.insert(1, lines.pop(0)) dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(2, lines.pop(1)) dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(3, lines.pop(2)) dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines = ['# a b', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert 'comments' not in dat.meta assert dat.colnames == ['a', 'b'] def test_probably_html(): """ Test the routine for guessing if a table input to ascii.read is probably HTML """ for table in ('data/html.html', 'http://blah.com/table.html', 'https://blah.com/table.html', 'file://blah/table.htm', 'ftp://blah.com/table.html', 'file://blah.com/table.htm', ' <! doctype html > hello world', 'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype html > ', ' hello world'), ): assert _probably_html(table) is True for table in ('data/html.htms', 'Xhttp://blah.com/table.html', ' https://blah.com/table.htm', 'fole://blah/table.htm', ' < doctype html > hello world', 'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype htm > ', ' hello world'), [[1, 2, 3]], ): assert _probably_html(table) is False @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_data_header_start(fast_reader): tests = [(['# comment', '', ' ', 'skip this line', # line 0 'a b', # line 1 '1 2'], # line 2 [{'header_start': 1}, {'header_start': 1, 'data_start': 2} ] ), (['# comment', '', ' \t', 'skip this line', # line 0 'a b', # line 1 '', ' \t', 'skip this line', # line 2 '1 2'], # line 3 [{'header_start': 1, 'data_start': 3}]), (['# comment', '', ' ', 'a b', # line 0 '', ' ', 'skip this line', # line 1 '1 2'], # line 2 [{'header_start': 0, 'data_start': 2}, {'data_start': 2}])] for lines, kwargs_list in tests: for kwargs in kwargs_list: t = ascii.read(lines, format='basic', fast_reader=fast_reader, guess=True, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 1 assert np.all(t['a'] == [1]) # Sanity check that the expected Reader is being used assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Basic if (fast_reader is False) else ascii.FastBasic) def test_table_with_no_newline(): """ Test that an input file which is completely empty fails in the expected way. Test that an input file with one line but no newline succeeds. 
""" # With guessing table = BytesIO() with pytest.raises(ascii.InconsistentTableError): ascii.read(table) # Without guessing table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=False, format='basic') assert 'No header line found' in str(err.value) table = BytesIO() t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic') assert not t and t.as_array().size == 0 # Put a single line of column names but with no newline for kwargs in [dict(), dict(guess=False, fast_reader=False, format='basic'), dict(guess=False, fast_reader=True, format='fast_basic')]: table = BytesIO() table.write(b'a b') t = ascii.read(table, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 0 def test_path_object(): fpath = pathlib.Path('data/simple.txt') data = ascii.read(fpath) assert len(data) == 2 assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4'] assert data['test2'][1] == 'hat2' def test_column_conversion_error(): """ Test that context information (upstream exception message) from column conversion error is provided. """ ipac = """\ | col0 | | double | 1 2 """ with pytest.raises(ValueError) as err: ascii.read(ipac, guess=False, format='ipac') assert 'Column col0 failed to convert:' in str(err.value) with pytest.raises(ValueError) as err: ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []}) assert 'no converters' in str(err.value) def test_non_C_locale_with_fast_reader(): """Test code that forces "C" locale while calling fast reader (#4364)""" current = locale.setlocale(locale.LC_ALL) try: if platform.system() == 'Darwin': locale.setlocale(locale.LC_ALL, 'de_DE') else: locale.setlocale(locale.LC_ALL, 'de_DE.utf8') for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}): t = ascii.read(['a b', '1.5 2'], format='basic', guess=False, fast_reader=fast_reader) assert t['a'].dtype.kind == 'f' except locale.Error as e: pytest.skip(f'Locale error: {e}') finally: locale.setlocale(locale.LC_ALL, current) def test_no_units_for_char_columns(): '''Test that a char column of a Table is assigned no unit and not a dimensionless unit.''' t1 = Table([["A"]], names="B") out = StringIO() ascii.write(t1, out, format="ipac") t2 = ascii.read(out.getvalue(), format="ipac", guess=False) assert t2["B"].unit is None def test_initial_column_fill_values(): """Regression test for #5336, #5338.""" class TestHeader(ascii.BasicHeader): def _set_cols_from_names(self): self.cols = [ascii.Column(name=x) for x in self.names] # Set some initial fill values for col in self.cols: col.fill_values = {'--': '0'} class Tester(ascii.Basic): header_class = TestHeader reader = ascii.get_reader(Reader=Tester) assert reader.read("""# Column definition is the first uncommented line # Default delimiter is the space character. a b c # Data starts after the header column definition, blank lines ignored -- 2 3 4 5 6 """)['a'][0] is np.ma.masked def test_latex_no_trailing_backslash(): """ Test that latex/aastex file with no trailing backslash can be read. 
""" lines = r""" \begin{table} \begin{tabular}{ccc} a & b & c \\ 1 & 1.0 & c \\ % comment 3\% & 3.0 & e % comment \end{tabular} \end{table} """ dat = ascii.read(lines, format='latex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) def text_aastex_no_trailing_backslash(): lines = r""" \begin{deluxetable}{ccc} \tablehead{\colhead{a} & \colhead{b} & \colhead{c}} \startdata 1 & 1.0 & c \\ 2 & 2.0 & d \\ % comment 3\% & 3.0 & e % comment \enddata \end{deluxetable} """ dat = ascii.read(lines, format='aastex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) @pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252']) def test_read_with_encoding(tmpdir, encoding): data = { 'commented_header': '# à b è \n 1 2 héllo', 'csv': 'à,b,è\n1,2,héllo' } testfile = str(tmpdir.join('test.txt')) for fmt, content in data.items(): with open(testfile, 'w', encoding=encoding) as f: f.write(content) table = ascii.read(testfile, encoding=encoding) assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] for guess in (True, False): table = ascii.read(testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess) assert table['è'].dtype.kind == 'U' assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] def test_unsupported_read_with_encoding(tmpdir): # Fast reader is not supported, make sure it raises an exception with pytest.raises(ascii.ParameterError): ascii.read('data/simple3.txt', guess=False, fast_reader='force', encoding='latin1', format='fast_csv') def test_read_chunks_input_types(): """ Test chunked reading for different input types: file path, file object, and string input. """ fpath = 'data/test5.dat' t1 = ascii.read(fpath, header_start=1, data_start=3, ) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): t_gen = ascii.read(fp, header_start=1, data_start=3, guess=False, format='fast_basic', fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) == 4 t2 = table.vstack(ts) assert np.all(t1 == t2) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): # Now read the full table in chunks t3 = ascii.read(fp, header_start=1, data_start=3, fast_reader={'chunk_size': 300}) assert np.all(t1 == t3) @pytest.mark.parametrize('masked', [True, False]) def test_read_chunks_formats(masked): """ Test different supported formats for chunked reading. """ t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked) for i, name in enumerate(t1.colnames): t1.rename_column(name, 'col{}'.format(i + 1)) # TO DO commented_header does not currently work due to the special-cased # implementation of header parsing. 
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic': out = StringIO() ascii.write(t1, out, format=format) t_gen = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) > 4 t2 = table.vstack(ts) assert np.all(t1 == t2) # Now read the full table in chunks t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400}) assert np.all(t1 == t3) def test_read_chunks_chunk_size_too_small(): fpath = 'data/test5.dat' with pytest.raises(ValueError) as err: ascii.read(fpath, header_start=1, data_start=3, fast_reader={'chunk_size': 10}) assert 'no newline found in chunk (chunk_size too small?)' in str(err.value) def test_read_chunks_table_changes(): """Column changes type or size between chunks. This also tests the case with no final newline. """ col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50 table = '\n'.join(col) t1 = ascii.read(table, guess=False) t2 = ascii.read(table, fast_reader={'chunk_size': 100}) # This also confirms that the dtypes are exactly the same, i.e. # the string itemsizes are the same. assert np.all(t1 == t2) def test_read_non_ascii(): """Test that pure-Python reader is used in case the file contains non-ASCII characters in it. """ table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv') assert np.all(table['col1'] == ['\u2119', '1']) assert np.all(table['col2'] == ['\u01b4', '2']) @pytest.mark.parametrize('enable', [True, False, 'force']) def test_kwargs_dict_guess(enable): """Test that fast_reader dictionary is preserved through guessing sequence. """ # Fails for enable=(True, 'force') - #5578 ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable)) assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Tab if (enable is False) else ascii.FastTab) for k in get_read_trace(): if not k.get('status', 'Disabled').startswith('Disabled'): assert k.get('kwargs').get('fast_reader').get('enable') is enable
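

# --- Illustrative sketch (editor's addition, not part of the original
# suite): the ``fill_values`` tests above all rely on the same contract --
# a cell matching the first element of a (match, replacement, *columns)
# tuple is replaced and masked on read. The test name is hypothetical.
def test_fill_values_demo_sketch():
    dat = ascii.read(['a b', '1 x', '2 x'], fill_values=[('x', '0', 'b')])
    assert dat['b'].mask.all()   # every 'x' in column b comes back masked
    dat['b'].mask = False        # unmask to inspect the replacement value
    assert np.all(dat['b'] == [0, 0])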
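

# --- Illustrative sketch (editor's addition, not part of the original
# suite): the chunked-reading tests above use the fast reader's
# ``chunk_size``/``chunk_generator`` options; with a generator, one Table
# per chunk is yielded and can be re-assembled with ``table.vstack``.
# The test name and the toy input are hypothetical.
def test_read_chunks_demo_sketch():
    text = 'a b\n' + '\n'.join(f'{i} {i + 1}' for i in range(100))
    chunks = list(ascii.read(text, format='fast_basic', guess=False,
                             fast_reader={'chunk_size': 300,
                                          'chunk_generator': True}))
    assert len(chunks) > 1   # the input was split into several chunks
    assert np.all(table.vstack(chunks)['a'] == np.arange(100))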
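

# --- Illustrative sketch (editor's addition, not part of the original
# suite): ``converters`` (see test_set_converters above) maps a column
# name to an ordered list of candidate converters; the first converter
# that succeeds on the column's strings wins. The test name is
# hypothetical.
def test_converters_demo_sketch():
    converters = {'a': [ascii.convert_numpy('int32'),
                        ascii.convert_numpy('float32')]}
    dat = ascii.read(['a', '1.5'], converters=converters)
    # int32 fails on the string '1.5', so the float32 converter is used
    assert dat['a'].dtype == 'float32'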
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astronomical and physics constants in SI units. See :mod:`astropy.constants` for a complete listing of constants defined in Astropy. """ import math from .constant import Constant, EMConstant # PHYSICAL CONSTANTS # https://en.wikipedia.org/wiki/2019_redefinition_of_SI_base_units class CODATA2018(Constant): default_reference = 'CODATA 2018' _registry = {} _has_incompatible_units = set() class EMCODATA2018(CODATA2018, EMConstant): _registry = CODATA2018._registry h = CODATA2018('h', "Planck constant", 6.62607015e-34, 'J s', 0.0, system='si') hbar = CODATA2018('hbar', "Reduced Planck constant", h.value / (2 * math.pi), 'J s', 0.0, system='si') k_B = CODATA2018('k_B', "Boltzmann constant", 1.380649e-23, 'J / (K)', 0.0, system='si') c = CODATA2018('c', "Speed of light in vacuum", 299792458., 'm / (s)', 0.0, system='si') G = CODATA2018('G', "Gravitational constant", 6.67430e-11, 'm3 / (kg s2)', 0.00015e-11, system='si') g0 = CODATA2018('g0', "Standard acceleration of gravity", 9.80665, 'm / s2', 0.0, system='si') m_p = CODATA2018('m_p', "Proton mass", 1.67262192369e-27, 'kg', 0.00000000051e-27, system='si') m_n = CODATA2018('m_n', "Neutron mass", 1.67492749804e-27, 'kg', 0.00000000095e-27, system='si') m_e = CODATA2018('m_e', "Electron mass", 9.1093837015e-31, 'kg', 0.0000000028e-31, system='si') u = CODATA2018('u', "Atomic mass", 1.66053906660e-27, 'kg', 0.00000000050e-27, system='si') sigma_sb = CODATA2018( 'sigma_sb', "Stefan-Boltzmann constant", 2 * math.pi ** 5 * k_B.value ** 4 / (15 * h.value ** 3 * c.value ** 2), 'W / (K4 m2)', 0.0, system='si') e = EMCODATA2018('e', 'Electron charge', 1.602176634e-19, 'C', 0.0, system='si') eps0 = EMCODATA2018('eps0', 'Vacuum electric permittivity', 8.8541878128e-12, 'F/m', 0.0000000013e-12, system='si') N_A = CODATA2018('N_A', "Avogadro's number", 6.02214076e23, '1 / (mol)', 0.0, system='si') R = CODATA2018('R', "Gas constant", k_B.value * N_A.value, 'J / (K mol)', 0.0, system='si') Ryd = CODATA2018('Ryd', 'Rydberg constant', 10973731.568160, '1 / (m)', 0.000021, system='si') a0 = CODATA2018('a0', "Bohr radius", 5.29177210903e-11, 'm', 0.00000000080e-11, system='si') muB = CODATA2018('muB', "Bohr magneton", 9.2740100783e-24, 'J/T', 0.0000000028e-24, system='si') alpha = CODATA2018('alpha', "Fine-structure constant", 7.2973525693e-3, '', 0.0000000011e-3, system='si') atm = CODATA2018('atm', "Standard atmosphere", 101325, 'Pa', 0.0, system='si') mu0 = CODATA2018('mu0', "Vacuum magnetic permeability", 1.25663706212e-6, 'N/A2', 0.00000000019e-6, system='si') sigma_T = CODATA2018('sigma_T', "Thomson scattering cross-section", 6.6524587321e-29, 'm2', 0.0000000060e-29, system='si') # Formula taken from NIST wall chart. # The numerical factor is from a numerical solution to the equation for the # maximum. See https://en.wikipedia.org/wiki/Wien%27s_displacement_law b_wien = CODATA2018('b_wien', 'Wien wavelength displacement law constant', h.value * c.value / (k_B.value * 4.965114231744276), 'm K', 0.0, system='si') # CGS constants. # Only constants that cannot be converted directly from S.I. are defined here. # Because both e and c are exact, these are also exact by definition. e_esu = EMCODATA2018(e.abbrev, e.name, e.value * c.value * 10.0, 'statC', 0.0, system='esu') e_emu = EMCODATA2018(e.abbrev, e.name, e.value / 10, 'abC', 0.0, system='emu') e_gauss = EMCODATA2018(e.abbrev, e.name, e.value * c.value * 10.0, 'Fr', 0.0, system='gauss')
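

# --- Illustrative sketch (editor's addition, not part of the original
# module): several constants above are derived from others rather than
# tabulated, e.g. R = k_B * N_A and the Stefan-Boltzmann constant. The
# helper name is hypothetical; the reference value 5.670374419e-8 W m-2 K-4
# follows from the exact 2019 SI definitions.
def _demo_derived_constants():
    assert R.value == k_B.value * N_A.value
    assert math.isclose(sigma_sb.value, 5.670374419e-8, rel_tol=1e-9)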
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import re from io import BytesIO, open from collections import OrderedDict import locale import platform from io import StringIO import pathlib import pytest import numpy as np from astropy.io import ascii from astropy.table import Table from astropy import table from astropy.units import Unit from astropy.table.table_helpers import simple_table from .common import (raises, assert_equal, assert_almost_equal, assert_true) from astropy.io.ascii import core from astropy.io.ascii.ui import _probably_html, get_read_trace, cparser from astropy.utils.exceptions import AstropyWarning # setup/teardown function to have the tests run in the correct directory from .common import setup_function, teardown_function try: import bz2 # pylint: disable=W0611 except ImportError: HAS_BZ2 = False else: HAS_BZ2 = True asciiIO = lambda x: BytesIO(x.encode('ascii')) @pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False}, {'use_fast_converter': True}, 'force']) def test_convert_overflow(fast_reader): """ Test reading an extremely large integer, which falls through to string due to an overflow error (#2234). The C parsers used to return inf (kind 'f') for this. """ expected_kind = 'U' with pytest.warns(AstropyWarning, match="OverflowError converting to IntType in column a"): dat = ascii.read(['a', '1' * 10000], format='basic', fast_reader=fast_reader, guess=False) assert dat['a'].dtype.kind == expected_kind def test_guess_with_names_arg(): """ Make sure reading a table with guess=True gives the expected result when the names arg is specified. """ # This is a NoHeader format table and so `names` should replace # the default col0, col1 names. It fails as a Basic format # table when guessing because the column names would be '1', '2'. dat = ascii.read(['1,2', '3,4'], names=('a', 'b')) assert len(dat) == 2 assert dat.colnames == ['a', 'b'] # This is a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c,d', '3,4'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # This is also a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c d', 'e f'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_format_arg(): """ When the format or Reader is explicitly given then disable the strict column name checking in guessing. """ dat = ascii.read(['1,2', '3,4'], format='basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic') assert len(dat) == 1 assert dat.colnames == ['a', 'b'] dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # For good measure check the same in the unified I/O interface dat = Table.read(['1,2', '3,4'], format='ascii.basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_delimiter_arg(): """ When the delimiter is explicitly given then do not try others in guessing. 
""" fields = ['10.1E+19', '3.14', '2048', '-23'] values = [1.01e20, 3.14, 2048, -23] # Default guess should recognise CSV with optional spaces t0 = ascii.read(asciiIO(', '.join(fields)), guess=True) for n, v in zip(t0.colnames, values): assert t0[n][0] == v # Forcing space as delimiter produces type str columns ('10.1E+19,') t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ') for n, v in zip(t1.colnames[:-1], fields[:-1]): assert t1[n][0] == v+',' def test_reading_mixed_delimiter_tabs_spaces(): # Regression test for https://github.com/astropy/astropy/issues/6770 dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc')) assert len(dat) == 2 Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header', names=['a', 'b', 'c']) assert len(dat) == 2 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_with_names_arg(fast_reader): """ Test that a bad value of `names` raises an exception. """ # CParser only uses columns in `names` and thus reports mismach in num_col with pytest.raises(ascii.InconsistentTableError): ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if ('Reader' in test_opts and 'fast_{}'.format(test_opts['Reader']._format_name) in core.FAST_CLASSES): # has fast version if 'Inputter' not in test_opts: # fast reader doesn't allow this test_opts['fast_reader'] = fast_reader table = ascii.read(testfile['name'], **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files_via_table(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if 'Reader' in test_opts: format = 'ascii.{}'.format(test_opts['Reader']._format_name) del test_opts['Reader'] else: format = 'ascii' if f'fast_{format}' in core.FAST_CLASSES: test_opts['fast_reader'] = fast_reader table = Table.read(testfile['name'], format=format, **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_guess_all_files(): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue if not testfile['opts'].get('guess', True): continue print('\n\n******** READING {}'.format(testfile['name'])) for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []): # Copy read options except for those in filter_read_opts guess_opts = dict((k, v) for k, v in testfile['opts'].items() if k not in filter_read_opts) table = ascii.read(testfile['name'], guess=True, **guess_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_daophot_indef(): """Test that INDEF is correctly 
interpreted as a missing value""" table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) for col in table.itercols(): # Four columns have all INDEF values and are masked, rest are normal Column if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'): assert np.all(col.mask) else: assert not hasattr(col, 'mask') def test_daophot_types(): """ Test specific data types which are different from what would be inferred automatically based only data values. DAOphot reader uses the header information to assign types. """ table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) assert table['LID'].dtype.char in 'fd' # float or double assert table['MAG'].dtype.char in 'fd' # even without any data values assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int) assert table['ID'].dtype.char in 'il' # int or long def test_daophot_header_keywords(): table = ascii.read('data/daophot.dat', Reader=ascii.Daophot) expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'), ('REJFILE', '"hello world"', 'filename', '%-23s'), ('SCALE', '1.', 'units/pix', '%-23.7g'),) keywords = table.meta['keywords'] # Ordered dict of keyword structures for name, value, units, format_ in expected_keywords: keyword = keywords[name] assert_equal(keyword['value'], value) assert_equal(keyword['units'], units) assert_equal(keyword['format'], format_) def test_daophot_multiple_aperture(): table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot) assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file assert table['MERR2'][0] == 1.171 assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3 def test_daophot_multiple_aperture2(): table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot) assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file assert table['MERR2'][0] == 0.049 assert np.all(table['RAPERT5'] == 5.) 


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_empty_table_no_header(fast_reader):
    with pytest.raises(ascii.InconsistentTableError):
        ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader,
                   guess=False, fast_reader=fast_reader)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_wrong_quote(fast_reader):
    with pytest.raises(ascii.InconsistentTableError):
        ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col(fast_reader):
    with pytest.raises(ascii.InconsistentTableError):
        ascii.read('data/bad.txt', fast_reader=fast_reader)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col2(fast_reader):
    with pytest.raises(ascii.InconsistentTableError):
        ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader)


@raises(OSError)
def test_missing_file():
    ascii.read('does_not_exist')


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_names(fast_reader):
    names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
    data = ascii.read('data/simple3.txt', names=names, delimiter='|',
                      fast_reader=fast_reader)
    assert_equal(data.dtype.names, names)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_include_names(fast_reader):
    names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
    include_names = ('c1', 'c3')
    data = ascii.read('data/simple3.txt', names=names,
                      include_names=include_names, delimiter='|',
                      fast_reader=fast_reader)
    assert_equal(data.dtype.names, include_names)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_exclude_names(fast_reader):
    exclude_names = ('Y', 'object')
    data = ascii.read('data/simple3.txt', exclude_names=exclude_names,
                      delimiter='|', fast_reader=fast_reader)
    assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad'))


def test_include_names_daophot():
    include_names = ('ID', 'MAG', 'PIER')
    data = ascii.read('data/daophot.dat', include_names=include_names)
    assert_equal(data.dtype.names, include_names)


def test_exclude_names_daophot():
    exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR')
    data = ascii.read('data/daophot.dat', exclude_names=exclude_names)
    assert_equal(data.dtype.names,
                 ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER'))


def test_custom_process_lines():
    def process_lines(lines):
        bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE)
        striplines = (x.strip() for x in lines)
        return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0]

    reader = ascii.get_reader(delimiter='|')
    reader.inputter.process_lines = process_lines
    data = reader.read('data/bars_at_ends.txt')
    assert_equal(data.dtype.names,
                 ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'))
    assert_equal(len(data), 3)


def test_custom_process_line():
    def process_line(line):
        line_out = re.sub(r'^\|\s*', '', line.strip())
        return line_out

    reader = ascii.get_reader(data_start=2, delimiter='|')
    reader.header.splitter.process_line = process_line
    reader.data.splitter.process_line = process_line
    data = reader.read('data/nls1_stackinfo.dbout')
    cols = get_testfiles('data/nls1_stackinfo.dbout')['cols']
    assert_equal(data.dtype.names, cols[1:])
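

# ascii.get_reader() returns a fully constructed reader whose header and data
# splitters can be swapped out.  BaseSplitter is the simplest splitter
# implementation (plain whitespace splitting, with no quote handling), which
# is sufficient for the whitespace-delimited test4.dat file read below.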


def test_custom_splitters():
    reader = ascii.get_reader()
    reader.header.splitter = ascii.BaseSplitter()
    reader.data.splitter = ascii.BaseSplitter()
    f = 'data/test4.dat'
    data = reader.read(f)
    testfile = get_testfiles(f)
    assert_equal(data.dtype.names, testfile['cols'])
    assert_equal(len(data), testfile['nrows'])
    assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091)
    assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704)
    assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148)
    assert_equal(data.field('statname')[2], 'chi2modvar')
    assert_almost_equal(data.field('statval')[2], 497.56468441)


def test_start_end():
    data = ascii.read('data/test5.dat', header_start=1, data_start=3,
                      data_end=-5)
    assert_equal(len(data), 13)
    assert_equal(data.field('statname')[0], 'chi2xspecvar')
    assert_equal(data.field('statname')[-1], 'chi2gehrels')


def test_set_converters():
    converters = {'zabs1.nh': [ascii.convert_numpy('int32'),
                               ascii.convert_numpy('float32')],
                  'p1.gamma': [ascii.convert_numpy('str')]}
    data = ascii.read('data/test4.dat', converters=converters)
    assert_equal(str(data['zabs1.nh'].dtype), 'float32')
    assert_equal(data['p1.gamma'][0], '1.26764500000')


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_string(fast_reader):
    f = 'data/simple.txt'
    with open(f) as fd:
        table = fd.read()
    testfile = get_testfiles(f)
    data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
    assert_equal(data.dtype.names, testfile['cols'])
    assert_equal(len(data), testfile['nrows'])


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_filelike(fast_reader):
    f = 'data/simple.txt'
    testfile = get_testfiles(f)
    with open(f, 'rb') as fd:
        data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts'])
    assert_equal(data.dtype.names, testfile['cols'])
    assert_equal(len(data), testfile['nrows'])


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_lines(fast_reader):
    f = 'data/simple.txt'
    with open(f) as fd:
        table = fd.readlines()
    testfile = get_testfiles(f)
    data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
    assert_equal(data.dtype.names, testfile['cols'])
    assert_equal(len(data), testfile['nrows'])


def test_comment_lines():
    table = ascii.get_reader(Reader=ascii.Rdb)
    data = table.read('data/apostrophe.rdb')
    assert_equal(table.comment_lines, ['# first comment', ' # second comment'])
    assert_equal(data.meta['comments'], ['first comment', 'second comment'])


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values(fast_reader):
    f = 'data/fill_values.txt'
    testfile = get_testfiles(f)
    data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
                      **testfile['opts'])
    assert_true((data['a'].mask == [False, True]).all())
    assert_true((data['a'] == [1, 1]).all())
    assert_true((data['b'].mask == [False, True]).all())
    assert_true((data['b'] == [2, 1]).all())


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_col(fast_reader):
    f = 'data/fill_values.txt'
    testfile = get_testfiles(f)
    data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader,
                      **testfile['opts'])
    check_fill_values(data)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_include_names(fast_reader):
    f = 'data/fill_values.txt'
    testfile = get_testfiles(f)
    data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
                      fill_include_names=['b'], **testfile['opts'])
    check_fill_values(data)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_exclude_names(fast_reader):
    f = 'data/fill_values.txt'
    testfile = get_testfiles(f)
    data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
                      fill_exclude_names=['a'], **testfile['opts'])
    check_fill_values(data)
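

# Shared checker for the fill_values tests above: with fill_values=('a', '1')
# restricted to column 'b', the 'a' entry in column 'a' is left untouched
# (a plain string column) while the matching entry in column 'b' is replaced
# by '1' and masked.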


def check_fill_values(data):
    """Compare array column by column with expectation"""
    assert not hasattr(data['a'], 'mask')
    assert_true((data['a'] == ['1', 'a']).all())
    assert_true((data['b'].mask == [False, True]).all())
    # Check that masked value is "do not care" in comparison
    assert_true((data['b'] == [2, -999]).all())
    data['b'].mask = False  # explicitly unmask for comparison
    assert_true((data['b'] == [2, 1]).all())


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_list(fast_reader):
    f = 'data/fill_values.txt'
    testfile = get_testfiles(f)
    data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],
                      fast_reader=fast_reader, **testfile['opts'])
    data['a'].mask = False  # explicitly unmask for comparison
    assert_true((data['a'] == [42, 42]).all())


def test_masking_Cds():
    f = 'data/cds.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    assert_true(data['AK'].mask[0])
    assert not hasattr(data['Fit'], 'mask')


def test_null_Ipac():
    f = 'data/ipac.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    mask = np.array([(True, False, True, False, True),
                     (False, False, False, False, False)],
                    dtype=[('ra', '|b1'),
                           ('dec', '|b1'),
                           ('sai', '|b1'),
                           ('v2', '|b1'),
                           ('sptype', '|b1')])
    assert np.all(data.mask == mask)


def test_Ipac_meta():
    keywords = OrderedDict((('intval', 1),
                            ('floatval', 2.3e3),
                            ('date', "Wed Sp 20 09:48:36 1995"),
                            ('key_continue', 'IPAC keywords can continue across lines')))
    comments = ['This is an example of a valid comment']

    f = 'data/ipac.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    assert data.meta['keywords'].keys() == keywords.keys()
    for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):
        assert data_kv['value'] == kv
    assert data.meta['comments'] == comments


def test_set_guess_kwarg():
    """Read a file using guess with one of the typical guess_kwargs explicitly set."""
    data = ascii.read('data/space_delim_no_header.dat',
                      delimiter=',', guess=True)
    assert data.dtype.names == ('1 3.4 hello',)
    assert len(data) == 1


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_rdb_wrong_type(fast_reader):
    """Read RDB data with inconsistent data type (expect failure)"""
    table = """col1\tcol2
N\tN
1\tHello"""
    with pytest.raises(ValueError):
        ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_default_missing(fast_reader):
    """Read a table with empty values and ensure that corresponding entries are masked"""
    table = '\n'.join(['a,b,c,d',
                       '1,3,,',
                       '2, , 4.0 , ss '])
    dat = ascii.read(table, fast_reader=fast_reader)
    assert dat.masked is False
    assert dat.pformat() == [' a   b   c   d ',
                             '--- --- --- ---',
                             '  1   3  --  --',
                             '  2  -- 4.0  ss']

    # Single row table with a single missing element
    table = """ a \n "" """
    dat = ascii.read(table, fast_reader=fast_reader)
    assert dat.pformat() == [' a ', '---', ' --']
    assert dat['a'].dtype.kind == 'i'

    # Same test with a fixed width reader
    table = '\n'.join([' a   b   c   d ',
                       '--- --- --- ---',
                       '  1   3        ',
                       '  2     4.0  ss'])
    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
    assert dat.masked is False
    assert dat.pformat() == [' a   b   c   d ',
                             '--- --- --- ---',
                             '  1   3  --  --',
                             '  2  -- 4.0  ss']

    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
    assert dat.masked is False
    assert dat.pformat() == [' a   b   c   d ',
                             '--- --- --- ---',
                             '  1   3        ',
                             '  2     4.0  ss']

    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
    assert dat.masked is False
    assert dat.pformat() == [' a   b   c   d ',
                             '--- --- --- ---',
                             '  1   3        ',
                             '  2     4.0  ss']


def get_testfiles(name=None):
    """Set up information about the columns, number of rows, and reader params to
    read a bunch of test files and verify columns and number of rows."""

    testfiles = [
        {'cols': ('agasc_id', 'n_noids', 'n_obs'),
         'name': 'data/apostrophe.rdb',
         'nrows': 2,
         'opts': {'Reader': ascii.Rdb}},
        {'cols': ('agasc_id', 'n_noids', 'n_obs'),
         'name': 'data/apostrophe.tab',
         'nrows': 2,
         'opts': {'Reader': ascii.Tab}},
        {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs',
                  'Match', 'Class', 'AK', 'Fit'),
         'name': 'data/cds.dat',
         'nrows': 1,
         'opts': {'Reader': ascii.Cds}},
        # Test malformed CDS file (issues #2241 #467)
        {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs',
                  'Match', 'Class', 'AK', 'Fit'),
         'name': 'data/cds_malformed.dat',
         'nrows': 1,
         'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}},
        {'cols': ('a', 'b', 'c'),
         'name': 'data/commented_header.dat',
         'nrows': 2,
         'opts': {'Reader': ascii.CommentedHeader}},
        {'cols': ('a', 'b', 'c'),
         'name': 'data/commented_header2.dat',
         'nrows': 2,
         'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}},
        {'cols': ('col1', 'col2', 'col3', 'col4', 'col5'),
         'name': 'data/continuation.dat',
         'nrows': 2,
         'opts': {'Inputter': ascii.ContinuationLinesInputter,
                  'Reader': ascii.NoHeader}},
        {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER',
                  'SHARPNESS', 'CHI', 'PIER', 'PERROR'),
         'name': 'data/daophot.dat',
         'nrows': 2,
         'opts': {'Reader': ascii.Daophot}},
        {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALU-ES', 'VALU-ES_1',
                  'FLAG'),
         'name': 'data/sextractor.dat',
         'nrows': 3,
         'opts': {'Reader': ascii.SExtractor}},
        {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
         'name': 'data/ipac.dat',
         'nrows': 2,
         'opts': {'Reader': ascii.Ipac}},
        {'cols': ('col0', 'objID', 'osrcid', 'xsrcid', 'SpecObjID', 'ra',
                  'dec', 'obsid', 'ccdid', 'z', 'modelMag_i',
                  'modelMagErr_i', 'modelMag_r', 'modelMagErr_r', 'expo',
                  'theta', 'rad_ecf_39', 'detlim90', 'fBlim90'),
         'name': 'data/nls1_stackinfo.dbout',
         'nrows': 58,
         'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}},
        {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs',
                  'Match', 'Class', 'AK', 'Fit'),
         'name': 'data/no_data_cds.dat',
         'nrows': 0,
         'opts': {'Reader': ascii.Cds}},
        {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER',
                  'SHARPNESS', 'CHI', 'PIER', 'PERROR'),
         'name': 'data/no_data_daophot.dat',
         'nrows': 0,
         'opts': {'Reader': ascii.Daophot}},
        {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALUES', 'VALUES_1',
                  'FLAG'),
         'name': 'data/no_data_sextractor.dat',
         'nrows': 0,
         'opts': {'Reader': ascii.SExtractor}},
        {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
         'name': 'data/no_data_ipac.dat',
         'nrows': 0,
         'opts': {'Reader': ascii.Ipac}},
        {'cols': ('ra', 'v2'),
         'name': 'data/ipac.dat',
         'nrows': 2,
         'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}},
        {'cols': ('a', 'b', 'c'),
         'name': 'data/no_data_with_header.dat',
         'nrows': 0,
         'opts': {}},
        {'cols': ('agasc_id', 'n_noids', 'n_obs'),
         'name': 'data/short.rdb',
         'nrows': 7,
         'opts': {'Reader': ascii.Rdb}},
        {'cols': ('agasc_id', 'n_noids', 'n_obs'),
         'name': 'data/short.tab',
         'nrows': 7,
         'opts': {'Reader': ascii.Tab}},
        {'cols': ('test 1a', 'test2', 'test3', 'test4'),
         'name': 'data/simple.txt',
         'nrows': 2,
         'opts': {'quotechar': "'"}},
        {'cols': ('top1', 'top2', 'top3', 'top4'),
         'name': 'data/simple.txt',
         'nrows': 1,
         'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}},
        {'cols': ('top1', 'top2', 'top3', 'top4'),
         'name': 'data/simple.txt',
         'nrows': 1,
         'opts': {'quotechar': "'", 'header_start': 1}},
        {'cols': ('top1', 'top2', 'top3', 'top4'),
         'name': 'data/simple.txt',
         'nrows': 2,
         'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}},
        {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
         'name': 'data/simple2.txt',
         'nrows': 3,
         'opts': {'delimiter': '|'}},
        {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
         'name': 'data/simple3.txt',
         'nrows': 2,
         'opts': {'delimiter': '|'}},
        {'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'),
         'name': 'data/simple4.txt',
         'nrows': 3,
         'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}},
        {'cols': ('col1', 'col2', 'col3'),
         'name': 'data/space_delim_no_header.dat',
         'nrows': 2,
         'opts': {'Reader': ascii.NoHeader}},
        {'cols': ('col1', 'col2', 'col3'),
         'name': 'data/space_delim_no_header.dat',
         'nrows': 2,
         'opts': {'Reader': ascii.NoHeader, 'header_start': None}},
        {'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'),
         'name': 'data/space_delim_blank_lines.txt',
         'nrows': 3,
         'opts': {}},
        {'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'),
         'name': 'data/test4.dat',
         'nrows': 9,
         'opts': {}},
        {'cols': ('a', 'b', 'c'),
         'name': 'data/fill_values.txt',
         'nrows': 2,
         'opts': {'delimiter': ','}},
        {'name': 'data/whitespace.dat',
         'cols': ('quoted colname with tab\tinside', 'col2', 'col3'),
         'nrows': 2,
         'opts': {'delimiter': r'\s'}},
        {'name': 'data/simple_csv.csv',
         'cols': ('a', 'b', 'c'),
         'nrows': 2,
         'opts': {'Reader': ascii.Csv}},
        {'name': 'data/simple_csv_missing.csv',
         'cols': ('a', 'b', 'c'),
         'nrows': 2,
         'skip': True,
         'opts': {'Reader': ascii.Csv}},
        {'cols': ('cola', 'colb', 'colc'),
         'name': 'data/latex1.tex',
         'nrows': 2,
         'opts': {'Reader': ascii.Latex}},
        {'cols': ('Facility', 'Id', 'exposure', 'date'),
         'name': 'data/latex2.tex',
         'nrows': 3,
         'opts': {'Reader': ascii.AASTex}},
        {'cols': ('cola', 'colb', 'colc'),
         'name': 'data/latex3.tex',
         'nrows': 2,
         'opts': {'Reader': ascii.Latex}},
        {'cols': ('Col1', 'Col2', 'Col3', 'Col4'),
         'name': 'data/fixed_width_2_line.txt',
         'nrows': 2,
         'opts': {'Reader': ascii.FixedWidthTwoLine}},
    ]

    try:
        import bs4  # pylint: disable=W0611
        testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'),
                          'name': 'data/html.html',
                          'nrows': 3,
                          'opts': {'Reader': ascii.HTML}})
    except ImportError:
        pass

    if name is not None:
        return [x for x in testfiles if x['name'] == name][0]
    else:
        return testfiles


def test_header_start_exception():
    '''Check certain Readers throw an exception if ``header_start`` is set

    For certain Readers it does not make sense to set the ``header_start``,
    they throw an exception if you try.
    This was implemented in response to issue #885.
    '''
    for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac,
                        ascii.BaseReader, ascii.FixedWidthNoHeader,
                        ascii.Cds, ascii.Daophot]:
        with pytest.raises(ValueError):
            ascii.core._get_reader(readerclass, header_start=5)


def test_csv_table_read():
    """
    Check for a regression introduced by #1935.  Pseudo-CSV file with
    commented header line.
    """
    lines = ['# a, b',
             '1, 2',
             '3, 4']
    t = ascii.read(lines)
    assert t.colnames == ['a', 'b']


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_overlapping_names(fast_reader):
    """
    Check that the names argument list can overlap with the existing column
    names.  This tests the issue in #1991.
    """
    t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader)
    assert t.colnames == ['b', 'a']
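

# SExtractor catalogs describe each output column in numbered header comment
# lines (name, description, unit).  The next two tests check that the reader
# propagates those units and descriptions onto the table columns, including
# the expansion of array-valued trailing columns such as MAG_APER.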
""" table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False) expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'), Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'), Unit('mag * arcsec**(-2)')] expected_descrs = ['Running object number', 'Windowed position estimate along x', 'Windowed position estimate along y', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', 'Extraction flags', None, 'Barycenter position along MAMA x axis', 'Peak surface brightness above background'] for i, colname in enumerate(table.colnames): assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_sextractor_last_column_array(): """ Make sure that the SExtractor reader handles the last column correctly when it is array-like. """ table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False) expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'MAG_AUTO', 'MAGERR_AUTO', 'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6', 'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6'] expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')] expected_descrs = ['Object position along x', None, 'Right ascension of barycenter (J2000)', 'Declination of barycenter (J2000)', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', ] + [ 'Fixed aperture magnitude vector'] * 7 + [ 'RMS error vector for fixed aperture mag.'] * 7 for i, colname in enumerate(table.colnames): assert table[colname].name == expected_columns[i] assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_list_with_newlines(): """ Check that lists of strings where some strings consist of just a newline ("\n") are parsed correctly. """ t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"]) assert t.colnames == ['abc'] assert len(t) == 2 assert t[0][0] == 123 assert t[1][0] == 456 def test_commented_csv(): """ Check that Csv reader does not have ignore lines with the # comment character which is defined for most Basic readers. """ t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv') assert t.colnames == ['#a', 'b'] assert len(t) == 2 assert t['#a'][1] == '#3' def test_meta_comments(): """ Make sure that line comments are included in the ``meta`` attribute of the output Table. 
""" t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3']) assert t.colnames == ['a', 'b', 'c'] assert t.meta['comments'] == ['comment1', 'comment2'] def test_guess_fail(): """ Check the error message when guess fails """ with pytest.raises(ascii.InconsistentTableError) as err: ascii.read('asfdasdf\n1 2 3', format='basic') assert "** To figure out why the table did not read, use guess=False and" in str(err.value) # Test the case with guessing enabled but for a format that has no free params with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='ipac') assert 'At least one header line beginning and ending with delimiter required' in str(err.value) # Test the case with guessing enabled but with all params specified with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False) assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value) @pytest.mark.xfail('not HAS_BZ2') def test_guessing_file_object(): """ Test guessing a file object. Fixes #3013 and similar issue noted in #3019. """ t = ascii.read(open('data/ipac.dat.bz2', 'rb')) assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype'] def test_pformat_roundtrip(): """Check that the screen output of ``print tab`` can be read. See #3025.""" """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,1.11,1', '2, 2, 4.0 , ss ']) dat = ascii.read(table) out = ascii.read(dat.pformat()) assert len(dat) == len(out) assert dat.colnames == out.colnames for c in dat.colnames: assert np.all(dat[c] == out[c]) def test_ipac_abbrev(): lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|', '| r | rE | rea | real | D | do | dou | f | i | l | da| c |', ' 1 2 3 4 5 6 7 8 9 10 11 12 '] dat = ascii.read(lines, format='ipac') for name in dat.columns[0:8]: assert dat[name].dtype.kind == 'f' for name in dat.columns[8:10]: assert dat[name].dtype.kind == 'i' for name in dat.columns[10:12]: assert dat[name].dtype.kind in ('U', 'S') def test_almost_but_not_quite_daophot(): '''Regression test for #3319. This tables looks so close to a daophot table, that the daophot reader gets quite far before it fails with an AttributeError. Note that this table will actually be read as Commented Header table with the columns ['some', 'header', 'info']. ''' lines = ["# some header info", "#F header info beginning with 'F'", "1 2 3", "4 5 6", "7 8 9"] dat = ascii.read(lines) assert len(dat) == 3 @pytest.mark.parametrize('fast', [False, 'force']) def test_commented_header_comments(fast): """ Test that comments in commented_header are as expected with header_start at different positions, and that the table round-trips. 
""" comments = ['comment 1', 'comment 2', 'comment 3'] lines = ['# a b', '# comment 1', '# comment 2', '# comment 3', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] out = StringIO() ascii.write(dat, out, format='commented_header', fast_writer=fast) assert out.getvalue().splitlines() == lines lines.insert(1, lines.pop(0)) dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(2, lines.pop(1)) dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(3, lines.pop(2)) dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines = ['# a b', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert 'comments' not in dat.meta assert dat.colnames == ['a', 'b'] def test_probably_html(): """ Test the routine for guessing if a table input to ascii.read is probably HTML """ for table in ('data/html.html', 'http://blah.com/table.html', 'https://blah.com/table.html', 'file://blah/table.htm', 'ftp://blah.com/table.html', 'file://blah.com/table.htm', ' <! doctype html > hello world', 'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype html > ', ' hello world'), ): assert _probably_html(table) is True for table in ('data/html.htms', 'Xhttp://blah.com/table.html', ' https://blah.com/table.htm', 'fole://blah/table.htm', ' < doctype html > hello world', 'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype htm > ', ' hello world'), [[1, 2, 3]], ): assert _probably_html(table) is False @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_data_header_start(fast_reader): tests = [(['# comment', '', ' ', 'skip this line', # line 0 'a b', # line 1 '1 2'], # line 2 [{'header_start': 1}, {'header_start': 1, 'data_start': 2} ] ), (['# comment', '', ' \t', 'skip this line', # line 0 'a b', # line 1 '', ' \t', 'skip this line', # line 2 '1 2'], # line 3 [{'header_start': 1, 'data_start': 3}]), (['# comment', '', ' ', 'a b', # line 0 '', ' ', 'skip this line', # line 1 '1 2'], # line 2 [{'header_start': 0, 'data_start': 2}, {'data_start': 2}])] for lines, kwargs_list in tests: for kwargs in kwargs_list: t = ascii.read(lines, format='basic', fast_reader=fast_reader, guess=True, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 1 assert np.all(t['a'] == [1]) # Sanity check that the expected Reader is being used assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Basic if (fast_reader is False) else ascii.FastBasic) def test_table_with_no_newline(): """ Test that an input file which is completely empty fails in the expected way. Test that an input file with one line but no newline succeeds. 
""" # With guessing table = BytesIO() with pytest.raises(ascii.InconsistentTableError): ascii.read(table) # Without guessing table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=False, format='basic') assert 'No header line found' in str(err.value) table = BytesIO() t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic') assert not t and t.as_array().size == 0 # Put a single line of column names but with no newline for kwargs in [dict(), dict(guess=False, fast_reader=False, format='basic'), dict(guess=False, fast_reader=True, format='fast_basic')]: table = BytesIO() table.write(b'a b') t = ascii.read(table, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 0 def test_path_object(): fpath = pathlib.Path('data/simple.txt') data = ascii.read(fpath) assert len(data) == 2 assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4'] assert data['test2'][1] == 'hat2' def test_column_conversion_error(): """ Test that context information (upstream exception message) from column conversion error is provided. """ ipac = """\ | col0 | | double | 1 2 """ with pytest.raises(ValueError) as err: ascii.read(ipac, guess=False, format='ipac') assert 'Column col0 failed to convert:' in str(err.value) with pytest.raises(ValueError) as err: ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []}) assert 'no converters' in str(err.value) def test_non_C_locale_with_fast_reader(): """Test code that forces "C" locale while calling fast reader (#4364)""" current = locale.setlocale(locale.LC_ALL) try: if platform.system() == 'Darwin': locale.setlocale(locale.LC_ALL, 'de_DE') else: locale.setlocale(locale.LC_ALL, 'de_DE.utf8') for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}): t = ascii.read(['a b', '1.5 2'], format='basic', guess=False, fast_reader=fast_reader) assert t['a'].dtype.kind == 'f' except locale.Error as e: pytest.skip(f'Locale error: {e}') finally: locale.setlocale(locale.LC_ALL, current) def test_no_units_for_char_columns(): '''Test that a char column of a Table is assigned no unit and not a dimensionless unit.''' t1 = Table([["A"]], names="B") out = StringIO() ascii.write(t1, out, format="ipac") t2 = ascii.read(out.getvalue(), format="ipac", guess=False) assert t2["B"].unit is None def test_initial_column_fill_values(): """Regression test for #5336, #5338.""" class TestHeader(ascii.BasicHeader): def _set_cols_from_names(self): self.cols = [ascii.Column(name=x) for x in self.names] # Set some initial fill values for col in self.cols: col.fill_values = {'--': '0'} class Tester(ascii.Basic): header_class = TestHeader reader = ascii.get_reader(Reader=Tester) assert reader.read("""# Column definition is the first uncommented line # Default delimiter is the space character. a b c # Data starts after the header column definition, blank lines ignored -- 2 3 4 5 6 """)['a'][0] is np.ma.masked def test_latex_no_trailing_backslash(): """ Test that latex/aastex file with no trailing backslash can be read. 
""" lines = r""" \begin{table} \begin{tabular}{ccc} a & b & c \\ 1 & 1.0 & c \\ % comment 3\% & 3.0 & e % comment \end{tabular} \end{table} """ dat = ascii.read(lines, format='latex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) def text_aastex_no_trailing_backslash(): lines = r""" \begin{deluxetable}{ccc} \tablehead{\colhead{a} & \colhead{b} & \colhead{c}} \startdata 1 & 1.0 & c \\ 2 & 2.0 & d \\ % comment 3\% & 3.0 & e % comment \enddata \end{deluxetable} """ dat = ascii.read(lines, format='aastex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) @pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252']) def test_read_with_encoding(tmpdir, encoding): data = { 'commented_header': '# à b è \n 1 2 héllo', 'csv': 'à,b,è\n1,2,héllo' } testfile = str(tmpdir.join('test.txt')) for fmt, content in data.items(): with open(testfile, 'w', encoding=encoding) as f: f.write(content) table = ascii.read(testfile, encoding=encoding) assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] for guess in (True, False): table = ascii.read(testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess) assert table['è'].dtype.kind == 'U' assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] def test_unsupported_read_with_encoding(tmpdir): # Fast reader is not supported, make sure it raises an exception with pytest.raises(ascii.ParameterError): ascii.read('data/simple3.txt', guess=False, fast_reader='force', encoding='latin1', format='fast_csv') def test_read_chunks_input_types(): """ Test chunked reading for different input types: file path, file object, and string input. """ fpath = 'data/test5.dat' t1 = ascii.read(fpath, header_start=1, data_start=3, ) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): t_gen = ascii.read(fp, header_start=1, data_start=3, guess=False, format='fast_basic', fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) == 4 t2 = table.vstack(ts) assert np.all(t1 == t2) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): # Now read the full table in chunks t3 = ascii.read(fp, header_start=1, data_start=3, fast_reader={'chunk_size': 300}) assert np.all(t1 == t3) @pytest.mark.parametrize('masked', [True, False]) def test_read_chunks_formats(masked): """ Test different supported formats for chunked reading. """ t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked) for i, name in enumerate(t1.colnames): t1.rename_column(name, 'col{}'.format(i + 1)) # TO DO commented_header does not currently work due to the special-cased # implementation of header parsing. 


@pytest.mark.parametrize('masked', [True, False])
def test_read_chunks_formats(masked):
    """
    Test different supported formats for chunked reading.
    """
    t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked)
    for i, name in enumerate(t1.colnames):
        t1.rename_column(name, 'col{}'.format(i + 1))

    # TODO: commented_header does not currently work due to the special-cased
    # implementation of header parsing.

    for format in 'tab', 'csv', 'no_header', 'rdb', 'basic':
        out = StringIO()
        ascii.write(t1, out, format=format)
        t_gen = ascii.read(out.getvalue(), format=format,
                           fast_reader={'chunk_size': 400, 'chunk_generator': True})
        ts = list(t_gen)
        for t in ts:
            for col, col1 in zip(t.columns.values(), t1.columns.values()):
                assert col.name == col1.name
                assert col.dtype.kind == col1.dtype.kind

        assert len(ts) > 4
        t2 = table.vstack(ts)
        assert np.all(t1 == t2)

        # Now read the full table in chunks
        t3 = ascii.read(out.getvalue(), format=format,
                        fast_reader={'chunk_size': 400})
        assert np.all(t1 == t3)


def test_read_chunks_chunk_size_too_small():
    fpath = 'data/test5.dat'
    with pytest.raises(ValueError) as err:
        ascii.read(fpath, header_start=1, data_start=3,
                   fast_reader={'chunk_size': 10})
    assert 'no newline found in chunk (chunk_size too small?)' in str(err.value)


def test_read_chunks_table_changes():
    """Column changes type or size between chunks.  This also tests the case
    with no final newline.
    """
    col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50
    table = '\n'.join(col)
    t1 = ascii.read(table, guess=False)
    t2 = ascii.read(table, fast_reader={'chunk_size': 100})

    # This also confirms that the dtypes are exactly the same, i.e.
    # the string itemsizes are the same.
    assert np.all(t1 == t2)


def test_read_non_ascii():
    """Test that the pure-Python reader is used in case the file contains
    non-ASCII characters in it.
    """
    table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv')
    assert np.all(table['col1'] == ['\u2119', '1'])
    assert np.all(table['col2'] == ['\u01b4', '2'])


@pytest.mark.parametrize('enable', [True, False, 'force'])
def test_kwargs_dict_guess(enable):
    """Test that fast_reader dictionary is preserved through guessing sequence.
    """
    # Fails for enable=(True, 'force') - #5578
    ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable))
    assert get_read_trace()[-1]['kwargs']['Reader'] is (
        ascii.Tab if (enable is False) else ascii.FastTab)
    for k in get_read_trace():
        if not k.get('status', 'Disabled').startswith('Disabled'):
            assert k.get('kwargs').get('fast_reader').get('enable') is enable
""" fields = ['10.1E+19', '3.14', '2048', '-23'] values = [1.01e20, 3.14, 2048, -23] # Default guess should recognise CSV with optional spaces t0 = ascii.read(asciiIO(', '.join(fields)), guess=True) for n, v in zip(t0.colnames, values): assert t0[n][0] == v # Forcing space as delimiter produces type str columns ('10.1E+19,') t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ') for n, v in zip(t1.colnames[:-1], fields[:-1]): assert t1[n][0] == v+',' def test_reading_mixed_delimiter_tabs_spaces(): # Regression test for https://github.com/astropy/astropy/issues/6770 dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc')) assert len(dat) == 2 Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header', names=['a', 'b', 'c']) assert len(dat) == 2 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_with_names_arg(fast_reader): """ Test that a bad value of `names` raises an exception. """ # CParser only uses columns in `names` and thus reports mismach in num_col with pytest.raises(ascii.InconsistentTableError): ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if ('Reader' in test_opts and 'fast_{}'.format(test_opts['Reader']._format_name) in core.FAST_CLASSES): # has fast version if 'Inputter' not in test_opts: # fast reader doesn't allow this test_opts['fast_reader'] = fast_reader table = ascii.read(testfile['name'], **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files_via_table(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if 'Reader' in test_opts: format = 'ascii.{}'.format(test_opts['Reader']._format_name) del test_opts['Reader'] else: format = 'ascii' if f'fast_{format}' in core.FAST_CLASSES: test_opts['fast_reader'] = fast_reader table = Table.read(testfile['name'], format=format, **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_guess_all_files(): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue if not testfile['opts'].get('guess', True): continue print('\n\n******** READING {}'.format(testfile['name'])) for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []): # Copy read options except for those in filter_read_opts guess_opts = dict((k, v) for k, v in testfile['opts'].items() if k not in filter_read_opts) table = ascii.read(testfile['name'], guess=True, **guess_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_daophot_indef(): """Test that INDEF is correctly 
interpreted as a missing value""" table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) for col in table.itercols(): # Four columns have all INDEF values and are masked, rest are normal Column if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'): assert np.all(col.mask) else: assert not hasattr(col, 'mask') def test_daophot_types(): """ Test specific data types which are different from what would be inferred automatically based only data values. DAOphot reader uses the header information to assign types. """ table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) assert table['LID'].dtype.char in 'fd' # float or double assert table['MAG'].dtype.char in 'fd' # even without any data values assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int) assert table['ID'].dtype.char in 'il' # int or long def test_daophot_header_keywords(): table = ascii.read('data/daophot.dat', Reader=ascii.Daophot) expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'), ('REJFILE', '"hello world"', 'filename', '%-23s'), ('SCALE', '1.', 'units/pix', '%-23.7g'),) keywords = table.meta['keywords'] # Ordered dict of keyword structures for name, value, units, format_ in expected_keywords: keyword = keywords[name] assert_equal(keyword['value'], value) assert_equal(keyword['units'], units) assert_equal(keyword['format'], format_) def test_daophot_multiple_aperture(): table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot) assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file assert table['MERR2'][0] == 1.171 assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3 def test_daophot_multiple_aperture2(): table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot) assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file assert table['MERR2'][0] == 0.049 assert np.all(table['RAPERT5'] == 5.) 
# assert all the 5th apertures are same 5.0 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_empty_table_no_header(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader, guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_wrong_quote(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/bad.txt', fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col2(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader) @raises(OSError) def test_missing_file(): ascii.read('does_not_exist') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') data = ascii.read('data/simple3.txt', names=names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_include_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') include_names = ('c1', 'c3') data = ascii.read('data/simple3.txt', names=names, include_names=include_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, include_names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_exclude_names(fast_reader): exclude_names = ('Y', 'object') data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad')) def test_include_names_daophot(): include_names = ('ID', 'MAG', 'PIER') data = ascii.read('data/daophot.dat', include_names=include_names) assert_equal(data.dtype.names, include_names) def test_exclude_names_daophot(): exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR') data = ascii.read('data/daophot.dat', exclude_names=exclude_names) assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER')) def test_custom_process_lines(): def process_lines(lines): bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE) striplines = (x.strip() for x in lines) return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0] reader = ascii.get_reader(delimiter='|') reader.inputter.process_lines = process_lines data = reader.read('data/bars_at_ends.txt') assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad')) assert_equal(len(data), 3) def test_custom_process_line(): def process_line(line): line_out = re.sub(r'^\|\s*', '', line.strip()) return line_out reader = ascii.get_reader(data_start=2, delimiter='|') reader.header.splitter.process_line = process_line reader.data.splitter.process_line = process_line data = reader.read('data/nls1_stackinfo.dbout') cols = get_testfiles('data/nls1_stackinfo.dbout')['cols'] assert_equal(data.dtype.names, cols[1:]) def test_custom_splitters(): reader = ascii.get_reader() reader.header.splitter = ascii.BaseSplitter() reader.data.splitter = ascii.BaseSplitter() f = 'data/test4.dat' data = reader.read(f) testfile = get_testfiles(f) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), 
testfile['nrows']) assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091) assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704) assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148) assert_equal(data.field('statname')[2], 'chi2modvar') assert_almost_equal(data.field('statval')[2], 497.56468441) def test_start_end(): data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5) assert_equal(len(data), 13) assert_equal(data.field('statname')[0], 'chi2xspecvar') assert_equal(data.field('statname')[-1], 'chi2gehrels') def test_set_converters(): converters = {'zabs1.nh': [ascii.convert_numpy('int32'), ascii.convert_numpy('float32')], 'p1.gamma': [ascii.convert_numpy('str')] } data = ascii.read('data/test4.dat', converters=converters) assert_equal(str(data['zabs1.nh'].dtype), 'float32') assert_equal(data['p1.gamma'][0], '1.26764500000') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_string(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.read() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_filelike(fast_reader): f = 'data/simple.txt' testfile = get_testfiles(f) with open(f, 'rb') as fd: data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_lines(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.readlines() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) def test_comment_lines(): table = ascii.get_reader(Reader=ascii.Rdb) data = table.read('data/apostrophe.rdb') assert_equal(table.comment_lines, ['# first comment', ' # second comment']) assert_equal(data.meta['comments'], ['first comment', 'second comment']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, **testfile['opts']) assert_true((data['a'].mask == [False, True]).all()) assert_true((data['a'] == [1, 1]).all()) assert_true((data['b'].mask == [False, True]).all()) assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_col(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader, **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_include_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_include_names=['b'], **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_exclude_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_exclude_names=['a'], **testfile['opts']) check_fill_values(data) def check_fill_values(data): """compare 
array column by column with expectation """ assert not hasattr(data['a'], 'mask') assert_true((data['a'] == ['1', 'a']).all()) assert_true((data['b'].mask == [False, True]).all()) # Check that masked value is "do not care" in comparison assert_true((data['b'] == [2, -999]).all()) data['b'].mask = False # explicitly unmask for comparison assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_list(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')], fast_reader=fast_reader, **testfile['opts']) data['a'].mask = False # explicitly unmask for comparison assert_true((data['a'] == [42, 42]).all()) def test_masking_Cds(): f = 'data/cds.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) assert_true(data['AK'].mask[0]) assert not hasattr(data['Fit'], 'mask') def test_null_Ipac(): f = 'data/ipac.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) mask = np.array([(True, False, True, False, True), (False, False, False, False, False)], dtype=[('ra', '|b1'), ('dec', '|b1'), ('sai', '|b1'), ('v2', '|b1'), ('sptype', '|b1')]) assert np.all(data.mask == mask) def test_Ipac_meta(): keywords = OrderedDict((('intval', 1), ('floatval', 2.3e3), ('date', "Wed Sp 20 09:48:36 1995"), ('key_continue', 'IPAC keywords can continue across lines'))) comments = ['This is an example of a valid comment'] f = 'data/ipac.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) assert data.meta['keywords'].keys() == keywords.keys() for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()): assert data_kv['value'] == kv assert data.meta['comments'] == comments def test_set_guess_kwarg(): """Read a file using guess with one of the typical guess_kwargs explicitly set.""" data = ascii.read('data/space_delim_no_header.dat', delimiter=',', guess=True) assert(data.dtype.names == ('1 3.4 hello',)) assert(len(data) == 1) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_rdb_wrong_type(fast_reader): """Read RDB data with inconstent data type (except failure)""" table = """col1\tcol2 N\tN 1\tHello""" with pytest.raises(ValueError): ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_default_missing(fast_reader): """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,,', '2, , 4.0 , ss ']) dat = ascii.read(table, fast_reader=fast_reader) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss'] # Single row table with a single missing element table = """ a \n "" """ dat = ascii.read(table, fast_reader=fast_reader) assert dat.pformat() == [' a ', '---', ' --'] assert dat['a'].dtype.kind == 'i' # Same test with a fixed width reader table = '\n'.join([' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss']) dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss'] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[]) assert dat.masked is False assert dat.pformat() == [' a b c d ', 
'--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] def get_testfiles(name=None): """Set up information about the columns, number of rows, and reader params to read a bunch of test files and verify columns and number of rows.""" testfiles = [ {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.rdb', 'nrows': 2, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.tab', 'nrows': 2, 'opts': {'Reader': ascii.Tab}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds}}, # Test malformed CDS file (issues #2241 #467) {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds_malformed.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header2.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5'), 'name': 'data/continuation.dat', 'nrows': 2, 'opts': {'Inputter': ascii.ContinuationLinesInputter, 'Reader': ascii.NoHeader}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/daophot.dat', 'nrows': 2, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALU-ES', 'VALU-ES_1', 'FLAG'), 'name': 'data/sextractor.dat', 'nrows': 3, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('col0', 'objID', 'osrcid', 'xsrcid', 'SpecObjID', 'ra', 'dec', 'obsid', 'ccdid', 'z', 'modelMag_i', 'modelMagErr_i', 'modelMag_r', 'modelMagErr_r', 'expo', 'theta', 'rad_ecf_39', 'detlim90', 'fBlim90'), 'name': 'data/nls1_stackinfo.dbout', 'nrows': 58, 'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/no_data_cds.dat', 'nrows': 0, 'opts': {'Reader': ascii.Cds}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/no_data_daophot.dat', 'nrows': 0, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALUES', 'VALUES_1', 'FLAG'), 'name': 'data/no_data_sextractor.dat', 'nrows': 0, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/no_data_ipac.dat', 'nrows': 0, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('ra', 'v2'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}}, {'cols': ('a', 'b', 'c'), 'name': 'data/no_data_with_header.dat', 'nrows': 0, 'opts': {}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.rdb', 'nrows': 7, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.tab', 'nrows': 7, 'opts': {'Reader': ascii.Tab}}, {'cols': ('test 1a', 'test2', 'test3', 'test4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'"}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 1, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 
1, 'opts': {'quotechar': "'", 'header_start': 1}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple2.txt', 'nrows': 3, 'opts': {'delimiter': '|'}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple3.txt', 'nrows': 2, 'opts': {'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'), 'name': 'data/simple4.txt', 'nrows': 3, 'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader, 'header_start': None}}, {'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'), 'name': 'data/space_delim_blank_lines.txt', 'nrows': 3, 'opts': {}}, {'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'), 'name': 'data/test4.dat', 'nrows': 9, 'opts': {}}, {'cols': ('a', 'b', 'c'), 'name': 'data/fill_values.txt', 'nrows': 2, 'opts': {'delimiter': ','}}, {'name': 'data/whitespace.dat', 'cols': ('quoted colname with tab\tinside', 'col2', 'col3'), 'nrows': 2, 'opts': {'delimiter': r'\s'}}, {'name': 'data/simple_csv.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'opts': {'Reader': ascii.Csv}}, {'name': 'data/simple_csv_missing.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'skip': True, 'opts': {'Reader': ascii.Csv}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex1.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Facility', 'Id', 'exposure', 'date'), 'name': 'data/latex2.tex', 'nrows': 3, 'opts': {'Reader': ascii.AASTex}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex3.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Col1', 'Col2', 'Col3', 'Col4'), 'name': 'data/fixed_width_2_line.txt', 'nrows': 2, 'opts': {'Reader': ascii.FixedWidthTwoLine}}, ] try: import bs4 # pylint: disable=W0611 testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'), 'name': 'data/html.html', 'nrows': 3, 'opts': {'Reader': ascii.HTML}}) except ImportError: pass if name is not None: return [x for x in testfiles if x['name'] == name][0] else: return testfiles def test_header_start_exception(): '''Check certain Readers throw an exception if ``header_start`` is set For certain Readers it does not make sense to set the ``header_start``, they throw an exception if you try. This was implemented in response to issue #885. ''' for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac, ascii.BaseReader, ascii.FixedWidthNoHeader, ascii.Cds, ascii.Daophot]: with pytest.raises(ValueError): reader = ascii.core._get_reader(readerclass, header_start=5) def test_csv_table_read(): """ Check for a regression introduced by #1935. Pseudo-CSV file with commented header line. """ lines = ['# a, b', '1, 2', '3, 4'] t = ascii.read(lines) assert t.colnames == ['a', 'b'] @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_overlapping_names(fast_reader): """ Check that the names argument list can overlap with the existing column names. This tests the issue in #1991. """ t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader) assert t.colnames == ['b', 'a'] def test_sextractor_units(): """ Make sure that the SExtractor reader correctly inputs descriptions and units. 
""" table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False) expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'), Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'), Unit('mag * arcsec**(-2)')] expected_descrs = ['Running object number', 'Windowed position estimate along x', 'Windowed position estimate along y', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', 'Extraction flags', None, 'Barycenter position along MAMA x axis', 'Peak surface brightness above background'] for i, colname in enumerate(table.colnames): assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_sextractor_last_column_array(): """ Make sure that the SExtractor reader handles the last column correctly when it is array-like. """ table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False) expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'MAG_AUTO', 'MAGERR_AUTO', 'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6', 'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6'] expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')] expected_descrs = ['Object position along x', None, 'Right ascension of barycenter (J2000)', 'Declination of barycenter (J2000)', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', ] + [ 'Fixed aperture magnitude vector'] * 7 + [ 'RMS error vector for fixed aperture mag.'] * 7 for i, colname in enumerate(table.colnames): assert table[colname].name == expected_columns[i] assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_list_with_newlines(): """ Check that lists of strings where some strings consist of just a newline ("\n") are parsed correctly. """ t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"]) assert t.colnames == ['abc'] assert len(t) == 2 assert t[0][0] == 123 assert t[1][0] == 456 def test_commented_csv(): """ Check that Csv reader does not have ignore lines with the # comment character which is defined for most Basic readers. """ t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv') assert t.colnames == ['#a', 'b'] assert len(t) == 2 assert t['#a'][1] == '#3' def test_meta_comments(): """ Make sure that line comments are included in the ``meta`` attribute of the output Table. 
""" t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3']) assert t.colnames == ['a', 'b', 'c'] assert t.meta['comments'] == ['comment1', 'comment2'] def test_guess_fail(): """ Check the error message when guess fails """ with pytest.raises(ascii.InconsistentTableError) as err: ascii.read('asfdasdf\n1 2 3', format='basic') assert "** To figure out why the table did not read, use guess=False and" in str(err.value) # Test the case with guessing enabled but for a format that has no free params with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='ipac') assert 'At least one header line beginning and ending with delimiter required' in str(err.value) # Test the case with guessing enabled but with all params specified with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False) assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value) @pytest.mark.xfail('not HAS_BZ2') def test_guessing_file_object(): """ Test guessing a file object. Fixes #3013 and similar issue noted in #3019. """ t = ascii.read(open('data/ipac.dat.bz2', 'rb')) assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype'] def test_pformat_roundtrip(): """Check that the screen output of ``print tab`` can be read. See #3025.""" """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,1.11,1', '2, 2, 4.0 , ss ']) dat = ascii.read(table) out = ascii.read(dat.pformat()) assert len(dat) == len(out) assert dat.colnames == out.colnames for c in dat.colnames: assert np.all(dat[c] == out[c]) def test_ipac_abbrev(): lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|', '| r | rE | rea | real | D | do | dou | f | i | l | da| c |', ' 1 2 3 4 5 6 7 8 9 10 11 12 '] dat = ascii.read(lines, format='ipac') for name in dat.columns[0:8]: assert dat[name].dtype.kind == 'f' for name in dat.columns[8:10]: assert dat[name].dtype.kind == 'i' for name in dat.columns[10:12]: assert dat[name].dtype.kind in ('U', 'S') def test_almost_but_not_quite_daophot(): '''Regression test for #3319. This tables looks so close to a daophot table, that the daophot reader gets quite far before it fails with an AttributeError. Note that this table will actually be read as Commented Header table with the columns ['some', 'header', 'info']. ''' lines = ["# some header info", "#F header info beginning with 'F'", "1 2 3", "4 5 6", "7 8 9"] dat = ascii.read(lines) assert len(dat) == 3 @pytest.mark.parametrize('fast', [False, 'force']) def test_commented_header_comments(fast): """ Test that comments in commented_header are as expected with header_start at different positions, and that the table round-trips. 
""" comments = ['comment 1', 'comment 2', 'comment 3'] lines = ['# a b', '# comment 1', '# comment 2', '# comment 3', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] out = StringIO() ascii.write(dat, out, format='commented_header', fast_writer=fast) assert out.getvalue().splitlines() == lines lines.insert(1, lines.pop(0)) dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(2, lines.pop(1)) dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(3, lines.pop(2)) dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines = ['# a b', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert 'comments' not in dat.meta assert dat.colnames == ['a', 'b'] def test_probably_html(): """ Test the routine for guessing if a table input to ascii.read is probably HTML """ for table in ('data/html.html', 'http://blah.com/table.html', 'https://blah.com/table.html', 'file://blah/table.htm', 'ftp://blah.com/table.html', 'file://blah.com/table.htm', ' <! doctype html > hello world', 'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype html > ', ' hello world'), ): assert _probably_html(table) is True for table in ('data/html.htms', 'Xhttp://blah.com/table.html', ' https://blah.com/table.htm', 'fole://blah/table.htm', ' < doctype html > hello world', 'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype htm > ', ' hello world'), [[1, 2, 3]], ): assert _probably_html(table) is False @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_data_header_start(fast_reader): tests = [(['# comment', '', ' ', 'skip this line', # line 0 'a b', # line 1 '1 2'], # line 2 [{'header_start': 1}, {'header_start': 1, 'data_start': 2} ] ), (['# comment', '', ' \t', 'skip this line', # line 0 'a b', # line 1 '', ' \t', 'skip this line', # line 2 '1 2'], # line 3 [{'header_start': 1, 'data_start': 3}]), (['# comment', '', ' ', 'a b', # line 0 '', ' ', 'skip this line', # line 1 '1 2'], # line 2 [{'header_start': 0, 'data_start': 2}, {'data_start': 2}])] for lines, kwargs_list in tests: for kwargs in kwargs_list: t = ascii.read(lines, format='basic', fast_reader=fast_reader, guess=True, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 1 assert np.all(t['a'] == [1]) # Sanity check that the expected Reader is being used assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Basic if (fast_reader is False) else ascii.FastBasic) def test_table_with_no_newline(): """ Test that an input file which is completely empty fails in the expected way. Test that an input file with one line but no newline succeeds. 
""" # With guessing table = BytesIO() with pytest.raises(ascii.InconsistentTableError): ascii.read(table) # Without guessing table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=False, format='basic') assert 'No header line found' in str(err.value) table = BytesIO() t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic') assert not t and t.as_array().size == 0 # Put a single line of column names but with no newline for kwargs in [dict(), dict(guess=False, fast_reader=False, format='basic'), dict(guess=False, fast_reader=True, format='fast_basic')]: table = BytesIO() table.write(b'a b') t = ascii.read(table, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 0 def test_path_object(): fpath = pathlib.Path('data/simple.txt') data = ascii.read(fpath) assert len(data) == 2 assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4'] assert data['test2'][1] == 'hat2' def test_column_conversion_error(): """ Test that context information (upstream exception message) from column conversion error is provided. """ ipac = """\ | col0 | | double | 1 2 """ with pytest.raises(ValueError) as err: ascii.read(ipac, guess=False, format='ipac') assert 'Column col0 failed to convert:' in str(err.value) with pytest.raises(ValueError) as err: ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []}) assert 'no converters' in str(err.value) def test_non_C_locale_with_fast_reader(): """Test code that forces "C" locale while calling fast reader (#4364)""" current = locale.setlocale(locale.LC_ALL) try: if platform.system() == 'Darwin': locale.setlocale(locale.LC_ALL, 'de_DE') else: locale.setlocale(locale.LC_ALL, 'de_DE.utf8') for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}): t = ascii.read(['a b', '1.5 2'], format='basic', guess=False, fast_reader=fast_reader) assert t['a'].dtype.kind == 'f' except locale.Error as e: pytest.skip(f'Locale error: {e}') finally: locale.setlocale(locale.LC_ALL, current) def test_no_units_for_char_columns(): '''Test that a char column of a Table is assigned no unit and not a dimensionless unit.''' t1 = Table([["A"]], names="B") out = StringIO() ascii.write(t1, out, format="ipac") t2 = ascii.read(out.getvalue(), format="ipac", guess=False) assert t2["B"].unit is None def test_initial_column_fill_values(): """Regression test for #5336, #5338.""" class TestHeader(ascii.BasicHeader): def _set_cols_from_names(self): self.cols = [ascii.Column(name=x) for x in self.names] # Set some initial fill values for col in self.cols: col.fill_values = {'--': '0'} class Tester(ascii.Basic): header_class = TestHeader reader = ascii.get_reader(Reader=Tester) assert reader.read("""# Column definition is the first uncommented line # Default delimiter is the space character. a b c # Data starts after the header column definition, blank lines ignored -- 2 3 4 5 6 """)['a'][0] is np.ma.masked def test_latex_no_trailing_backslash(): """ Test that latex/aastex file with no trailing backslash can be read. 
""" lines = r""" \begin{table} \begin{tabular}{ccc} a & b & c \\ 1 & 1.0 & c \\ % comment 3\% & 3.0 & e % comment \end{tabular} \end{table} """ dat = ascii.read(lines, format='latex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) def text_aastex_no_trailing_backslash(): lines = r""" \begin{deluxetable}{ccc} \tablehead{\colhead{a} & \colhead{b} & \colhead{c}} \startdata 1 & 1.0 & c \\ 2 & 2.0 & d \\ % comment 3\% & 3.0 & e % comment \enddata \end{deluxetable} """ dat = ascii.read(lines, format='aastex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) @pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252']) def test_read_with_encoding(tmpdir, encoding): data = { 'commented_header': '# à b è \n 1 2 héllo', 'csv': 'à,b,è\n1,2,héllo' } testfile = str(tmpdir.join('test.txt')) for fmt, content in data.items(): with open(testfile, 'w', encoding=encoding) as f: f.write(content) table = ascii.read(testfile, encoding=encoding) assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] for guess in (True, False): table = ascii.read(testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess) assert table['è'].dtype.kind == 'U' assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] def test_unsupported_read_with_encoding(tmpdir): # Fast reader is not supported, make sure it raises an exception with pytest.raises(ascii.ParameterError): ascii.read('data/simple3.txt', guess=False, fast_reader='force', encoding='latin1', format='fast_csv') def test_read_chunks_input_types(): """ Test chunked reading for different input types: file path, file object, and string input. """ fpath = 'data/test5.dat' t1 = ascii.read(fpath, header_start=1, data_start=3, ) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): t_gen = ascii.read(fp, header_start=1, data_start=3, guess=False, format='fast_basic', fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) == 4 t2 = table.vstack(ts) assert np.all(t1 == t2) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): # Now read the full table in chunks t3 = ascii.read(fp, header_start=1, data_start=3, fast_reader={'chunk_size': 300}) assert np.all(t1 == t3) @pytest.mark.parametrize('masked', [True, False]) def test_read_chunks_formats(masked): """ Test different supported formats for chunked reading. """ t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked) for i, name in enumerate(t1.colnames): t1.rename_column(name, 'col{}'.format(i + 1)) # TO DO commented_header does not currently work due to the special-cased # implementation of header parsing. 
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic': out = StringIO() ascii.write(t1, out, format=format) t_gen = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) > 4 t2 = table.vstack(ts) assert np.all(t1 == t2) # Now read the full table in chunks t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400}) assert np.all(t1 == t3) def test_read_chunks_chunk_size_too_small(): fpath = 'data/test5.dat' with pytest.raises(ValueError) as err: ascii.read(fpath, header_start=1, data_start=3, fast_reader={'chunk_size': 10}) assert 'no newline found in chunk (chunk_size too small?)' in str(err.value) def test_read_chunks_table_changes(): """Column changes type or size between chunks. This also tests the case with no final newline. """ col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50 table = '\n'.join(col) t1 = ascii.read(table, guess=False) t2 = ascii.read(table, fast_reader={'chunk_size': 100}) # This also confirms that the dtypes are exactly the same, i.e. # the string itemsizes are the same. assert np.all(t1 == t2) def test_read_non_ascii(): """Test that pure-Python reader is used in case the file contains non-ASCII characters in it. """ table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv') assert np.all(table['col1'] == ['\u2119', '1']) assert np.all(table['col2'] == ['\u01b4', '2']) @pytest.mark.parametrize('enable', [True, False, 'force']) def test_kwargs_dict_guess(enable): """Test that fast_reader dictionary is preserved through guessing sequence. """ # Fails for enable=(True, 'force') - #5578 ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable)) assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Tab if (enable is False) else ascii.FastTab) for k in get_read_trace(): if not k.get('status', 'Disabled').startswith('Disabled'): assert k.get('kwargs').get('fast_reader').get('enable') is enable
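The behaviour exercised by test_kwargs_dict_guess above can be seen directly in a few lines. The following standalone sketch is illustrative only, not part of the test suite; it assumes nothing beyond the public astropy.io.ascii API already used throughout this file, and the input data is arbitrary:

# Illustrative sketch: a dict-valued fast_reader is carried unchanged through
# the guessing sequence, and get_read_trace() records which Reader class was
# ultimately used for the successful read.
from astropy.io import ascii
from astropy.io.ascii.ui import get_read_trace

dat = ascii.read('a\tb\n1\t2\n3\t4.0', fast_reader={'enable': True})
# With enable=True the expectation (cf. issue #5578 above) is that the
# C-based FastTab reader is selected rather than the pure-Python Tab reader.
print(get_read_trace()[-1]['kwargs']['Reader'])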
MSeifert04/astropy
astropy/io/ascii/tests/test_read.py
astropy/modeling/__init__.py
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from astropy.utils.decorators import format_doc
from astropy.coordinates.baseframe import frame_transform_graph, base_doc
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.transformations import DynamicMatrixTransform
from astropy.coordinates import earth_orientation as earth

from .baseradec import BaseRADecFrame, doc_components
from .utils import EQUINOX_J2000

__all__ = ['FK5']


doc_footer = """
    Other parameters
    ----------------
    equinox : `~astropy.time.Time`
        The equinox of this frame.
"""


@format_doc(base_doc, components=doc_components, footer=doc_footer)
class FK5(BaseRADecFrame):
    """
    A coordinate or frame in the FK5 system.

    Note that this is a barycentric version of FK5 - that is, the origin for
    this frame is the Solar System Barycenter, *not* the Earth geocenter.

    The frame attributes are listed under **Other Parameters**.
    """

    equinox = TimeAttribute(default=EQUINOX_J2000)

    @staticmethod
    def _precession_matrix(oldequinox, newequinox):
        """
        Compute and return the precession matrix for FK5 based on Capitaine et
        al. 2003/IAU2006.  Used inside some of the transformation functions.

        Parameters
        ----------
        oldequinox : `~astropy.time.Time`
            The equinox to precess from.
        newequinox : `~astropy.time.Time`
            The equinox to precess to.

        Returns
        -------
        pmatrix : array
            The precession matrix to transform to the new equinox.
        """
        return earth.precession_matrix_Capitaine(oldequinox, newequinox)


# This is the "self-transform".  Defined at module level because the decorator
# needs a reference to the FK5 class
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK5)
def fk5_to_fk5(fk5coord1, fk5frame2):
    return fk5coord1._precession_matrix(fk5coord1.equinox, fk5frame2.equinox)
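As a brief usage illustration of the self-transform registered above, here is a minimal sketch. It assumes only the public astropy.coordinates and astropy.time APIs; the coordinate values are arbitrary:

# Illustrative sketch: precessing an FK5 coordinate to a different equinox.
import astropy.units as u
from astropy.coordinates import FK5
from astropy.time import Time

# An FK5 coordinate at the default J2000 equinox.
c_j2000 = FK5(ra=10.68 * u.deg, dec=41.27 * u.deg)

# Transforming to an FK5 frame with a different equinox invokes the
# fk5_to_fk5 self-transform above, i.e. it applies the Capitaine
# precession matrix between the two equinoxes.
c_b1950 = c_j2000.transform_to(FK5(equinox=Time('B1950')))
print(c_b1950.ra.deg, c_b1950.dec.deg)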
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import re from io import BytesIO, open from collections import OrderedDict import locale import platform from io import StringIO import pathlib import pytest import numpy as np from astropy.io import ascii from astropy.table import Table from astropy import table from astropy.units import Unit from astropy.table.table_helpers import simple_table from .common import (raises, assert_equal, assert_almost_equal, assert_true) from astropy.io.ascii import core from astropy.io.ascii.ui import _probably_html, get_read_trace, cparser from astropy.utils.exceptions import AstropyWarning # setup/teardown function to have the tests run in the correct directory from .common import setup_function, teardown_function try: import bz2 # pylint: disable=W0611 except ImportError: HAS_BZ2 = False else: HAS_BZ2 = True asciiIO = lambda x: BytesIO(x.encode('ascii')) @pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False}, {'use_fast_converter': True}, 'force']) def test_convert_overflow(fast_reader): """ Test reading an extremely large integer, which falls through to string due to an overflow error (#2234). The C parsers used to return inf (kind 'f') for this. """ expected_kind = 'U' with pytest.warns(AstropyWarning, match="OverflowError converting to IntType in column a"): dat = ascii.read(['a', '1' * 10000], format='basic', fast_reader=fast_reader, guess=False) assert dat['a'].dtype.kind == expected_kind def test_guess_with_names_arg(): """ Make sure reading a table with guess=True gives the expected result when the names arg is specified. """ # This is a NoHeader format table and so `names` should replace # the default col0, col1 names. It fails as a Basic format # table when guessing because the column names would be '1', '2'. dat = ascii.read(['1,2', '3,4'], names=('a', 'b')) assert len(dat) == 2 assert dat.colnames == ['a', 'b'] # This is a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c,d', '3,4'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # This is also a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c d', 'e f'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_format_arg(): """ When the format or Reader is explicitly given then disable the strict column name checking in guessing. """ dat = ascii.read(['1,2', '3,4'], format='basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic') assert len(dat) == 1 assert dat.colnames == ['a', 'b'] dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # For good measure check the same in the unified I/O interface dat = Table.read(['1,2', '3,4'], format='ascii.basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_delimiter_arg(): """ When the delimiter is explicitly given then do not try others in guessing. 
""" fields = ['10.1E+19', '3.14', '2048', '-23'] values = [1.01e20, 3.14, 2048, -23] # Default guess should recognise CSV with optional spaces t0 = ascii.read(asciiIO(', '.join(fields)), guess=True) for n, v in zip(t0.colnames, values): assert t0[n][0] == v # Forcing space as delimiter produces type str columns ('10.1E+19,') t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ') for n, v in zip(t1.colnames[:-1], fields[:-1]): assert t1[n][0] == v+',' def test_reading_mixed_delimiter_tabs_spaces(): # Regression test for https://github.com/astropy/astropy/issues/6770 dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc')) assert len(dat) == 2 Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header', names=['a', 'b', 'c']) assert len(dat) == 2 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_with_names_arg(fast_reader): """ Test that a bad value of `names` raises an exception. """ # CParser only uses columns in `names` and thus reports mismach in num_col with pytest.raises(ascii.InconsistentTableError): ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if ('Reader' in test_opts and 'fast_{}'.format(test_opts['Reader']._format_name) in core.FAST_CLASSES): # has fast version if 'Inputter' not in test_opts: # fast reader doesn't allow this test_opts['fast_reader'] = fast_reader table = ascii.read(testfile['name'], **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files_via_table(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if 'Reader' in test_opts: format = 'ascii.{}'.format(test_opts['Reader']._format_name) del test_opts['Reader'] else: format = 'ascii' if f'fast_{format}' in core.FAST_CLASSES: test_opts['fast_reader'] = fast_reader table = Table.read(testfile['name'], format=format, **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_guess_all_files(): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue if not testfile['opts'].get('guess', True): continue print('\n\n******** READING {}'.format(testfile['name'])) for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []): # Copy read options except for those in filter_read_opts guess_opts = dict((k, v) for k, v in testfile['opts'].items() if k not in filter_read_opts) table = ascii.read(testfile['name'], guess=True, **guess_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_daophot_indef(): """Test that INDEF is correctly 
    interpreted as a missing value"""
    table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)
    for col in table.itercols():
        # Four columns have all INDEF values and are masked, rest are normal Column
        if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'):
            assert np.all(col.mask)
        else:
            assert not hasattr(col, 'mask')


def test_daophot_types():
    """
    Test specific data types which are different from what would be
    inferred automatically based only on data values.  DAOphot reader uses
    the header information to assign types.
    """
    table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)
    assert table['LID'].dtype.char in 'fd'  # float or double
    assert table['MAG'].dtype.char in 'fd'  # even without any data values
    assert table['PIER'].dtype.char in 'US'  # string (data values are consistent with int)
    assert table['ID'].dtype.char in 'il'  # int or long


def test_daophot_header_keywords():
    table = ascii.read('data/daophot.dat', Reader=ascii.Daophot)
    expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'),
                         ('REJFILE', '"hello world"', 'filename', '%-23s'),
                         ('SCALE', '1.', 'units/pix', '%-23.7g'),)

    keywords = table.meta['keywords']  # Ordered dict of keyword structures
    for name, value, units, format_ in expected_keywords:
        keyword = keywords[name]
        assert_equal(keyword['value'], value)
        assert_equal(keyword['units'], units)
        assert_equal(keyword['format'], format_)


def test_daophot_multiple_aperture():
    table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot)
    assert 'MAG5' in table.colnames  # MAG5 is one of the newly created column names
    assert table['MAG5'][4] == 22.13  # A sample entry in daophot3.dat file
    assert table['MERR2'][0] == 1.171
    assert np.all(table['RAPERT5'] == 23.3)  # assert all the 5th apertures are same 23.3


def test_daophot_multiple_aperture2():
    table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot)
    assert 'MAG15' in table.colnames  # MAG15 is one of the newly created column names
    assert table['MAG15'][1] == -7.573  # A sample entry in daophot4.dat file
    assert table['MERR2'][0] == 0.049
    assert np.all(table['RAPERT5'] == 5.)
# assert all the 5th apertures are same 5.0 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_empty_table_no_header(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader, guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_wrong_quote(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/bad.txt', fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col2(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader) @raises(OSError) def test_missing_file(): ascii.read('does_not_exist') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') data = ascii.read('data/simple3.txt', names=names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_include_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') include_names = ('c1', 'c3') data = ascii.read('data/simple3.txt', names=names, include_names=include_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, include_names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_exclude_names(fast_reader): exclude_names = ('Y', 'object') data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad')) def test_include_names_daophot(): include_names = ('ID', 'MAG', 'PIER') data = ascii.read('data/daophot.dat', include_names=include_names) assert_equal(data.dtype.names, include_names) def test_exclude_names_daophot(): exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR') data = ascii.read('data/daophot.dat', exclude_names=exclude_names) assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER')) def test_custom_process_lines(): def process_lines(lines): bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE) striplines = (x.strip() for x in lines) return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0] reader = ascii.get_reader(delimiter='|') reader.inputter.process_lines = process_lines data = reader.read('data/bars_at_ends.txt') assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad')) assert_equal(len(data), 3) def test_custom_process_line(): def process_line(line): line_out = re.sub(r'^\|\s*', '', line.strip()) return line_out reader = ascii.get_reader(data_start=2, delimiter='|') reader.header.splitter.process_line = process_line reader.data.splitter.process_line = process_line data = reader.read('data/nls1_stackinfo.dbout') cols = get_testfiles('data/nls1_stackinfo.dbout')['cols'] assert_equal(data.dtype.names, cols[1:]) def test_custom_splitters(): reader = ascii.get_reader() reader.header.splitter = ascii.BaseSplitter() reader.data.splitter = ascii.BaseSplitter() f = 'data/test4.dat' data = reader.read(f) testfile = get_testfiles(f) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), 
testfile['nrows']) assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091) assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704) assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148) assert_equal(data.field('statname')[2], 'chi2modvar') assert_almost_equal(data.field('statval')[2], 497.56468441) def test_start_end(): data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5) assert_equal(len(data), 13) assert_equal(data.field('statname')[0], 'chi2xspecvar') assert_equal(data.field('statname')[-1], 'chi2gehrels') def test_set_converters(): converters = {'zabs1.nh': [ascii.convert_numpy('int32'), ascii.convert_numpy('float32')], 'p1.gamma': [ascii.convert_numpy('str')] } data = ascii.read('data/test4.dat', converters=converters) assert_equal(str(data['zabs1.nh'].dtype), 'float32') assert_equal(data['p1.gamma'][0], '1.26764500000') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_string(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.read() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_filelike(fast_reader): f = 'data/simple.txt' testfile = get_testfiles(f) with open(f, 'rb') as fd: data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_lines(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.readlines() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) def test_comment_lines(): table = ascii.get_reader(Reader=ascii.Rdb) data = table.read('data/apostrophe.rdb') assert_equal(table.comment_lines, ['# first comment', ' # second comment']) assert_equal(data.meta['comments'], ['first comment', 'second comment']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, **testfile['opts']) assert_true((data['a'].mask == [False, True]).all()) assert_true((data['a'] == [1, 1]).all()) assert_true((data['b'].mask == [False, True]).all()) assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_col(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader, **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_include_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_include_names=['b'], **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_exclude_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_exclude_names=['a'], **testfile['opts']) check_fill_values(data) def check_fill_values(data): """compare 
    array column by column with expectation """
    assert not hasattr(data['a'], 'mask')
    assert_true((data['a'] == ['1', 'a']).all())
    assert_true((data['b'].mask == [False, True]).all())
    # Check that masked value is "do not care" in comparison
    assert_true((data['b'] == [2, -999]).all())
    data['b'].mask = False  # explicitly unmask for comparison
    assert_true((data['b'] == [2, 1]).all())


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_list(fast_reader):
    f = 'data/fill_values.txt'
    testfile = get_testfiles(f)
    data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],
                      fast_reader=fast_reader, **testfile['opts'])
    data['a'].mask = False  # explicitly unmask for comparison
    assert_true((data['a'] == [42, 42]).all())


def test_masking_Cds():
    f = 'data/cds.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    assert_true(data['AK'].mask[0])
    assert not hasattr(data['Fit'], 'mask')


def test_null_Ipac():
    f = 'data/ipac.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    mask = np.array([(True, False, True, False, True),
                     (False, False, False, False, False)],
                    dtype=[('ra', '|b1'),
                           ('dec', '|b1'),
                           ('sai', '|b1'),
                           ('v2', '|b1'),
                           ('sptype', '|b1')])
    assert np.all(data.mask == mask)


def test_Ipac_meta():
    keywords = OrderedDict((('intval', 1),
                            ('floatval', 2.3e3),
                            ('date', "Wed Sp 20 09:48:36 1995"),
                            ('key_continue', 'IPAC keywords can continue across lines')))
    comments = ['This is an example of a valid comment']
    f = 'data/ipac.dat'
    testfile = get_testfiles(f)
    data = ascii.read(f, **testfile['opts'])
    assert data.meta['keywords'].keys() == keywords.keys()
    for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):
        assert data_kv['value'] == kv
    assert data.meta['comments'] == comments


def test_set_guess_kwarg():
    """Read a file using guess with one of the typical guess_kwargs explicitly set."""
    data = ascii.read('data/space_delim_no_header.dat',
                      delimiter=',', guess=True)
    assert(data.dtype.names == ('1 3.4 hello',))
    assert(len(data) == 1)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_rdb_wrong_type(fast_reader):
    """Read RDB data with inconsistent data type (expect failure)"""
    table = """col1\tcol2
N\tN
1\tHello"""
    with pytest.raises(ValueError):
        ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)


@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_default_missing(fast_reader):
    """Read a table with empty values and ensure that corresponding entries are masked"""
    table = '\n'.join(['a,b,c,d', '1,3,,', '2, , 4.0 , ss '])
    dat = ascii.read(table, fast_reader=fast_reader)
    assert dat.masked is False
    assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss']

    # Single row table with a single missing element
    table = """ a \n "" """
    dat = ascii.read(table, fast_reader=fast_reader)
    assert dat.pformat() == [' a ', '---', ' --']
    assert dat['a'].dtype.kind == 'i'

    # Same test with a fixed width reader
    table = '\n'.join([' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'])
    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
    assert dat.masked is False
    assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss']

    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
    assert dat.masked is False
    assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss']

    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
    assert dat.masked is False
    assert dat.pformat() == [' a b c d ',
'--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] def get_testfiles(name=None): """Set up information about the columns, number of rows, and reader params to read a bunch of test files and verify columns and number of rows.""" testfiles = [ {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.rdb', 'nrows': 2, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.tab', 'nrows': 2, 'opts': {'Reader': ascii.Tab}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds}}, # Test malformed CDS file (issues #2241 #467) {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds_malformed.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header2.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5'), 'name': 'data/continuation.dat', 'nrows': 2, 'opts': {'Inputter': ascii.ContinuationLinesInputter, 'Reader': ascii.NoHeader}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/daophot.dat', 'nrows': 2, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALU-ES', 'VALU-ES_1', 'FLAG'), 'name': 'data/sextractor.dat', 'nrows': 3, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('col0', 'objID', 'osrcid', 'xsrcid', 'SpecObjID', 'ra', 'dec', 'obsid', 'ccdid', 'z', 'modelMag_i', 'modelMagErr_i', 'modelMag_r', 'modelMagErr_r', 'expo', 'theta', 'rad_ecf_39', 'detlim90', 'fBlim90'), 'name': 'data/nls1_stackinfo.dbout', 'nrows': 58, 'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/no_data_cds.dat', 'nrows': 0, 'opts': {'Reader': ascii.Cds}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/no_data_daophot.dat', 'nrows': 0, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALUES', 'VALUES_1', 'FLAG'), 'name': 'data/no_data_sextractor.dat', 'nrows': 0, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/no_data_ipac.dat', 'nrows': 0, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('ra', 'v2'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}}, {'cols': ('a', 'b', 'c'), 'name': 'data/no_data_with_header.dat', 'nrows': 0, 'opts': {}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.rdb', 'nrows': 7, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.tab', 'nrows': 7, 'opts': {'Reader': ascii.Tab}}, {'cols': ('test 1a', 'test2', 'test3', 'test4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'"}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 1, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 
1, 'opts': {'quotechar': "'", 'header_start': 1}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple2.txt', 'nrows': 3, 'opts': {'delimiter': '|'}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple3.txt', 'nrows': 2, 'opts': {'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'), 'name': 'data/simple4.txt', 'nrows': 3, 'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader, 'header_start': None}}, {'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'), 'name': 'data/space_delim_blank_lines.txt', 'nrows': 3, 'opts': {}}, {'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'), 'name': 'data/test4.dat', 'nrows': 9, 'opts': {}}, {'cols': ('a', 'b', 'c'), 'name': 'data/fill_values.txt', 'nrows': 2, 'opts': {'delimiter': ','}}, {'name': 'data/whitespace.dat', 'cols': ('quoted colname with tab\tinside', 'col2', 'col3'), 'nrows': 2, 'opts': {'delimiter': r'\s'}}, {'name': 'data/simple_csv.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'opts': {'Reader': ascii.Csv}}, {'name': 'data/simple_csv_missing.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'skip': True, 'opts': {'Reader': ascii.Csv}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex1.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Facility', 'Id', 'exposure', 'date'), 'name': 'data/latex2.tex', 'nrows': 3, 'opts': {'Reader': ascii.AASTex}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex3.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Col1', 'Col2', 'Col3', 'Col4'), 'name': 'data/fixed_width_2_line.txt', 'nrows': 2, 'opts': {'Reader': ascii.FixedWidthTwoLine}}, ] try: import bs4 # pylint: disable=W0611 testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'), 'name': 'data/html.html', 'nrows': 3, 'opts': {'Reader': ascii.HTML}}) except ImportError: pass if name is not None: return [x for x in testfiles if x['name'] == name][0] else: return testfiles def test_header_start_exception(): '''Check certain Readers throw an exception if ``header_start`` is set For certain Readers it does not make sense to set the ``header_start``, they throw an exception if you try. This was implemented in response to issue #885. ''' for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac, ascii.BaseReader, ascii.FixedWidthNoHeader, ascii.Cds, ascii.Daophot]: with pytest.raises(ValueError): reader = ascii.core._get_reader(readerclass, header_start=5) def test_csv_table_read(): """ Check for a regression introduced by #1935. Pseudo-CSV file with commented header line. """ lines = ['# a, b', '1, 2', '3, 4'] t = ascii.read(lines) assert t.colnames == ['a', 'b'] @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_overlapping_names(fast_reader): """ Check that the names argument list can overlap with the existing column names. This tests the issue in #1991. """ t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader) assert t.colnames == ['b', 'a'] def test_sextractor_units(): """ Make sure that the SExtractor reader correctly inputs descriptions and units. 
""" table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False) expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'), Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'), Unit('mag * arcsec**(-2)')] expected_descrs = ['Running object number', 'Windowed position estimate along x', 'Windowed position estimate along y', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', 'Extraction flags', None, 'Barycenter position along MAMA x axis', 'Peak surface brightness above background'] for i, colname in enumerate(table.colnames): assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_sextractor_last_column_array(): """ Make sure that the SExtractor reader handles the last column correctly when it is array-like. """ table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False) expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'MAG_AUTO', 'MAGERR_AUTO', 'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6', 'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6'] expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')] expected_descrs = ['Object position along x', None, 'Right ascension of barycenter (J2000)', 'Declination of barycenter (J2000)', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', ] + [ 'Fixed aperture magnitude vector'] * 7 + [ 'RMS error vector for fixed aperture mag.'] * 7 for i, colname in enumerate(table.colnames): assert table[colname].name == expected_columns[i] assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_list_with_newlines(): """ Check that lists of strings where some strings consist of just a newline ("\n") are parsed correctly. """ t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"]) assert t.colnames == ['abc'] assert len(t) == 2 assert t[0][0] == 123 assert t[1][0] == 456 def test_commented_csv(): """ Check that Csv reader does not have ignore lines with the # comment character which is defined for most Basic readers. """ t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv') assert t.colnames == ['#a', 'b'] assert len(t) == 2 assert t['#a'][1] == '#3' def test_meta_comments(): """ Make sure that line comments are included in the ``meta`` attribute of the output Table. 
""" t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3']) assert t.colnames == ['a', 'b', 'c'] assert t.meta['comments'] == ['comment1', 'comment2'] def test_guess_fail(): """ Check the error message when guess fails """ with pytest.raises(ascii.InconsistentTableError) as err: ascii.read('asfdasdf\n1 2 3', format='basic') assert "** To figure out why the table did not read, use guess=False and" in str(err.value) # Test the case with guessing enabled but for a format that has no free params with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='ipac') assert 'At least one header line beginning and ending with delimiter required' in str(err.value) # Test the case with guessing enabled but with all params specified with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False) assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value) @pytest.mark.xfail('not HAS_BZ2') def test_guessing_file_object(): """ Test guessing a file object. Fixes #3013 and similar issue noted in #3019. """ t = ascii.read(open('data/ipac.dat.bz2', 'rb')) assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype'] def test_pformat_roundtrip(): """Check that the screen output of ``print tab`` can be read. See #3025.""" """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,1.11,1', '2, 2, 4.0 , ss ']) dat = ascii.read(table) out = ascii.read(dat.pformat()) assert len(dat) == len(out) assert dat.colnames == out.colnames for c in dat.colnames: assert np.all(dat[c] == out[c]) def test_ipac_abbrev(): lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|', '| r | rE | rea | real | D | do | dou | f | i | l | da| c |', ' 1 2 3 4 5 6 7 8 9 10 11 12 '] dat = ascii.read(lines, format='ipac') for name in dat.columns[0:8]: assert dat[name].dtype.kind == 'f' for name in dat.columns[8:10]: assert dat[name].dtype.kind == 'i' for name in dat.columns[10:12]: assert dat[name].dtype.kind in ('U', 'S') def test_almost_but_not_quite_daophot(): '''Regression test for #3319. This tables looks so close to a daophot table, that the daophot reader gets quite far before it fails with an AttributeError. Note that this table will actually be read as Commented Header table with the columns ['some', 'header', 'info']. ''' lines = ["# some header info", "#F header info beginning with 'F'", "1 2 3", "4 5 6", "7 8 9"] dat = ascii.read(lines) assert len(dat) == 3 @pytest.mark.parametrize('fast', [False, 'force']) def test_commented_header_comments(fast): """ Test that comments in commented_header are as expected with header_start at different positions, and that the table round-trips. 
""" comments = ['comment 1', 'comment 2', 'comment 3'] lines = ['# a b', '# comment 1', '# comment 2', '# comment 3', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] out = StringIO() ascii.write(dat, out, format='commented_header', fast_writer=fast) assert out.getvalue().splitlines() == lines lines.insert(1, lines.pop(0)) dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(2, lines.pop(1)) dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(3, lines.pop(2)) dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines = ['# a b', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert 'comments' not in dat.meta assert dat.colnames == ['a', 'b'] def test_probably_html(): """ Test the routine for guessing if a table input to ascii.read is probably HTML """ for table in ('data/html.html', 'http://blah.com/table.html', 'https://blah.com/table.html', 'file://blah/table.htm', 'ftp://blah.com/table.html', 'file://blah.com/table.htm', ' <! doctype html > hello world', 'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype html > ', ' hello world'), ): assert _probably_html(table) is True for table in ('data/html.htms', 'Xhttp://blah.com/table.html', ' https://blah.com/table.htm', 'fole://blah/table.htm', ' < doctype html > hello world', 'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype htm > ', ' hello world'), [[1, 2, 3]], ): assert _probably_html(table) is False @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_data_header_start(fast_reader): tests = [(['# comment', '', ' ', 'skip this line', # line 0 'a b', # line 1 '1 2'], # line 2 [{'header_start': 1}, {'header_start': 1, 'data_start': 2} ] ), (['# comment', '', ' \t', 'skip this line', # line 0 'a b', # line 1 '', ' \t', 'skip this line', # line 2 '1 2'], # line 3 [{'header_start': 1, 'data_start': 3}]), (['# comment', '', ' ', 'a b', # line 0 '', ' ', 'skip this line', # line 1 '1 2'], # line 2 [{'header_start': 0, 'data_start': 2}, {'data_start': 2}])] for lines, kwargs_list in tests: for kwargs in kwargs_list: t = ascii.read(lines, format='basic', fast_reader=fast_reader, guess=True, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 1 assert np.all(t['a'] == [1]) # Sanity check that the expected Reader is being used assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Basic if (fast_reader is False) else ascii.FastBasic) def test_table_with_no_newline(): """ Test that an input file which is completely empty fails in the expected way. Test that an input file with one line but no newline succeeds. 
""" # With guessing table = BytesIO() with pytest.raises(ascii.InconsistentTableError): ascii.read(table) # Without guessing table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=False, format='basic') assert 'No header line found' in str(err.value) table = BytesIO() t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic') assert not t and t.as_array().size == 0 # Put a single line of column names but with no newline for kwargs in [dict(), dict(guess=False, fast_reader=False, format='basic'), dict(guess=False, fast_reader=True, format='fast_basic')]: table = BytesIO() table.write(b'a b') t = ascii.read(table, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 0 def test_path_object(): fpath = pathlib.Path('data/simple.txt') data = ascii.read(fpath) assert len(data) == 2 assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4'] assert data['test2'][1] == 'hat2' def test_column_conversion_error(): """ Test that context information (upstream exception message) from column conversion error is provided. """ ipac = """\ | col0 | | double | 1 2 """ with pytest.raises(ValueError) as err: ascii.read(ipac, guess=False, format='ipac') assert 'Column col0 failed to convert:' in str(err.value) with pytest.raises(ValueError) as err: ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []}) assert 'no converters' in str(err.value) def test_non_C_locale_with_fast_reader(): """Test code that forces "C" locale while calling fast reader (#4364)""" current = locale.setlocale(locale.LC_ALL) try: if platform.system() == 'Darwin': locale.setlocale(locale.LC_ALL, 'de_DE') else: locale.setlocale(locale.LC_ALL, 'de_DE.utf8') for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}): t = ascii.read(['a b', '1.5 2'], format='basic', guess=False, fast_reader=fast_reader) assert t['a'].dtype.kind == 'f' except locale.Error as e: pytest.skip(f'Locale error: {e}') finally: locale.setlocale(locale.LC_ALL, current) def test_no_units_for_char_columns(): '''Test that a char column of a Table is assigned no unit and not a dimensionless unit.''' t1 = Table([["A"]], names="B") out = StringIO() ascii.write(t1, out, format="ipac") t2 = ascii.read(out.getvalue(), format="ipac", guess=False) assert t2["B"].unit is None def test_initial_column_fill_values(): """Regression test for #5336, #5338.""" class TestHeader(ascii.BasicHeader): def _set_cols_from_names(self): self.cols = [ascii.Column(name=x) for x in self.names] # Set some initial fill values for col in self.cols: col.fill_values = {'--': '0'} class Tester(ascii.Basic): header_class = TestHeader reader = ascii.get_reader(Reader=Tester) assert reader.read("""# Column definition is the first uncommented line # Default delimiter is the space character. a b c # Data starts after the header column definition, blank lines ignored -- 2 3 4 5 6 """)['a'][0] is np.ma.masked def test_latex_no_trailing_backslash(): """ Test that latex/aastex file with no trailing backslash can be read. 
""" lines = r""" \begin{table} \begin{tabular}{ccc} a & b & c \\ 1 & 1.0 & c \\ % comment 3\% & 3.0 & e % comment \end{tabular} \end{table} """ dat = ascii.read(lines, format='latex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) def text_aastex_no_trailing_backslash(): lines = r""" \begin{deluxetable}{ccc} \tablehead{\colhead{a} & \colhead{b} & \colhead{c}} \startdata 1 & 1.0 & c \\ 2 & 2.0 & d \\ % comment 3\% & 3.0 & e % comment \enddata \end{deluxetable} """ dat = ascii.read(lines, format='aastex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) @pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252']) def test_read_with_encoding(tmpdir, encoding): data = { 'commented_header': '# à b è \n 1 2 héllo', 'csv': 'à,b,è\n1,2,héllo' } testfile = str(tmpdir.join('test.txt')) for fmt, content in data.items(): with open(testfile, 'w', encoding=encoding) as f: f.write(content) table = ascii.read(testfile, encoding=encoding) assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] for guess in (True, False): table = ascii.read(testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess) assert table['è'].dtype.kind == 'U' assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] def test_unsupported_read_with_encoding(tmpdir): # Fast reader is not supported, make sure it raises an exception with pytest.raises(ascii.ParameterError): ascii.read('data/simple3.txt', guess=False, fast_reader='force', encoding='latin1', format='fast_csv') def test_read_chunks_input_types(): """ Test chunked reading for different input types: file path, file object, and string input. """ fpath = 'data/test5.dat' t1 = ascii.read(fpath, header_start=1, data_start=3, ) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): t_gen = ascii.read(fp, header_start=1, data_start=3, guess=False, format='fast_basic', fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) == 4 t2 = table.vstack(ts) assert np.all(t1 == t2) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): # Now read the full table in chunks t3 = ascii.read(fp, header_start=1, data_start=3, fast_reader={'chunk_size': 300}) assert np.all(t1 == t3) @pytest.mark.parametrize('masked', [True, False]) def test_read_chunks_formats(masked): """ Test different supported formats for chunked reading. """ t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked) for i, name in enumerate(t1.colnames): t1.rename_column(name, 'col{}'.format(i + 1)) # TO DO commented_header does not currently work due to the special-cased # implementation of header parsing. 
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic': out = StringIO() ascii.write(t1, out, format=format) t_gen = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) > 4 t2 = table.vstack(ts) assert np.all(t1 == t2) # Now read the full table in chunks t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400}) assert np.all(t1 == t3) def test_read_chunks_chunk_size_too_small(): fpath = 'data/test5.dat' with pytest.raises(ValueError) as err: ascii.read(fpath, header_start=1, data_start=3, fast_reader={'chunk_size': 10}) assert 'no newline found in chunk (chunk_size too small?)' in str(err.value) def test_read_chunks_table_changes(): """Column changes type or size between chunks. This also tests the case with no final newline. """ col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50 table = '\n'.join(col) t1 = ascii.read(table, guess=False) t2 = ascii.read(table, fast_reader={'chunk_size': 100}) # This also confirms that the dtypes are exactly the same, i.e. # the string itemsizes are the same. assert np.all(t1 == t2) def test_read_non_ascii(): """Test that pure-Python reader is used in case the file contains non-ASCII characters in it. """ table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv') assert np.all(table['col1'] == ['\u2119', '1']) assert np.all(table['col2'] == ['\u01b4', '2']) @pytest.mark.parametrize('enable', [True, False, 'force']) def test_kwargs_dict_guess(enable): """Test that fast_reader dictionary is preserved through guessing sequence. """ # Fails for enable=(True, 'force') - #5578 ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable)) assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Tab if (enable is False) else ascii.FastTab) for k in get_read_trace(): if not k.get('status', 'Disabled').startswith('Disabled'): assert k.get('kwargs').get('fast_reader').get('enable') is enable
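# A minimal, stand-alone sketch (not part of the test suite above) of the
# chunked-reading pattern those tests exercise: with the fast reader,
# fast_reader={'chunk_size': ..., 'chunk_generator': True} yields a generator
# of sub-tables that can be stacked back into one Table. The file name below
# is hypothetical.
def _demo_chunked_read(path='big_table.dat'):
    from astropy.io import ascii
    from astropy import table
    chunks = ascii.read(path, guess=False, format='fast_basic',
                        fast_reader={'chunk_size': 100000,
                                     'chunk_generator': True})
    # vstack reassembles the generator's sub-tables into a single Table
    return table.vstack(list(chunks))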
MSeifert04/astropy
astropy/io/ascii/tests/test_read.py
astropy/coordinates/builtin_frames/fk5.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os from os.path import join from distutils.core import Extension from distutils import log from astropy_helpers import setup_helpers, utils from astropy_helpers.version_helpers import get_pkg_version_module wcs_setup_package = utils.import_file(join('astropy', 'wcs', 'setup_package.py')) MODELING_ROOT = os.path.relpath(os.path.dirname(__file__)) MODELING_SRC = join(MODELING_ROOT, 'src') SRC_FILES = [join(MODELING_SRC, 'projections.c.templ'), __file__] GEN_FILES = [join(MODELING_SRC, 'projections.c')] # This defines the set of projection functions that we want to wrap. # The key is the projection name, and the value is the number of # parameters. # (These are in the order that they appear in the WCS coordinate # systems paper). projections = { 'azp': 2, 'szp': 3, 'tan': 0, 'stg': 0, 'sin': 2, 'arc': 0, 'zea': 0, 'air': 1, 'cyp': 2, 'cea': 1, 'mer': 0, 'sfl': 0, 'par': 0, 'mol': 0, 'ait': 0, 'cop': 2, 'coe': 2, 'cod': 2, 'coo': 2, 'bon': 1, 'pco': 0, 'tsc': 0, 'csc': 0, 'qsc': 0, 'hpx': 2, 'xph': 0, } def pre_build_py_hook(cmd_obj): preprocess_source() def pre_build_ext_hook(cmd_obj): preprocess_source() def pre_sdist_hook(cmd_obj): preprocess_source() def preprocess_source(): # TODO: Move this to setup_helpers # Generating the projection wrappers should only be done if needed. This # also ensures that it is not done for any release tarball, since those # will include the generated projections.c. if all(os.path.exists(filename) for filename in GEN_FILES): # Determine modification times src_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES) gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES) version = get_pkg_version_module('astropy') if gen_mtime > src_mtime: # If generated source is recent enough, don't update return elif version.release: # or, if we're on a release, issue a warning, but go ahead and use # the wrappers anyway log.warn('WARNING: The autogenerated wrappers in ' 'astropy.modeling._projections seem to be older ' 'than the source templates used to create ' 'them. Because this is a release version we will ' 'use them anyway, but this might be a sign of ' 'some sort of version mismatch or other ' 'tampering. 
Or it might just mean you moved ' 'some files around or otherwise accidentally ' 'changed timestamps.') return # otherwise rebuild the autogenerated files # If jinja2 isn't present, then print a warning and use existing files try: import jinja2 # pylint: disable=W0611 except ImportError: log.warn("WARNING: jinja2 could not be imported, so the existing " "modeling _projections.c file will be used") return from jinja2 import Environment, FileSystemLoader # Prepare the jinja2 templating environment env = Environment(loader=FileSystemLoader(MODELING_SRC)) c_in = env.get_template('projections.c.templ') c_out = c_in.render(projections=projections) with open(join(MODELING_SRC, 'projections.c'), 'w') as fd: fd.write(c_out) def get_extensions(): wcslib_files = [ # List of wcslib files to compile 'prj.c', 'wcserr.c', 'wcsprintf.c', 'wcsutil.c' ] wcslib_config_paths = [ join(MODELING_SRC, 'wcsconfig.h') ] cfg = setup_helpers.DistutilsExtensionArgs() wcs_setup_package.get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths) cfg['include_dirs'].append(MODELING_SRC) astropy_files = [ # List of astropy.modeling files to compile 'projections.c' ] cfg['sources'].extend(join(MODELING_SRC, x) for x in astropy_files) cfg['sources'] = [str(x) for x in cfg['sources']] cfg = dict((str(key), val) for key, val in cfg.items()) return [Extension('astropy.modeling._projections', **cfg)]
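# A self-contained sketch (illustration only, not part of the real build) of
# the jinja2 rendering step performed by preprocess_source() above: the
# ``projections`` dict is handed to a template that expands one block per
# projection. The inline template below is a stand-in for the actual
# projections.c.templ file.
def _demo_render(projections):
    from jinja2 import Template
    templ = Template(
        '{% for name, nparams in projections.items() %}'
        '/* {{ name }} takes {{ nparams }} parameter(s) */\n'
        '{% endfor %}'
    )
    return templ.render(projections=projections)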
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import re from io import BytesIO, open from collections import OrderedDict import locale import platform from io import StringIO import pathlib import pytest import numpy as np from astropy.io import ascii from astropy.table import Table from astropy import table from astropy.units import Unit from astropy.table.table_helpers import simple_table from .common import (raises, assert_equal, assert_almost_equal, assert_true) from astropy.io.ascii import core from astropy.io.ascii.ui import _probably_html, get_read_trace, cparser from astropy.utils.exceptions import AstropyWarning # setup/teardown function to have the tests run in the correct directory from .common import setup_function, teardown_function try: import bz2 # pylint: disable=W0611 except ImportError: HAS_BZ2 = False else: HAS_BZ2 = True asciiIO = lambda x: BytesIO(x.encode('ascii')) @pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False}, {'use_fast_converter': True}, 'force']) def test_convert_overflow(fast_reader): """ Test reading an extremely large integer, which falls through to string due to an overflow error (#2234). The C parsers used to return inf (kind 'f') for this. """ expected_kind = 'U' with pytest.warns(AstropyWarning, match="OverflowError converting to IntType in column a"): dat = ascii.read(['a', '1' * 10000], format='basic', fast_reader=fast_reader, guess=False) assert dat['a'].dtype.kind == expected_kind def test_guess_with_names_arg(): """ Make sure reading a table with guess=True gives the expected result when the names arg is specified. """ # This is a NoHeader format table and so `names` should replace # the default col0, col1 names. It fails as a Basic format # table when guessing because the column names would be '1', '2'. dat = ascii.read(['1,2', '3,4'], names=('a', 'b')) assert len(dat) == 2 assert dat.colnames == ['a', 'b'] # This is a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c,d', '3,4'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # This is also a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c d', 'e f'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_format_arg(): """ When the format or Reader is explicitly given then disable the strict column name checking in guessing. """ dat = ascii.read(['1,2', '3,4'], format='basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic') assert len(dat) == 1 assert dat.colnames == ['a', 'b'] dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # For good measure check the same in the unified I/O interface dat = Table.read(['1,2', '3,4'], format='ascii.basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_delimiter_arg(): """ When the delimiter is explicitly given then do not try others in guessing. 
""" fields = ['10.1E+19', '3.14', '2048', '-23'] values = [1.01e20, 3.14, 2048, -23] # Default guess should recognise CSV with optional spaces t0 = ascii.read(asciiIO(', '.join(fields)), guess=True) for n, v in zip(t0.colnames, values): assert t0[n][0] == v # Forcing space as delimiter produces type str columns ('10.1E+19,') t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ') for n, v in zip(t1.colnames[:-1], fields[:-1]): assert t1[n][0] == v+',' def test_reading_mixed_delimiter_tabs_spaces(): # Regression test for https://github.com/astropy/astropy/issues/6770 dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc')) assert len(dat) == 2 Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header', names=['a', 'b', 'c']) assert len(dat) == 2 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_with_names_arg(fast_reader): """ Test that a bad value of `names` raises an exception. """ # CParser only uses columns in `names` and thus reports mismach in num_col with pytest.raises(ascii.InconsistentTableError): ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if ('Reader' in test_opts and 'fast_{}'.format(test_opts['Reader']._format_name) in core.FAST_CLASSES): # has fast version if 'Inputter' not in test_opts: # fast reader doesn't allow this test_opts['fast_reader'] = fast_reader table = ascii.read(testfile['name'], **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files_via_table(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if 'Reader' in test_opts: format = 'ascii.{}'.format(test_opts['Reader']._format_name) del test_opts['Reader'] else: format = 'ascii' if f'fast_{format}' in core.FAST_CLASSES: test_opts['fast_reader'] = fast_reader table = Table.read(testfile['name'], format=format, **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_guess_all_files(): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue if not testfile['opts'].get('guess', True): continue print('\n\n******** READING {}'.format(testfile['name'])) for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []): # Copy read options except for those in filter_read_opts guess_opts = dict((k, v) for k, v in testfile['opts'].items() if k not in filter_read_opts) table = ascii.read(testfile['name'], guess=True, **guess_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_daophot_indef(): """Test that INDEF is correctly 
interpreted as a missing value""" table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) for col in table.itercols(): # Four columns have all INDEF values and are masked, rest are normal Column if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'): assert np.all(col.mask) else: assert not hasattr(col, 'mask') def test_daophot_types(): """ Test specific data types which are different from what would be inferred automatically based only data values. DAOphot reader uses the header information to assign types. """ table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot) assert table['LID'].dtype.char in 'fd' # float or double assert table['MAG'].dtype.char in 'fd' # even without any data values assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int) assert table['ID'].dtype.char in 'il' # int or long def test_daophot_header_keywords(): table = ascii.read('data/daophot.dat', Reader=ascii.Daophot) expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'), ('REJFILE', '"hello world"', 'filename', '%-23s'), ('SCALE', '1.', 'units/pix', '%-23.7g'),) keywords = table.meta['keywords'] # Ordered dict of keyword structures for name, value, units, format_ in expected_keywords: keyword = keywords[name] assert_equal(keyword['value'], value) assert_equal(keyword['units'], units) assert_equal(keyword['format'], format_) def test_daophot_multiple_aperture(): table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot) assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file assert table['MERR2'][0] == 1.171 assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3 def test_daophot_multiple_aperture2(): table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot) assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file assert table['MERR2'][0] == 0.049 assert np.all(table['RAPERT5'] == 5.) 
# assert all the 5th apertures are same 5.0 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_empty_table_no_header(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader, guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_wrong_quote(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/bad.txt', fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col2(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader) @raises(OSError) def test_missing_file(): ascii.read('does_not_exist') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') data = ascii.read('data/simple3.txt', names=names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_include_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') include_names = ('c1', 'c3') data = ascii.read('data/simple3.txt', names=names, include_names=include_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, include_names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_exclude_names(fast_reader): exclude_names = ('Y', 'object') data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad')) def test_include_names_daophot(): include_names = ('ID', 'MAG', 'PIER') data = ascii.read('data/daophot.dat', include_names=include_names) assert_equal(data.dtype.names, include_names) def test_exclude_names_daophot(): exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR') data = ascii.read('data/daophot.dat', exclude_names=exclude_names) assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER')) def test_custom_process_lines(): def process_lines(lines): bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE) striplines = (x.strip() for x in lines) return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0] reader = ascii.get_reader(delimiter='|') reader.inputter.process_lines = process_lines data = reader.read('data/bars_at_ends.txt') assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad')) assert_equal(len(data), 3) def test_custom_process_line(): def process_line(line): line_out = re.sub(r'^\|\s*', '', line.strip()) return line_out reader = ascii.get_reader(data_start=2, delimiter='|') reader.header.splitter.process_line = process_line reader.data.splitter.process_line = process_line data = reader.read('data/nls1_stackinfo.dbout') cols = get_testfiles('data/nls1_stackinfo.dbout')['cols'] assert_equal(data.dtype.names, cols[1:]) def test_custom_splitters(): reader = ascii.get_reader() reader.header.splitter = ascii.BaseSplitter() reader.data.splitter = ascii.BaseSplitter() f = 'data/test4.dat' data = reader.read(f) testfile = get_testfiles(f) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), 
testfile['nrows']) assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091) assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704) assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148) assert_equal(data.field('statname')[2], 'chi2modvar') assert_almost_equal(data.field('statval')[2], 497.56468441) def test_start_end(): data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5) assert_equal(len(data), 13) assert_equal(data.field('statname')[0], 'chi2xspecvar') assert_equal(data.field('statname')[-1], 'chi2gehrels') def test_set_converters(): converters = {'zabs1.nh': [ascii.convert_numpy('int32'), ascii.convert_numpy('float32')], 'p1.gamma': [ascii.convert_numpy('str')] } data = ascii.read('data/test4.dat', converters=converters) assert_equal(str(data['zabs1.nh'].dtype), 'float32') assert_equal(data['p1.gamma'][0], '1.26764500000') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_string(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.read() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_filelike(fast_reader): f = 'data/simple.txt' testfile = get_testfiles(f) with open(f, 'rb') as fd: data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_lines(fast_reader): f = 'data/simple.txt' with open(f) as fd: table = fd.readlines() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) def test_comment_lines(): table = ascii.get_reader(Reader=ascii.Rdb) data = table.read('data/apostrophe.rdb') assert_equal(table.comment_lines, ['# first comment', ' # second comment']) assert_equal(data.meta['comments'], ['first comment', 'second comment']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, **testfile['opts']) assert_true((data['a'].mask == [False, True]).all()) assert_true((data['a'] == [1, 1]).all()) assert_true((data['b'].mask == [False, True]).all()) assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_col(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader, **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_include_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_include_names=['b'], **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_exclude_names(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_exclude_names=['a'], **testfile['opts']) check_fill_values(data) def check_fill_values(data): """compare 
array column by column with expectation """ assert not hasattr(data['a'], 'mask') assert_true((data['a'] == ['1', 'a']).all()) assert_true((data['b'].mask == [False, True]).all()) # Check that masked value is "do not care" in comparison assert_true((data['b'] == [2, -999]).all()) data['b'].mask = False # explicitly unmask for comparison assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_list(fast_reader): f = 'data/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')], fast_reader=fast_reader, **testfile['opts']) data['a'].mask = False # explicitly unmask for comparison assert_true((data['a'] == [42, 42]).all()) def test_masking_Cds(): f = 'data/cds.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) assert_true(data['AK'].mask[0]) assert not hasattr(data['Fit'], 'mask') def test_null_Ipac(): f = 'data/ipac.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) mask = np.array([(True, False, True, False, True), (False, False, False, False, False)], dtype=[('ra', '|b1'), ('dec', '|b1'), ('sai', '|b1'), ('v2', '|b1'), ('sptype', '|b1')]) assert np.all(data.mask == mask) def test_Ipac_meta(): keywords = OrderedDict((('intval', 1), ('floatval', 2.3e3), ('date', "Wed Sp 20 09:48:36 1995"), ('key_continue', 'IPAC keywords can continue across lines'))) comments = ['This is an example of a valid comment'] f = 'data/ipac.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) assert data.meta['keywords'].keys() == keywords.keys() for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()): assert data_kv['value'] == kv assert data.meta['comments'] == comments def test_set_guess_kwarg(): """Read a file using guess with one of the typical guess_kwargs explicitly set.""" data = ascii.read('data/space_delim_no_header.dat', delimiter=',', guess=True) assert(data.dtype.names == ('1 3.4 hello',)) assert(len(data) == 1) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_rdb_wrong_type(fast_reader): """Read RDB data with inconsistent data type (expect failure)""" table = """col1\tcol2 N\tN 1\tHello""" with pytest.raises(ValueError): ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_default_missing(fast_reader): """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,,', '2, , 4.0 , ss ']) dat = ascii.read(table, fast_reader=fast_reader) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss'] # Single row table with a single missing element table = """ a \n "" """ dat = ascii.read(table, fast_reader=fast_reader) assert dat.pformat() == [' a ', '---', ' --'] assert dat['a'].dtype.kind == 'i' # Same test with a fixed width reader table = '\n'.join([' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss']) dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss'] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[]) assert dat.masked is False assert dat.pformat() == [' a b c d ', 
'--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] def get_testfiles(name=None): """Set up information about the columns, number of rows, and reader params to read a bunch of test files and verify columns and number of rows.""" testfiles = [ {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.rdb', 'nrows': 2, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/apostrophe.tab', 'nrows': 2, 'opts': {'Reader': ascii.Tab}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds}}, # Test malformed CDS file (issues #2241 #467) {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/cds_malformed.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader}}, {'cols': ('a', 'b', 'c'), 'name': 'data/commented_header2.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5'), 'name': 'data/continuation.dat', 'nrows': 2, 'opts': {'Inputter': ascii.ContinuationLinesInputter, 'Reader': ascii.NoHeader}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/daophot.dat', 'nrows': 2, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALU-ES', 'VALU-ES_1', 'FLAG'), 'name': 'data/sextractor.dat', 'nrows': 3, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('col0', 'objID', 'osrcid', 'xsrcid', 'SpecObjID', 'ra', 'dec', 'obsid', 'ccdid', 'z', 'modelMag_i', 'modelMagErr_i', 'modelMag_r', 'modelMagErr_r', 'expo', 'theta', 'rad_ecf_39', 'detlim90', 'fBlim90'), 'name': 'data/nls1_stackinfo.dbout', 'nrows': 58, 'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 'data/no_data_cds.dat', 'nrows': 0, 'opts': {'Reader': ascii.Cds}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 'data/no_data_daophot.dat', 'nrows': 0, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALUES', 'VALUES_1', 'FLAG'), 'name': 'data/no_data_sextractor.dat', 'nrows': 0, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 'data/no_data_ipac.dat', 'nrows': 0, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('ra', 'v2'), 'name': 'data/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}}, {'cols': ('a', 'b', 'c'), 'name': 'data/no_data_with_header.dat', 'nrows': 0, 'opts': {}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.rdb', 'nrows': 7, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 'data/short.tab', 'nrows': 7, 'opts': {'Reader': ascii.Tab}}, {'cols': ('test 1a', 'test2', 'test3', 'test4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'"}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 1, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 
1, 'opts': {'quotechar': "'", 'header_start': 1}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 'data/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple2.txt', 'nrows': 3, 'opts': {'delimiter': '|'}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 'data/simple3.txt', 'nrows': 2, 'opts': {'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'), 'name': 'data/simple4.txt', 'nrows': 3, 'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader}}, {'cols': ('col1', 'col2', 'col3'), 'name': 'data/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader, 'header_start': None}}, {'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'), 'name': 'data/space_delim_blank_lines.txt', 'nrows': 3, 'opts': {}}, {'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'), 'name': 'data/test4.dat', 'nrows': 9, 'opts': {}}, {'cols': ('a', 'b', 'c'), 'name': 'data/fill_values.txt', 'nrows': 2, 'opts': {'delimiter': ','}}, {'name': 'data/whitespace.dat', 'cols': ('quoted colname with tab\tinside', 'col2', 'col3'), 'nrows': 2, 'opts': {'delimiter': r'\s'}}, {'name': 'data/simple_csv.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'opts': {'Reader': ascii.Csv}}, {'name': 'data/simple_csv_missing.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'skip': True, 'opts': {'Reader': ascii.Csv}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex1.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Facility', 'Id', 'exposure', 'date'), 'name': 'data/latex2.tex', 'nrows': 3, 'opts': {'Reader': ascii.AASTex}}, {'cols': ('cola', 'colb', 'colc'), 'name': 'data/latex3.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Col1', 'Col2', 'Col3', 'Col4'), 'name': 'data/fixed_width_2_line.txt', 'nrows': 2, 'opts': {'Reader': ascii.FixedWidthTwoLine}}, ] try: import bs4 # pylint: disable=W0611 testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'), 'name': 'data/html.html', 'nrows': 3, 'opts': {'Reader': ascii.HTML}}) except ImportError: pass if name is not None: return [x for x in testfiles if x['name'] == name][0] else: return testfiles def test_header_start_exception(): '''Check certain Readers throw an exception if ``header_start`` is set For certain Readers it does not make sense to set the ``header_start``, they throw an exception if you try. This was implemented in response to issue #885. ''' for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac, ascii.BaseReader, ascii.FixedWidthNoHeader, ascii.Cds, ascii.Daophot]: with pytest.raises(ValueError): reader = ascii.core._get_reader(readerclass, header_start=5) def test_csv_table_read(): """ Check for a regression introduced by #1935. Pseudo-CSV file with commented header line. """ lines = ['# a, b', '1, 2', '3, 4'] t = ascii.read(lines) assert t.colnames == ['a', 'b'] @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_overlapping_names(fast_reader): """ Check that the names argument list can overlap with the existing column names. This tests the issue in #1991. """ t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader) assert t.colnames == ['b', 'a'] def test_sextractor_units(): """ Make sure that the SExtractor reader correctly inputs descriptions and units. 
""" table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False) expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'), Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'), Unit('mag * arcsec**(-2)')] expected_descrs = ['Running object number', 'Windowed position estimate along x', 'Windowed position estimate along y', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', 'Extraction flags', None, 'Barycenter position along MAMA x axis', 'Peak surface brightness above background'] for i, colname in enumerate(table.colnames): assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_sextractor_last_column_array(): """ Make sure that the SExtractor reader handles the last column correctly when it is array-like. """ table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False) expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'MAG_AUTO', 'MAGERR_AUTO', 'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6', 'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6'] expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')] expected_descrs = ['Object position along x', None, 'Right ascension of barycenter (J2000)', 'Declination of barycenter (J2000)', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', ] + [ 'Fixed aperture magnitude vector'] * 7 + [ 'RMS error vector for fixed aperture mag.'] * 7 for i, colname in enumerate(table.colnames): assert table[colname].name == expected_columns[i] assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_list_with_newlines(): """ Check that lists of strings where some strings consist of just a newline ("\n") are parsed correctly. """ t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"]) assert t.colnames == ['abc'] assert len(t) == 2 assert t[0][0] == 123 assert t[1][0] == 456 def test_commented_csv(): """ Check that Csv reader does not have ignore lines with the # comment character which is defined for most Basic readers. """ t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv') assert t.colnames == ['#a', 'b'] assert len(t) == 2 assert t['#a'][1] == '#3' def test_meta_comments(): """ Make sure that line comments are included in the ``meta`` attribute of the output Table. 
""" t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3']) assert t.colnames == ['a', 'b', 'c'] assert t.meta['comments'] == ['comment1', 'comment2'] def test_guess_fail(): """ Check the error message when guess fails """ with pytest.raises(ascii.InconsistentTableError) as err: ascii.read('asfdasdf\n1 2 3', format='basic') assert "** To figure out why the table did not read, use guess=False and" in str(err.value) # Test the case with guessing enabled but for a format that has no free params with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='ipac') assert 'At least one header line beginning and ending with delimiter required' in str(err.value) # Test the case with guessing enabled but with all params specified with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False) assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value) @pytest.mark.xfail('not HAS_BZ2') def test_guessing_file_object(): """ Test guessing a file object. Fixes #3013 and similar issue noted in #3019. """ t = ascii.read(open('data/ipac.dat.bz2', 'rb')) assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype'] def test_pformat_roundtrip(): """Check that the screen output of ``print tab`` can be read. See #3025.""" """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,1.11,1', '2, 2, 4.0 , ss ']) dat = ascii.read(table) out = ascii.read(dat.pformat()) assert len(dat) == len(out) assert dat.colnames == out.colnames for c in dat.colnames: assert np.all(dat[c] == out[c]) def test_ipac_abbrev(): lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|', '| r | rE | rea | real | D | do | dou | f | i | l | da| c |', ' 1 2 3 4 5 6 7 8 9 10 11 12 '] dat = ascii.read(lines, format='ipac') for name in dat.columns[0:8]: assert dat[name].dtype.kind == 'f' for name in dat.columns[8:10]: assert dat[name].dtype.kind == 'i' for name in dat.columns[10:12]: assert dat[name].dtype.kind in ('U', 'S') def test_almost_but_not_quite_daophot(): '''Regression test for #3319. This tables looks so close to a daophot table, that the daophot reader gets quite far before it fails with an AttributeError. Note that this table will actually be read as Commented Header table with the columns ['some', 'header', 'info']. ''' lines = ["# some header info", "#F header info beginning with 'F'", "1 2 3", "4 5 6", "7 8 9"] dat = ascii.read(lines) assert len(dat) == 3 @pytest.mark.parametrize('fast', [False, 'force']) def test_commented_header_comments(fast): """ Test that comments in commented_header are as expected with header_start at different positions, and that the table round-trips. 
""" comments = ['comment 1', 'comment 2', 'comment 3'] lines = ['# a b', '# comment 1', '# comment 2', '# comment 3', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] out = StringIO() ascii.write(dat, out, format='commented_header', fast_writer=fast) assert out.getvalue().splitlines() == lines lines.insert(1, lines.pop(0)) dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(2, lines.pop(1)) dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines.insert(3, lines.pop(2)) dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast) assert dat.meta['comments'] == comments assert dat.colnames == ['a', 'b'] lines = ['# a b', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert 'comments' not in dat.meta assert dat.colnames == ['a', 'b'] def test_probably_html(): """ Test the routine for guessing if a table input to ascii.read is probably HTML """ for table in ('data/html.html', 'http://blah.com/table.html', 'https://blah.com/table.html', 'file://blah/table.htm', 'ftp://blah.com/table.html', 'file://blah.com/table.htm', ' <! doctype html > hello world', 'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype html > ', ' hello world'), ): assert _probably_html(table) is True for table in ('data/html.htms', 'Xhttp://blah.com/table.html', ' https://blah.com/table.htm', 'fole://blah/table.htm', ' < doctype html > hello world', 'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype htm > ', ' hello world'), [[1, 2, 3]], ): assert _probably_html(table) is False @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_data_header_start(fast_reader): tests = [(['# comment', '', ' ', 'skip this line', # line 0 'a b', # line 1 '1 2'], # line 2 [{'header_start': 1}, {'header_start': 1, 'data_start': 2} ] ), (['# comment', '', ' \t', 'skip this line', # line 0 'a b', # line 1 '', ' \t', 'skip this line', # line 2 '1 2'], # line 3 [{'header_start': 1, 'data_start': 3}]), (['# comment', '', ' ', 'a b', # line 0 '', ' ', 'skip this line', # line 1 '1 2'], # line 2 [{'header_start': 0, 'data_start': 2}, {'data_start': 2}])] for lines, kwargs_list in tests: for kwargs in kwargs_list: t = ascii.read(lines, format='basic', fast_reader=fast_reader, guess=True, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 1 assert np.all(t['a'] == [1]) # Sanity check that the expected Reader is being used assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Basic if (fast_reader is False) else ascii.FastBasic) def test_table_with_no_newline(): """ Test that an input file which is completely empty fails in the expected way. Test that an input file with one line but no newline succeeds. 
""" # With guessing table = BytesIO() with pytest.raises(ascii.InconsistentTableError): ascii.read(table) # Without guessing table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=False, format='basic') assert 'No header line found' in str(err.value) table = BytesIO() t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic') assert not t and t.as_array().size == 0 # Put a single line of column names but with no newline for kwargs in [dict(), dict(guess=False, fast_reader=False, format='basic'), dict(guess=False, fast_reader=True, format='fast_basic')]: table = BytesIO() table.write(b'a b') t = ascii.read(table, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 0 def test_path_object(): fpath = pathlib.Path('data/simple.txt') data = ascii.read(fpath) assert len(data) == 2 assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4'] assert data['test2'][1] == 'hat2' def test_column_conversion_error(): """ Test that context information (upstream exception message) from column conversion error is provided. """ ipac = """\ | col0 | | double | 1 2 """ with pytest.raises(ValueError) as err: ascii.read(ipac, guess=False, format='ipac') assert 'Column col0 failed to convert:' in str(err.value) with pytest.raises(ValueError) as err: ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []}) assert 'no converters' in str(err.value) def test_non_C_locale_with_fast_reader(): """Test code that forces "C" locale while calling fast reader (#4364)""" current = locale.setlocale(locale.LC_ALL) try: if platform.system() == 'Darwin': locale.setlocale(locale.LC_ALL, 'de_DE') else: locale.setlocale(locale.LC_ALL, 'de_DE.utf8') for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}): t = ascii.read(['a b', '1.5 2'], format='basic', guess=False, fast_reader=fast_reader) assert t['a'].dtype.kind == 'f' except locale.Error as e: pytest.skip(f'Locale error: {e}') finally: locale.setlocale(locale.LC_ALL, current) def test_no_units_for_char_columns(): '''Test that a char column of a Table is assigned no unit and not a dimensionless unit.''' t1 = Table([["A"]], names="B") out = StringIO() ascii.write(t1, out, format="ipac") t2 = ascii.read(out.getvalue(), format="ipac", guess=False) assert t2["B"].unit is None def test_initial_column_fill_values(): """Regression test for #5336, #5338.""" class TestHeader(ascii.BasicHeader): def _set_cols_from_names(self): self.cols = [ascii.Column(name=x) for x in self.names] # Set some initial fill values for col in self.cols: col.fill_values = {'--': '0'} class Tester(ascii.Basic): header_class = TestHeader reader = ascii.get_reader(Reader=Tester) assert reader.read("""# Column definition is the first uncommented line # Default delimiter is the space character. a b c # Data starts after the header column definition, blank lines ignored -- 2 3 4 5 6 """)['a'][0] is np.ma.masked def test_latex_no_trailing_backslash(): """ Test that latex/aastex file with no trailing backslash can be read. 
""" lines = r""" \begin{table} \begin{tabular}{ccc} a & b & c \\ 1 & 1.0 & c \\ % comment 3\% & 3.0 & e % comment \end{tabular} \end{table} """ dat = ascii.read(lines, format='latex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) def text_aastex_no_trailing_backslash(): lines = r""" \begin{deluxetable}{ccc} \tablehead{\colhead{a} & \colhead{b} & \colhead{c}} \startdata 1 & 1.0 & c \\ 2 & 2.0 & d \\ % comment 3\% & 3.0 & e % comment \enddata \end{deluxetable} """ dat = ascii.read(lines, format='aastex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) @pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252']) def test_read_with_encoding(tmpdir, encoding): data = { 'commented_header': '# à b è \n 1 2 héllo', 'csv': 'à,b,è\n1,2,héllo' } testfile = str(tmpdir.join('test.txt')) for fmt, content in data.items(): with open(testfile, 'w', encoding=encoding) as f: f.write(content) table = ascii.read(testfile, encoding=encoding) assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] for guess in (True, False): table = ascii.read(testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess) assert table['è'].dtype.kind == 'U' assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] def test_unsupported_read_with_encoding(tmpdir): # Fast reader is not supported, make sure it raises an exception with pytest.raises(ascii.ParameterError): ascii.read('data/simple3.txt', guess=False, fast_reader='force', encoding='latin1', format='fast_csv') def test_read_chunks_input_types(): """ Test chunked reading for different input types: file path, file object, and string input. """ fpath = 'data/test5.dat' t1 = ascii.read(fpath, header_start=1, data_start=3, ) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): t_gen = ascii.read(fp, header_start=1, data_start=3, guess=False, format='fast_basic', fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) == 4 t2 = table.vstack(ts) assert np.all(t1 == t2) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): # Now read the full table in chunks t3 = ascii.read(fp, header_start=1, data_start=3, fast_reader={'chunk_size': 300}) assert np.all(t1 == t3) @pytest.mark.parametrize('masked', [True, False]) def test_read_chunks_formats(masked): """ Test different supported formats for chunked reading. """ t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked) for i, name in enumerate(t1.colnames): t1.rename_column(name, 'col{}'.format(i + 1)) # TO DO commented_header does not currently work due to the special-cased # implementation of header parsing. 
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic': out = StringIO() ascii.write(t1, out, format=format) t_gen = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) > 4 t2 = table.vstack(ts) assert np.all(t1 == t2) # Now read the full table in chunks t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400}) assert np.all(t1 == t3) def test_read_chunks_chunk_size_too_small(): fpath = 'data/test5.dat' with pytest.raises(ValueError) as err: ascii.read(fpath, header_start=1, data_start=3, fast_reader={'chunk_size': 10}) assert 'no newline found in chunk (chunk_size too small?)' in str(err.value) def test_read_chunks_table_changes(): """Column changes type or size between chunks. This also tests the case with no final newline. """ col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50 table = '\n'.join(col) t1 = ascii.read(table, guess=False) t2 = ascii.read(table, fast_reader={'chunk_size': 100}) # This also confirms that the dtypes are exactly the same, i.e. # the string itemsizes are the same. assert np.all(t1 == t2) def test_read_non_ascii(): """Test that pure-Python reader is used in case the file contains non-ASCII characters in it. """ table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv') assert np.all(table['col1'] == ['\u2119', '1']) assert np.all(table['col2'] == ['\u01b4', '2']) @pytest.mark.parametrize('enable', [True, False, 'force']) def test_kwargs_dict_guess(enable): """Test that fast_reader dictionary is preserved through guessing sequence. """ # Fails for enable=(True, 'force') - #5578 ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable)) assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Tab if (enable is False) else ascii.FastTab) for k in get_read_trace(): if not k.get('status', 'Disabled').startswith('Disabled'): assert k.get('kwargs').get('fast_reader').get('enable') is enable
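# A small stand-alone sketch mirroring test_read_with_encoding above: reading
# a non-UTF-8 file through the ``encoding`` keyword. tempfile is used here
# instead of pytest's tmpdir fixture, purely for illustration.
def _demo_read_latin1():
    import os
    import tempfile
    from astropy.io import ascii
    fd, path = tempfile.mkstemp(suffix='.csv')
    try:
        with os.fdopen(fd, 'w', encoding='latin1') as f:
            f.write('a,b\n1,héllo\n')
        return ascii.read(path, format='csv', encoding='latin1')
    finally:
        os.remove(path)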
MSeifert04/astropy
astropy/io/ascii/tests/test_read.py
astropy/modeling/setup_package.py
import inspect import os import warnings from pathlib import Path import impala import pytest import ibis import ibis.expr.types as ir import ibis.util as util from ibis import options from ibis.backends.tests.base import ( BackendTest, RoundAwayFromZero, UnorderedComparator, ) from ibis.tests.expr.mocks import MockConnection class TestConf(UnorderedComparator, BackendTest, RoundAwayFromZero): supports_arrays = True supports_arrays_outside_of_select = False check_dtype = False supports_divide_by_zero = True returned_timestamp_unit = 's' @staticmethod def connect(data_directory: Path): from ibis.backends.impala.tests.conftest import IbisTestEnv env = IbisTestEnv() hdfs_client = ibis.impala.hdfs_connect( host=env.nn_host, port=env.webhdfs_port, auth_mechanism=env.auth_mechanism, verify=env.auth_mechanism not in ['GSSAPI', 'LDAP'], user=env.webhdfs_user, ) auth_mechanism = env.auth_mechanism if auth_mechanism == 'GSSAPI' or auth_mechanism == 'LDAP': print("Warning: ignoring invalid Certificate Authority errors") return ibis.impala.connect( host=env.impala_host, port=env.impala_port, auth_mechanism=env.auth_mechanism, hdfs_client=hdfs_client, database='ibis_testing', ) @property def batting(self) -> ir.TableExpr: return None @property def awards_players(self) -> ir.TableExpr: return None def isproperty(obj): return isinstance(obj, property) class IbisTestEnv: def items(self): return [ (name, getattr(self, name)) for name, _ in inspect.getmembers(type(self), predicate=isproperty) ] def __repr__(self): lines = map('{}={!r},'.format, *zip(*self.items())) return '{}(\n{}\n)'.format( type(self).__name__, util.indent('\n'.join(lines), 4) ) @property def impala_host(self): return os.environ.get('IBIS_TEST_IMPALA_HOST', 'localhost') @property def impala_port(self): return int(os.environ.get('IBIS_TEST_IMPALA_PORT', 21050)) @property def tmp_db(self): options.impala.temp_db = tmp_db = os.environ.get( 'IBIS_TEST_TMP_DB', 'ibis_testing_tmp_db' ) return tmp_db @property def tmp_dir(self): options.impala.temp_hdfs_path = tmp_dir = os.environ.get( 'IBIS_TEST_TMP_HDFS_DIR', '/tmp/__ibis_test_{}'.format(util.guid()) ) return tmp_dir @property def test_data_db(self): return os.environ.get('IBIS_TEST_DATA_DB', 'ibis_testing') @property def test_data_dir(self): return os.environ.get( 'IBIS_TEST_DATA_HDFS_DIR', '/__ibis/ibis-testing-data' ) @property def nn_host(self): return os.environ.get('IBIS_TEST_NN_HOST', 'localhost') @property def webhdfs_port(self): # 50070 is the default for the impala dev env return int(os.environ.get('IBIS_TEST_WEBHDFS_PORT', 50070)) @property def hdfs_superuser(self): return os.environ.get('IBIS_TEST_HDFS_SUPERUSER', 'hdfs') @property def use_codegen(self): return ( os.environ.get('IBIS_TEST_USE_CODEGEN', 'False').lower() == 'true' ) @property def auth_mechanism(self): return os.environ.get('IBIS_TEST_AUTH_MECH', 'NOSASL') @property def webhdfs_user(self): return os.environ.get('IBIS_TEST_WEBHDFS_USER', 'hdfs') @pytest.fixture(scope='session') def env(): return IbisTestEnv() @pytest.fixture(scope='session') def tmp_dir(env): options.impala.temp_hdfs_path = tmp_dir = env.tmp_dir return tmp_dir @pytest.fixture(scope='session') def test_data_db(env): return env.test_data_db @pytest.fixture(scope='session') def test_data_dir(env): return env.test_data_dir @pytest.fixture(scope='session') def hdfs_superuser(env): return env.hdfs_superuser @pytest.fixture(scope='session') def hdfs(env, tmp_dir): if env.auth_mechanism in {'GSSAPI', 'LDAP'}: warnings.warn("Ignoring invalid Certificate 
Authority errors") client = ibis.impala.hdfs_connect( host=env.nn_host, port=env.webhdfs_port, auth_mechanism=env.auth_mechanism, verify=env.auth_mechanism not in {'GSSAPI', 'LDAP'}, user=env.webhdfs_user, ) if not client.exists(tmp_dir): client.mkdir(tmp_dir) client.chmod(tmp_dir, '777') return client @pytest.fixture(scope='session') def con_no_hdfs(env, test_data_db): con = ibis.impala.connect( host=env.impala_host, database=test_data_db, port=env.impala_port, auth_mechanism=env.auth_mechanism, ) if not env.use_codegen: con.disable_codegen() assert con.get_options()['DISABLE_CODEGEN'] == '1' try: yield con finally: con.set_database(test_data_db) @pytest.fixture(scope='session') def con(env, hdfs, test_data_db): con = ibis.impala.connect( host=env.impala_host, database=test_data_db, port=env.impala_port, auth_mechanism=env.auth_mechanism, hdfs_client=hdfs, ) if not env.use_codegen: con.disable_codegen() assert con.get_options()['DISABLE_CODEGEN'] == '1' try: yield con finally: con.set_database(test_data_db) @pytest.fixture(scope='session') def temp_char_table(con): statement = """\ CREATE TABLE IF NOT EXISTS {} ( `group1` varchar(10), `group2` char(10) )""" name = 'testing_varchar_support' sql = statement.format(name) con.con.execute(sql) try: yield con.table(name) finally: assert con.exists_table(name), name con.drop_table(name) @pytest.fixture(scope='session') def tmp_db(env, con, test_data_db): tmp_db = env.tmp_db if not con.exists_database(tmp_db): con.create_database(tmp_db) try: yield tmp_db finally: con.set_database(test_data_db) try: con.drop_database(tmp_db, force=True) except impala.error.HiveServer2Error: # The database can be dropped by another process during tear down # in the middle of dropping this one if tests are running in # parallel. # # We only care that it gets dropped before all tests are finished # running. 
pass @pytest.fixture(scope='session') def con_no_db(env, hdfs): con = ibis.impala.connect( host=env.impala_host, database=None, port=env.impala_port, auth_mechanism=env.auth_mechanism, hdfs_client=hdfs, ) if not env.use_codegen: con.disable_codegen() assert con.get_options()['DISABLE_CODEGEN'] == '1' try: yield con finally: con.set_database(None) @pytest.fixture(scope='session') def alltypes(con, test_data_db): return con.database(test_data_db).functional_alltypes @pytest.fixture(scope='session') def alltypes_df(alltypes): return alltypes.execute() def _random_identifier(suffix): return '__ibis_test_{}_{}'.format(suffix, util.guid()) @pytest.fixture def temp_database(con, test_data_db): name = _random_identifier('database') con.create_database(name) try: yield name finally: con.set_database(test_data_db) con.drop_database(name, force=True) @pytest.fixture def temp_table(con): name = _random_identifier('table') try: yield name finally: assert con.exists_table(name), name con.drop_table(name) @pytest.fixture def temp_table_db(con, temp_database): name = _random_identifier('table') try: yield temp_database, name finally: assert con.exists_table(name, database=temp_database), name con.drop_table(name, database=temp_database) @pytest.fixture def temp_view(con): name = _random_identifier('view') try: yield name finally: assert con.exists_table(name), name con.drop_view(name) @pytest.fixture def temp_view_db(con, temp_database): name = _random_identifier('view') try: yield temp_database, name finally: assert con.exists_table(name, database=temp_database), name con.drop_view(name, database=temp_database) @pytest.fixture def temp_parquet_table_schema(): return ibis.schema( [('id', 'int32'), ('name', 'string'), ('files', 'int32')] ) @pytest.fixture def temp_parquet_table(con, tmp_db, temp_parquet_table_schema): name = util.guid() db = con.database(tmp_db) db.create_table(name, schema=temp_parquet_table_schema, format='parquet') try: yield db[name] finally: db.client.drop_table(name, database=tmp_db) @pytest.fixture def temp_parquet_table2(con, tmp_db, temp_parquet_table_schema): name = util.guid() db = con.database(tmp_db) db.create_table(name, schema=temp_parquet_table_schema, format='parquet') try: yield db[name] finally: db.client.drop_table(name, database=tmp_db) @pytest.fixture def mockcon(): return MockConnection() @pytest.fixture(scope='session') def kudu_table(con, test_data_db): name = 'kudu_backed_table' con.raw_sql( """\ CREATE TABLE {database}.{name} ( a STRING, PRIMARY KEY(a) ) PARTITION BY HASH PARTITIONS 2 STORED AS KUDU TBLPROPERTIES ( 'kudu.master_addresses' = 'kudu', 'kudu.num_tablet_replicas' = '1' )""".format( database=test_data_db, name=name ) ) drop_sql = 'DROP TABLE {database}.{name}'.format( database=test_data_db, name=name ) try: yield con.table(name) finally: con.raw_sql(drop_sql)
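A minimal sketch (not part of the conftest above) of how these fixtures compose in a test module collected under it. It assumes a reachable Impala test cluster configured through the IBIS_TEST_* environment variables, and that the Impala client accepts a pandas DataFrame for table creation; the table contents are illustrative.

import pandas as pd


def test_roundtrip_temp_table(con, temp_table, test_data_db):
    # `temp_table` hands out a unique name and drops the table afterwards.
    df = pd.DataFrame({'a': [1, 2, 3]})
    con.create_table(temp_table, df, database=test_data_db)
    t = con.table(temp_table, database=test_data_db)
    assert t.count().execute() == 3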
import tempfile
from pathlib import Path

import numpy as np
import pandas as pd
import pytest

import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
from ibis.expr.window import window
from ibis.udf.vectorized import analytic, elementwise, reduction

pytestmark = pytest.mark.udf


def _format_udf_return_type(func, result_formatter):
    """Call the given udf and return its result according to the given format
    (e.g. in the form of a list, pd.Series, np.array, etc.)"""

    def _wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        return result_formatter(result)

    return _wrapper


def _format_struct_udf_return_type(func, result_formatter):
    """Call the given struct udf and return its result according to the given
    format (e.g. in the form of a list, pd.Series, np.array, etc.)"""

    def _wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        return result_formatter(*result)

    return _wrapper


# elementwise UDF
def add_one(s):
    assert isinstance(s, pd.Series)
    return s + 1


def create_add_one_udf(result_formatter):
    return elementwise(input_type=[dt.double], output_type=dt.double)(
        _format_udf_return_type(add_one, result_formatter)
    )


add_one_udfs = [
    create_add_one_udf(result_formatter=lambda v: v),  # pd.Series
    create_add_one_udf(result_formatter=lambda v: np.array(v)),  # np.array
    create_add_one_udf(result_formatter=lambda v: list(v)),  # list
]


# analytic UDF
def calc_zscore(s):
    assert isinstance(s, pd.Series)
    return (s - s.mean()) / s.std()


def create_calc_zscore_udf(result_formatter):
    return analytic(input_type=[dt.double], output_type=dt.double)(
        _format_udf_return_type(calc_zscore, result_formatter)
    )


calc_zscore_udfs = [
    create_calc_zscore_udf(result_formatter=lambda v: v),  # pd.Series
    create_calc_zscore_udf(
        result_formatter=lambda v: np.array(v)
    ),  # np.array
    create_calc_zscore_udf(result_formatter=lambda v: list(v)),  # list
]


@reduction(input_type=[dt.double], output_type=dt.double)
def calc_mean(s):
    assert isinstance(s, (np.ndarray, pd.Series))
    return s.mean()


# elementwise multi-column UDF
def add_one_struct(v):
    assert isinstance(v, pd.Series)
    return v + 1, v + 2


def create_add_one_struct_udf(result_formatter):
    return elementwise(
        input_type=[dt.double],
        output_type=dt.Struct(['col1', 'col2'], [dt.double, dt.double]),
    )(_format_struct_udf_return_type(add_one_struct, result_formatter))


add_one_struct_udfs = [
    create_add_one_struct_udf(
        result_formatter=lambda v1, v2: (v1, v2)
    ),  # tuple of pd.Series
    create_add_one_struct_udf(
        result_formatter=lambda v1, v2: [v1, v2]
    ),  # list of pd.Series
    create_add_one_struct_udf(
        result_formatter=lambda v1, v2: (np.array(v1), np.array(v2))
    ),  # tuple of np.array
    create_add_one_struct_udf(
        result_formatter=lambda v1, v2: [np.array(v1), np.array(v2)]
    ),  # list of np.array
    create_add_one_struct_udf(
        result_formatter=lambda v1, v2: np.array([np.array(v1), np.array(v2)])
    ),  # np.array of np.array
    create_add_one_struct_udf(
        result_formatter=lambda v1, v2: pd.DataFrame({'col1': v1, 'col2': v2})
    ),  # pd.DataFrame
]


@elementwise(
    input_type=[dt.double],
    output_type=dt.Struct(['double_col', 'col2'], [dt.double, dt.double]),
)
def overwrite_struct_elementwise(v):
    assert isinstance(v, pd.Series)
    return v + 1, v + 2


@elementwise(
    input_type=[dt.double],
    output_type=dt.Struct(
        ['double_col', 'col2', 'float_col'], [dt.double, dt.double, dt.double]
    ),
)
def multiple_overwrite_struct_elementwise(v):
    assert isinstance(v, pd.Series)
    return v + 1, v + 2, v + 3


@analytic(
    input_type=[dt.double, dt.double],
    output_type=dt.Struct(
        ['double_col', 'demean_weight'], [dt.double, dt.double]
    ),
)
def overwrite_struct_analytic(v, w):
    assert isinstance(v, pd.Series)
    assert isinstance(w, pd.Series)
    return v - v.mean(), w - w.mean()


# analytic multi-column UDF
def demean_struct(v, w):
    assert isinstance(v, pd.Series)
    assert isinstance(w, pd.Series)
    return v - v.mean(), w - w.mean()


def create_demean_struct_udf(result_formatter):
    return analytic(
        input_type=[dt.double, dt.double],
        output_type=dt.Struct(
            ['demean', 'demean_weight'], [dt.double, dt.double]
        ),
    )(_format_struct_udf_return_type(demean_struct, result_formatter))


demean_struct_udfs = [
    create_demean_struct_udf(
        result_formatter=lambda v1, v2: (v1, v2)
    ),  # tuple of pd.Series
    create_demean_struct_udf(
        result_formatter=lambda v1, v2: [v1, v2]
    ),  # list of pd.Series
    create_demean_struct_udf(
        result_formatter=lambda v1, v2: (np.array(v1), np.array(v2))
    ),  # tuple of np.array
    create_demean_struct_udf(
        result_formatter=lambda v1, v2: [np.array(v1), np.array(v2)]
    ),  # list of np.array
    create_demean_struct_udf(
        result_formatter=lambda v1, v2: np.array([np.array(v1), np.array(v2)])
    ),  # np.array of np.array
    create_demean_struct_udf(
        result_formatter=lambda v1, v2: pd.DataFrame(
            {'demean': v1, 'demean_weight': v2}
        )
    ),  # pd.DataFrame
]


# reduction multi-column UDF
def mean_struct(v, w):
    assert isinstance(v, (np.ndarray, pd.Series))
    assert isinstance(w, (np.ndarray, pd.Series))
    return v.mean(), w.mean()


def create_mean_struct_udf(result_formatter):
    return reduction(
        input_type=[dt.double, dt.double],
        output_type=dt.Struct(['mean', 'mean_weight'], [dt.double, dt.double]),
    )(_format_struct_udf_return_type(mean_struct, result_formatter))


mean_struct_udfs = [
    create_mean_struct_udf(
        result_formatter=lambda v1, v2: (v1, v2)
    ),  # tuple of scalar
    create_mean_struct_udf(
        result_formatter=lambda v1, v2: [v1, v2]
    ),  # list of scalar
    create_mean_struct_udf(
        result_formatter=lambda v1, v2: np.array([v1, v2])
    ),  # np.array of scalar
]


@reduction(
    input_type=[dt.double, dt.double],
    output_type=dt.Struct(
        ['double_col', 'mean_weight'], [dt.double, dt.double]
    ),
)
def overwrite_struct_reduction(v, w):
    assert isinstance(v, (np.ndarray, pd.Series))
    assert isinstance(w, (np.ndarray, pd.Series))
    return v.mean(), w.mean()


@reduction(
    input_type=[dt.double],
    output_type=dt.Array(dt.double),
)
def quantiles(series, *, quantiles):
    return series.quantile(quantiles)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf(backend, alltypes, df):
    add_one_udf = create_add_one_udf(result_formatter=lambda v: v)
    result = add_one_udf(alltypes['double_col']).execute()
    expected = add_one_udf.func(df['double_col'])
    backend.assert_series_equal(result, expected, check_names=False)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
@pytest.mark.parametrize('udf', add_one_udfs)
def test_elementwise_udf_mutate(backend, alltypes, df, udf):
    expr = alltypes.mutate(incremented=udf(alltypes['double_col']))
    result = expr.execute()
    expected = df.assign(incremented=udf.func(df['double_col']))
    backend.assert_series_equal(result['incremented'], expected['incremented'])


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_analytic_udf(backend, alltypes, df):
    calc_zscore_udf = create_calc_zscore_udf(result_formatter=lambda v: v)
    result = calc_zscore_udf(alltypes['double_col']).execute()
    expected = calc_zscore_udf.func(df['double_col'])
    backend.assert_series_equal(result, expected, check_names=False)


@pytest.mark.only_on_backends(['pandas', 'pyspark'])
# TODO - windowing - #2553
@pytest.mark.xfail_unsupported
@pytest.mark.parametrize('udf', calc_zscore_udfs)
def test_analytic_udf_mutate(backend, alltypes, df, udf):
    expr = alltypes.mutate(zscore=udf(alltypes['double_col']))
    result = expr.execute()
    expected = df.assign(zscore=udf.func(df['double_col']))
    backend.assert_series_equal(result['zscore'], expected['zscore'])


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_reduction_udf(backend, alltypes, df):
    result = calc_mean(alltypes['double_col']).execute()
    expected = df['double_col'].mean()
    assert result == expected


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_reduction_udf_array_return_type(backend, alltypes, df):
    """Tests reduction UDF returning an array."""
    qs = [0.25, 0.75]
    expr = alltypes.mutate(q=quantiles(alltypes['int_col'], quantiles=qs))
    result = expr.execute()
    expected = df.assign(
        q=pd.Series([quantiles.func(df['int_col'], quantiles=qs)])
        .repeat(len(df))
        .reset_index(drop=True)
    )
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_output_type_in_list_invalid(backend, alltypes, df):
    # Test that an error is raised if UDF output type is wrapped in a list
    with pytest.raises(
        com.IbisTypeError,
        match="The output type of a UDF must be a single datatype.",
    ):

        @elementwise(input_type=[dt.double], output_type=[dt.double])
        def add_one(s):
            return s + 1


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_valid_kwargs(backend, alltypes, df):
    # Test different forms of UDF definition with keyword arguments
    @elementwise(input_type=[dt.double], output_type=dt.double)
    def foo1(v):
        # Basic UDF with no extra arguments
        return v + 1

    @elementwise(input_type=[dt.double], output_type=dt.double)
    def foo2(v, *, amount):
        # UDF with keyword only arguments
        return v + amount

    @elementwise(input_type=[dt.double], output_type=dt.double)
    def foo3(v, **kwargs):
        # UDF with kwargs
        return v + kwargs.get('amount', 1)

    result = alltypes.mutate(
        v1=foo1(alltypes['double_col']),
        v2=foo2(alltypes['double_col'], amount=1),
        v3=foo2(alltypes['double_col'], amount=2),
        v4=foo3(alltypes['double_col']),
        v5=foo3(alltypes['double_col'], amount=2),
        v6=foo3(alltypes['double_col'], amount=3),
    ).execute()
    expected = df.assign(
        v1=df['double_col'] + 1,
        v2=df['double_col'] + 1,
        v3=df['double_col'] + 2,
        v4=df['double_col'] + 1,
        v5=df['double_col'] + 2,
        v6=df['double_col'] + 3,
    )
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_valid_args(backend, alltypes, df):
    # Test different forms of UDF definition with *args
    @elementwise(input_type=[dt.double, dt.int32], output_type=dt.double)
    def foo1(*args):
        return args[0] + args[1]

    @elementwise(input_type=[dt.double, dt.int32], output_type=dt.double)
    def foo2(v, *args):
        return v + args[0]

    result = alltypes.mutate(
        v1=foo1(alltypes['double_col'], alltypes['int_col']),
        v2=foo2(alltypes['double_col'], alltypes['int_col']),
    ).execute()
    expected = df.assign(
        v1=df['double_col'] + df['int_col'],
        v2=df['double_col'] + df['int_col'],
    )
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_valid_args_and_kwargs(backend, alltypes, df):
    # Test UDFs with both *args and keyword arguments
    @elementwise(input_type=[dt.double, dt.int32], output_type=dt.double)
    def foo1(*args, amount):
        # UDF with *args and a keyword-only argument
        return args[0] + args[1] + amount

    @elementwise(input_type=[dt.double, dt.int32], output_type=dt.double)
    def foo2(*args, **kwargs):
        # UDF with *args and **kwargs
        return args[0] + args[1] + kwargs.get('amount', 1)

    @elementwise(input_type=[dt.double, dt.int32], output_type=dt.double)
    def foo3(v, *args, amount):
        # UDF with an explicit positional argument, *args, and a keyword-only
        # argument
        return v + args[0] + amount

    @elementwise(input_type=[dt.double, dt.int32], output_type=dt.double)
    def foo4(v, *args, **kwargs):
        # UDF with an explicit positional argument, *args, and **kwargs
        return v + args[0] + kwargs.get('amount', 1)

    result = alltypes.mutate(
        v1=foo1(alltypes['double_col'], alltypes['int_col'], amount=2),
        v2=foo2(alltypes['double_col'], alltypes['int_col'], amount=2),
        v3=foo3(alltypes['double_col'], alltypes['int_col'], amount=2),
        v4=foo4(alltypes['double_col'], alltypes['int_col'], amount=2),
    ).execute()
    expected = df.assign(
        v1=df['double_col'] + df['int_col'] + 2,
        v2=df['double_col'] + df['int_col'] + 2,
        v3=df['double_col'] + df['int_col'] + 2,
        v4=df['double_col'] + df['int_col'] + 2,
    )
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_invalid_kwargs(backend, alltypes):
    # Test that defining a UDF with a non-column argument that is not a
    # keyword argument raises an error
    with pytest.raises(TypeError, match=".*must be defined as keyword only.*"):

        @elementwise(input_type=[dt.double], output_type=dt.double)
        def foo1(v, amount):
            return v + 1


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
@pytest.mark.parametrize('udf', add_one_struct_udfs)
def test_elementwise_udf_destruct(backend, alltypes, udf):
    result = alltypes.mutate(
        udf(alltypes['double_col']).destructure()
    ).execute()
    expected = alltypes.mutate(
        col1=alltypes['double_col'] + 1,
        col2=alltypes['double_col'] + 2,
    ).execute()
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_overwrite_destruct(backend, alltypes):
    result = alltypes.mutate(
        overwrite_struct_elementwise(alltypes['double_col']).destructure()
    ).execute()
    expected = alltypes.mutate(
        double_col=alltypes['double_col'] + 1,
        col2=alltypes['double_col'] + 2,
    ).execute()
    # TODO issue #2649
    # Due to a known limitation with how we treat DestructColumn
    # in assignments, the ordering of op.selections may not exactly
    # correspond with the column ordering we want (i.e. all new columns
    # should appear at the end, but currently they are materialized
    # directly after those overwritten columns).
    backend.assert_frame_equal(result, expected, check_like=True)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_overwrite_destruct_and_assign(backend, alltypes):
    result = (
        alltypes.mutate(
            overwrite_struct_elementwise(alltypes['double_col']).destructure()
        )
        .mutate(col3=alltypes['int_col'] * 3)
        .execute()
    )
    expected = alltypes.mutate(
        double_col=alltypes['double_col'] + 1,
        col2=alltypes['double_col'] + 2,
        col3=alltypes['int_col'] * 3,
    ).execute()
    # TODO issue #2649
    # Due to a known limitation with how we treat DestructColumn
    # in assignments, the ordering of op.selections may not exactly
    # correspond with the column ordering we want (i.e. all new columns
    # should appear at the end, but currently they are materialized
    # directly after those overwritten columns).
    backend.assert_frame_equal(result, expected, check_like=True)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
@pytest.mark.min_spark_version('3.1')
def test_elementwise_udf_destruct_exact_once(backend, alltypes):
    with tempfile.TemporaryDirectory() as tempdir:

        @elementwise(
            input_type=[dt.double],
            output_type=dt.Struct(['col1', 'col2'], [dt.double, dt.double]),
        )
        def add_one_struct_exact_once(v):
            key = v.iloc[0]
            path = Path(f"{tempdir}/{key}")
            assert not path.exists()
            path.touch()
            return v + 1, v + 2

        result = alltypes.mutate(
            add_one_struct_exact_once(alltypes['index']).destructure()
        )

        result = result.execute()

        assert len(result) > 0


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_multiple_overwrite_destruct(backend, alltypes):
    result = alltypes.mutate(
        multiple_overwrite_struct_elementwise(
            alltypes['double_col']
        ).destructure()
    ).execute()
    expected = alltypes.mutate(
        double_col=alltypes['double_col'] + 1,
        col2=alltypes['double_col'] + 2,
        float_col=alltypes['double_col'] + 3,
    ).execute()
    # TODO issue #2649
    # Due to a known limitation with how we treat DestructColumn
    # in assignments, the ordering of op.selections may not exactly
    # correspond with the column ordering we want (i.e. all new columns
    # should appear at the end, but currently they are materialized
    # directly after those overwritten columns).
    backend.assert_frame_equal(result, expected, check_like=True)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_named_destruct(backend, alltypes):
    """Test error when assigning name to a destruct column."""
    add_one_struct_udf = create_add_one_struct_udf(
        result_formatter=lambda v1, v2: (v1, v2)
    )
    with pytest.raises(
        com.ExpressionError, match=r".*Cannot name a destruct.*"
    ):
        alltypes.mutate(
            new_struct=add_one_struct_udf(alltypes['double_col']).destructure()
        )


@pytest.mark.only_on_backends(['pyspark'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_struct(backend, alltypes):
    add_one_struct_udf = create_add_one_struct_udf(
        result_formatter=lambda v1, v2: (v1, v2)
    )
    result = alltypes.mutate(
        new_col=add_one_struct_udf(alltypes['double_col'])
    ).execute()
    result = result.assign(
        col1=result['new_col'].apply(lambda x: x[0]),
        col2=result['new_col'].apply(lambda x: x[1]),
    )
    result = result.drop('new_col', axis=1)
    expected = alltypes.mutate(
        col1=alltypes['double_col'] + 1,
        col2=alltypes['double_col'] + 2,
    ).execute()
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas'])
# TODO - windowing - #2553
@pytest.mark.xfail_backends(['dask'])
@pytest.mark.parametrize('udf', demean_struct_udfs)
def test_analytic_udf_destruct(backend, alltypes, udf):
    w = window(preceding=None, following=None, group_by='year')
    result = alltypes.mutate(
        udf(alltypes['double_col'], alltypes['int_col']).over(w).destructure()
    ).execute()
    expected = alltypes.mutate(
        demean=alltypes['double_col'] - alltypes['double_col'].mean().over(w),
        demean_weight=alltypes['int_col'] - alltypes['int_col'].mean().over(w),
    ).execute()
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas'])
# TODO - udf - #2553
@pytest.mark.xfail_backends(['dask'])
def test_analytic_udf_destruct_no_groupby(backend, alltypes):
    w = window(preceding=None, following=None)
    demean_struct_udf = create_demean_struct_udf(
        result_formatter=lambda v1, v2: (v1, v2)
    )
    result = alltypes.mutate(
        demean_struct_udf(alltypes['double_col'], alltypes['int_col'])
        .over(w)
        .destructure()
    ).execute()
    expected = alltypes.mutate(
        demean=alltypes['double_col'] - alltypes['double_col'].mean().over(w),
        demean_weight=alltypes['int_col'] - alltypes['int_col'].mean().over(w),
    ).execute()
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas', 'pyspark'])
# TODO - windowing - #2553
@pytest.mark.xfail_backends(['dask'])
@pytest.mark.xfail_unsupported
def test_analytic_udf_destruct_overwrite(backend, alltypes):
    w = window(preceding=None, following=None, group_by='year')
    result = alltypes.mutate(
        overwrite_struct_analytic(alltypes['double_col'], alltypes['int_col'])
        .over(w)
        .destructure()
    ).execute()
    expected = alltypes.mutate(
        double_col=alltypes['double_col']
        - alltypes['double_col'].mean().over(w),
        demean_weight=alltypes['int_col'] - alltypes['int_col'].mean().over(w),
    ).execute()
    # TODO issue #2649
    # Due to a known limitation with how we treat DestructColumn
    # in assignments, the ordering of op.selections may not exactly
    # correspond with the column ordering we want (i.e. all new columns
    # should appear at the end, but currently they are materialized
    # directly after those overwritten columns).
    backend.assert_frame_equal(result, expected, check_like=True)


@pytest.mark.only_on_backends(['pandas', 'dask'])
@pytest.mark.parametrize('udf', mean_struct_udfs)
def test_reduction_udf_destruct_groupby(backend, alltypes, udf):
    result = (
        alltypes.groupby('year')
        .aggregate(
            udf(alltypes['double_col'], alltypes['int_col']).destructure()
        )
        .execute()
    ).sort_values('year')
    expected = (
        alltypes.groupby('year')
        .aggregate(
            mean=alltypes['double_col'].mean(),
            mean_weight=alltypes['int_col'].mean(),
        )
        .execute()
    ).sort_values('year')
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas', 'dask'])
def test_reduction_udf_destruct_no_groupby(backend, alltypes):
    mean_struct_udf = create_mean_struct_udf(
        result_formatter=lambda v1, v2: (v1, v2)
    )
    result = alltypes.aggregate(
        mean_struct_udf(
            alltypes['double_col'], alltypes['int_col']
        ).destructure()
    ).execute()
    expected = alltypes.aggregate(
        mean=alltypes['double_col'].mean(),
        mean_weight=alltypes['int_col'].mean(),
    ).execute()
    backend.assert_frame_equal(result, expected)


@pytest.mark.only_on_backends(['pandas', 'pyspark', 'dask'])
@pytest.mark.xfail_unsupported
def test_reduction_udf_destruct_no_groupby_overwrite(backend, alltypes):
    result = alltypes.aggregate(
        overwrite_struct_reduction(
            alltypes['double_col'], alltypes['int_col']
        ).destructure()
    ).execute()
    expected = alltypes.aggregate(
        double_col=alltypes['double_col'].mean(),
        mean_weight=alltypes['int_col'].mean(),
    ).execute()
    # TODO issue #2649
    # Due to a known limitation with how we treat DestructColumn
    # in assignments, the ordering of op.selections may not exactly
    # correspond with the column ordering we want (i.e. all new columns
    # should appear at the end, but currently they are materialized
    # directly after those overwritten columns).
    backend.assert_frame_equal(result, expected, check_like=True)


@pytest.mark.only_on_backends(['pandas'])
# TODO - windowing - #2553
@pytest.mark.xfail_backends(['dask'])
def test_reduction_udf_destruct_window(backend, alltypes):
    win = window(
        preceding=ibis.interval(hours=2),
        following=0,
        group_by='year',
        order_by='timestamp_col',
    )
    mean_struct_udf = create_mean_struct_udf(
        result_formatter=lambda v1, v2: (v1, v2)
    )
    result = alltypes.mutate(
        mean_struct_udf(alltypes['double_col'], alltypes['int_col'])
        .over(win)
        .destructure()
    ).execute()
    expected = alltypes.mutate(
        mean=alltypes['double_col'].mean().over(win),
        mean_weight=alltypes['int_col'].mean().over(win),
    ).execute()
    backend.assert_frame_equal(result, expected)
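For orientation, a self-contained sketch of the vectorized-UDF pattern these tests exercise, run against the pandas backend so no cluster is needed. The table name, column name, and data below are illustrative, not taken from the suite above.

import pandas as pd

import ibis
import ibis.expr.datatypes as dt
from ibis.udf.vectorized import elementwise


@elementwise(input_type=[dt.double], output_type=dt.double)
def add_one_sketch(s):
    # The backend hands the UDF a whole pd.Series per batch, not scalars.
    return s + 1


con = ibis.pandas.connect({'t': pd.DataFrame({'double_col': [1.0, 2.0]})})
t = con.table('t')
print(t.mutate(incremented=add_one_sketch(t['double_col'])).execute())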
ibis-project/ibis
ibis/backends/tests/test_vectorized_udf.py
ibis/backends/impala/tests/conftest.py
import itertools

import dask.dataframe as dd
import dask.dataframe.groupby as ddgb
import numpy as np
import pandas
import toolz
from pandas import isnull

import ibis
import ibis.expr.operations as ops
from ibis.backends.pandas.core import integer_types, scalar_types
from ibis.backends.pandas.execution.strings import (
    execute_series_join_scalar_sep,
    execute_series_regex_extract,
    execute_series_regex_replace,
    execute_series_regex_search,
    execute_series_right,
    execute_series_translate_scalar_scalar,
    execute_series_translate_scalar_series,
    execute_series_translate_series_scalar,
    execute_series_translate_series_series,
    execute_string_capitalize,
    execute_string_contains,
    execute_string_length_series,
    execute_string_like_series_string,
    execute_string_lower,
    execute_string_lpad,
    execute_string_lstrip,
    execute_string_repeat,
    execute_string_reverse,
    execute_string_rpad,
    execute_string_rstrip,
    execute_string_strip,
    execute_string_upper,
    execute_substring_int_int,
    haystack_to_series_of_lists,
)

from ..dispatch import execute_node
from .util import (
    TypeRegistrationDict,
    make_selected_obj,
    register_types_to_dispatcher,
)

DASK_DISPATCH_TYPES: TypeRegistrationDict = {
    ops.StringLength: [((dd.Series,), execute_string_length_series)],
    ops.Substring: [
        (
            (dd.Series, integer_types, integer_types),
            execute_substring_int_int,
        ),
    ],
    ops.Strip: [((dd.Series,), execute_string_strip)],
    ops.LStrip: [((dd.Series,), execute_string_lstrip)],
    ops.RStrip: [((dd.Series,), execute_string_rstrip)],
    ops.LPad: [
        (
            (dd.Series, (dd.Series,) + integer_types, (dd.Series, str)),
            execute_string_lpad,
        ),
    ],
    ops.RPad: [
        (
            (dd.Series, (dd.Series,) + integer_types, (dd.Series, str)),
            execute_string_rpad,
        ),
    ],
    ops.Reverse: [((dd.Series,), execute_string_reverse)],
    ops.Lowercase: [((dd.Series,), execute_string_lower)],
    ops.Uppercase: [((dd.Series,), execute_string_upper)],
    ops.Capitalize: [((dd.Series,), execute_string_capitalize)],
    ops.Repeat: [
        ((dd.Series, (dd.Series,) + integer_types), execute_string_repeat),
    ],
    ops.StringFind: [
        (
            (
                dd.Series,
                (dd.Series, str),
                (dd.Series, type(None)) + integer_types,
                (dd.Series, type(None)) + integer_types,
            ),
            execute_string_contains,
        )
    ],
    ops.StringSQLLike: [
        (
            (dd.Series, str, (str, type(None))),
            execute_string_like_series_string,
        ),
    ],
    ops.RegexSearch: [((dd.Series, str), execute_series_regex_search)],
    ops.RegexExtract: [
        (
            (dd.Series, (dd.Series, str), integer_types),
            execute_series_regex_extract,
        ),
    ],
    ops.RegexReplace: [
        ((dd.Series, str, str), execute_series_regex_replace),
    ],
    ops.Translate: [
        (
            (dd.Series, dd.Series, dd.Series),
            execute_series_translate_series_series,
        ),
        ((dd.Series, dd.Series, str), execute_series_translate_series_scalar),
        ((dd.Series, str, dd.Series), execute_series_translate_scalar_series),
        ((dd.Series, str, str), execute_series_translate_scalar_scalar),
    ],
    ops.StrRight: [((dd.Series, integer_types), execute_series_right)],
    ops.StringJoin: [
        (((dd.Series, str), list), execute_series_join_scalar_sep),
    ],
}
register_types_to_dispatcher(execute_node, DASK_DISPATCH_TYPES)


@execute_node.register(ops.Substring, dd.Series, dd.Series, integer_types)
def execute_substring_series_int(op, data, start, length, **kwargs):
    return execute_substring_series_series(
        op, data, start, dd.from_array(np.repeat(length, len(start))), **kwargs
    )


@execute_node.register(ops.Substring, dd.Series, integer_types, dd.Series)
def execute_string_substring_int_series(op, data, start, length, **kwargs):
    return execute_substring_series_series(
        op,
        data,
        dd.from_array(np.repeat(start, len(length))),
        length,
        **kwargs,
    )


# TODO - substring - #2553
@execute_node.register(ops.Substring, dd.Series, dd.Series, dd.Series)
def execute_substring_series_series(op, data, start, length, **kwargs):
    end = start + length

    # TODO - this is broken
    def iterate(
        value,
        start_iter=start.iteritems(),
        end_iter=end.iteritems(),
    ):
        _, begin = next(start_iter)
        _, end = next(end_iter)
        if (begin is not None and isnull(begin)) or (
            end is not None and isnull(end)
        ):
            return None
        return value[begin:end]

    return data.map(iterate)


@execute_node.register(ops.StringSQLLike, ddgb.SeriesGroupBy, str, str)
def execute_string_like_series_groupby_string(
    op, data, pattern, escape, **kwargs
):
    return execute_string_like_series_string(
        op, make_selected_obj(data), pattern, escape, **kwargs
    ).groupby(data.grouper.groupings)


# TODO - aggregations - #2553
@execute_node.register(
    ops.GroupConcat, dd.Series, str, (dd.Series, type(None))
)
def execute_group_concat_series_mask(
    op, data, sep, mask, aggcontext=None, **kwargs
):
    return aggcontext.agg(
        data[mask] if mask is not None else data,
        lambda series, sep=sep: sep.join(series.values),
    )


@execute_node.register(ops.GroupConcat, ddgb.SeriesGroupBy, str, type(None))
def execute_group_concat_series_gb(
    op, data, sep, _, aggcontext=None, **kwargs
):
    custom_group_concat = dd.Aggregation(
        name='custom_group_concat',
        chunk=lambda s: s.apply(list),
        agg=lambda s0: s0.apply(
            lambda chunks: sep.join(
                str(s) for s in itertools.chain.from_iterable(chunks)
            )
        ),
    )
    return data.agg(custom_group_concat)


# TODO - aggregations - #2553
@execute_node.register(
    ops.GroupConcat, ddgb.SeriesGroupBy, str, ddgb.SeriesGroupBy
)
def execute_group_concat_series_gb_mask(
    op, data, sep, mask, aggcontext=None, **kwargs
):
    def method(series, sep=sep):
        return sep.join(series.values.astype(str))

    return aggcontext.agg(
        data,
        lambda data, mask=mask.obj, method=method: method(
            data[mask[data.index]]
        ),
    )


@execute_node.register(ops.StringAscii, dd.Series)
def execute_string_ascii(op, data, **kwargs):
    output_meta = pandas.Series([], dtype=np.dtype('int32'), name=data.name)
    return data.map(ord, meta=output_meta)


@execute_node.register(ops.StringAscii, ddgb.SeriesGroupBy)
def execute_string_ascii_group_by(op, data, **kwargs):
    return execute_string_ascii(op, make_selected_obj(data), **kwargs).groupby(
        data.index
    )


@execute_node.register(ops.RegexSearch, ddgb.SeriesGroupBy, str)
def execute_series_regex_search_gb(op, data, pattern, **kwargs):
    return execute_series_regex_search(
        op,
        make_selected_obj(data),
        getattr(pattern, 'obj', pattern),
        **kwargs,
    ).groupby(data.index)


@execute_node.register(
    ops.RegexExtract, ddgb.SeriesGroupBy, str, integer_types
)
def execute_series_regex_extract_gb(op, data, pattern, index, **kwargs):
    return execute_series_regex_extract(
        op, make_selected_obj(data), pattern, index, **kwargs
    ).groupby(data.index)


@execute_node.register(ops.RegexReplace, ddgb.SeriesGroupBy, str, str)
def execute_series_regex_replace_gb(op, data, pattern, replacement, **kwargs):
    # Pass ``op`` through like the other groupby wrappers in this module;
    # the wrapped pandas implementation takes it as the first argument.
    return execute_series_regex_replace(
        op, make_selected_obj(data), pattern, replacement, **kwargs
    ).groupby(data.index)


@execute_node.register(ops.StrRight, ddgb.SeriesGroupBy, integer_types)
def execute_series_right_gb(op, data, nchars, **kwargs):
    return execute_series_right(op, make_selected_obj(data), nchars).groupby(
        data.index
    )


def haystack_to_dask_series_of_lists(haystack, index=None):
    pieces = haystack_to_series_of_lists(haystack, index)
    return dd.from_pandas(pieces, npartitions=1)


@execute_node.register(ops.FindInSet, dd.Series, list)
def execute_series_find_in_set(op, needle, haystack, **kwargs):
    def find_in_set(index, elements):
        return ibis.util.safe_index(elements, index)

    return needle.apply(find_in_set, args=(haystack,))


@execute_node.register(ops.FindInSet, ddgb.SeriesGroupBy, list)
def execute_series_group_by_find_in_set(op, needle, haystack, **kwargs):
    pieces = [getattr(piece, 'obj', piece) for piece in haystack]
    return execute_series_find_in_set(
        op, make_selected_obj(needle), pieces, **kwargs
    ).groupby(needle.index)


# TODO we need this version, not the pandas one
@execute_node.register(ops.FindInSet, scalar_types, list)
def execute_string_group_by_find_in_set(op, needle, haystack, **kwargs):
    # `haystack` could contain series, series groupbys, or scalars;
    # mixing series and series groupbys is not allowed
    series_in_haystack = [
        type(piece)
        for piece in haystack
        if isinstance(piece, (dd.Series, ddgb.SeriesGroupBy))
    ]

    if not series_in_haystack:
        return ibis.util.safe_index(haystack, needle)

    try:
        # the list already holds types, so deduplicate it directly
        (collection_type,) = frozenset(series_in_haystack)
    except ValueError:
        raise ValueError('Mixing Series and ddgb.SeriesGroupBy is not allowed')

    pieces = haystack_to_dask_series_of_lists(
        [getattr(piece, 'obj', piece) for piece in haystack]
    )

    result = pieces.map(toolz.flip(ibis.util.safe_index)(needle))
    if issubclass(collection_type, dd.Series):
        return result

    assert issubclass(collection_type, ddgb.SeriesGroupBy)

    return result.groupby(
        toolz.first(
            piece.grouper.groupings
            for piece in haystack
            if hasattr(piece, 'grouper')
        )
    )
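A short sketch of what one entry of DASK_DISPATCH_TYPES expands to: per entry, register_types_to_dispatcher is equivalent to calling execute_node.register by hand, so the ops.Strip row above could also be written as below (absolute module paths inferred from the relative imports in this file).

import dask.dataframe as dd

import ibis.expr.operations as ops
from ibis.backends.dask.dispatch import execute_node
from ibis.backends.pandas.execution.strings import execute_string_strip

# Same effect as the ops.Strip entry in DASK_DISPATCH_TYPES above.
execute_node.register(ops.Strip, dd.Series)(execute_string_strip)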
ibis-project/ibis
ibis/backends/tests/test_vectorized_udf.py
ibis/backends/dask/execution/strings.py
# Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import json
import mock


class BigIPMock(object):
    """Mock BIG-IP® object

    Mocks a BIG-IP® object by substituting a mock icr_session object which
    returns a user created mock response object.

    To use, create a mock response object which will get returned by any
    icr_session HTTP method, then create an interface object, passing in
    this BigIPMock object.

    Example:
        # Create a mock response object with status code and JSON. Here
        # read_json_file() is used to get mock JSON, but you can always pass
        # in a JSON string, or create a dictionary object and convert it to
        # JSON using json.dumps().
        response = BigIPMock.create_mock_response(
            200,
            BigIPMock.read_json_file("f5/bigip/interfaces/test/pool.json")
        )

        # Create BIG-IP® object, passing in mocked response object
        big_ip = BigIPMock(response)

        # Create interface object
        test_pool = Pool(big_ip)

        # Call interface method which will receive mock response object
        # created above when it calls the icr_session method get().
        description = test_pool.get_description("my-Pool")
    """

    def __init__(self, response=mock.Mock()):
        """Initializes BigIPMock object.

        :param response: Mock response object to return from icr_session
            calls.
        :return:
        """
        self.icontrol = self._create_icontrol()
        self.icr_session = self._create_icr_session()
        self.icr_uri = 'https://host-abc/mgmt/tm'
        self.response = response

    def _create_icontrol(self):
        return mock.Mock()

    def _create_icr_session(self):
        """Creates a mock icr_session object.

        This mocked icr_session substitutes basic request library methods
        (get, put, post, etc.) with a method that simply returns a mocked
        response object.

        Set the response on the BigIPMock object before calling one of the
        icr_session methods.

        :rtype object: mock session object.
        """

        def mock_response(url, *args, **kwargs):
            return self.response

        icr_session = mock.Mock()
        icr_session.delete = mock_response
        icr_session.get = mock_response
        icr_session.post = mock_response
        icr_session.put = mock_response

        return icr_session

    @staticmethod
    def create_mock_response(status_code, json_str):
        """Creates a mock HTTP response.

        :param int status_code: HTTP response code to mock.
        :param string json_str: JSON string to mock.
        :rtype object: mock HTTP response object.
        """
        response = mock.Mock()
        response.status_code = status_code
        response.text = json_str
        response.json.return_value = json.loads(json_str)
        return response

    @staticmethod
    def read_json_file(filename):
        """Reads JSON file, returning a JSON string.

        The file must contain a valid JSON object, for example:
        {"key": "value"...} or {"key": {"key": "value"}...}

        :param string filename: Name of file containing JSON object.
        :rtype string: JSON object as a string.
        """
        with open(filename) as f:
            s = f.read()
        assert len(s) > 0
        return s
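A minimal pytest-style sketch of driving BigIPMock directly with an inline JSON string rather than a file; the status code and payload are illustrative.

def test_mock_session_returns_canned_response():
    response = BigIPMock.create_mock_response(
        200, '{"description": "my-pool"}'
    )
    big_ip = BigIPMock(response)

    # Every icr_session verb returns the canned response object.
    result = big_ip.icr_session.get('https://host-abc/mgmt/tm/ltm/pool')
    assert result.status_code == 200
    assert result.json()['description'] == 'my-pool'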
# Copyright 2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock import pytest from f5.bigip import ManagementRoot from f5.bigip.tm.security.dos import Application from f5.bigip.tm.security.dos import Applications from f5.bigip.tm.security.dos import Device_Config from f5.bigip.tm.security.dos import Dos_Network from f5.bigip.tm.security.dos import Dos_Networks from f5.bigip.tm.security.dos import Profile from f5.bigip.tm.security.dos import Protocol_Dns from f5.bigip.tm.security.dos import Protocol_Dns_s from f5.bigip.tm.security.dos import Protocol_Sip from f5.bigip.tm.security.dos import Protocol_Sips from f5.sdk_exception import MissingRequiredCreationParameter from f5.sdk_exception import MissingRequiredReadParameter from f5.sdk_exception import UnsupportedOperation from six import iterkeys @pytest.fixture def FakeProfile(): fake_profiles = mock.MagicMock() fake_profile = Profile(fake_profiles) return fake_profile @pytest.fixture def FakeDeviceConfig(): fake_device_configs = mock.MagicMock() fake_device_config = Device_Config(fake_device_configs) return fake_device_config def Makeprofile(fakeicontrolsession): b = ManagementRoot('192.168.1.1', 'admin', 'admin') p = b.tm.security.dos.profiles.profile p._meta_data['uri'] = \ 'https://192.168.1.1:443/mgmt/tm/security/dos/profile/~Common' \ '~testprofile/' return p class TestDosProfile(object): def test_create_two(self, fakeicontrolsession): b = ManagementRoot('192.168.1.1', 'admin', 'admin') r1 = b.tm.security.dos.profiles.profile r2 = b.tm.security.dos.profiles.profile assert r1 is not r2 def test_create_no_args(self, FakeProfile): with pytest.raises(MissingRequiredCreationParameter): FakeProfile.create() class TestApplicationSubcollection(object): def test_app_subcollection(self, fakeicontrolsession): pc = Applications(Makeprofile(fakeicontrolsession)) kind = 'tm:security:dos:profile:application:applicationstate' test_meta = pc._meta_data['attribute_registry'] test_meta2 = pc._meta_data['allowed_lazy_attributes'] assert isinstance(pc, Applications) assert kind in list(iterkeys(test_meta)) assert Application in test_meta2 def test_app_create(self, fakeicontrolsession): pc = Applications(Makeprofile(fakeicontrolsession)) pc2 = Applications(Makeprofile(fakeicontrolsession)) r1 = pc.application r2 = pc2.application assert r1 is not r2 def test_app_create_no_args_v11(self, fakeicontrolsession): pc = Applications(Makeprofile(fakeicontrolsession)) with pytest.raises(MissingRequiredCreationParameter): pc.application.create() class TestDosNetworksSubcollection(object): def test_dosnet_subcollection(self, fakeicontrolsession): pc = Dos_Networks(Makeprofile(fakeicontrolsession)) kind = 'tm:security:dos:profile:dos-network:dos-networkstate' test_meta = pc._meta_data['attribute_registry'] test_meta2 = pc._meta_data['allowed_lazy_attributes'] assert isinstance(pc, Dos_Networks) assert kind in list(iterkeys(test_meta)) assert Dos_Network in test_meta2 def test_dosnet_create(self, fakeicontrolsession): pc = 
Dos_Networks(Makeprofile(fakeicontrolsession)) pc2 = Dos_Networks(Makeprofile(fakeicontrolsession)) r1 = pc.dos_network r2 = pc2.dos_network assert r1 is not r2 def test_dosnet_create_no_args_v11(self, fakeicontrolsession): pc = Dos_Networks(Makeprofile(fakeicontrolsession)) with pytest.raises(MissingRequiredCreationParameter): pc.dos_network.create() class TestProtocolDnsSubcollection(object): def test_dns_subcollection(self, fakeicontrolsession): pc = Protocol_Dns_s(Makeprofile(fakeicontrolsession)) kind = 'tm:security:dos:profile:protocol-dns:protocol-dnsstate' test_meta = pc._meta_data['attribute_registry'] test_meta2 = pc._meta_data['allowed_lazy_attributes'] assert isinstance(pc, Protocol_Dns_s) assert kind in list(iterkeys(test_meta)) assert Protocol_Dns in test_meta2 def test_dns_create(self, fakeicontrolsession): pc = Protocol_Dns_s(Makeprofile(fakeicontrolsession)) pc2 = Protocol_Dns_s(Makeprofile(fakeicontrolsession)) r1 = pc.protocol_dns r2 = pc2.protocol_dns assert r1 is not r2 def test_dns_create_no_args_v11(self, fakeicontrolsession): pc = Protocol_Dns_s(Makeprofile(fakeicontrolsession)) with pytest.raises(MissingRequiredCreationParameter): pc.protocol_dns.create() class TestProtocolSipSubcollection(object): def test_sip_subcollection(self, fakeicontrolsession): pc = Protocol_Sips(Makeprofile(fakeicontrolsession)) kind = 'tm:security:dos:profile:protocol-sip:protocol-sipstate' test_meta = pc._meta_data['attribute_registry'] test_meta2 = pc._meta_data['allowed_lazy_attributes'] assert isinstance(pc, Protocol_Sips) assert kind in list(iterkeys(test_meta)) assert Protocol_Sip in test_meta2 def test_sip_create(self, fakeicontrolsession): pc = Protocol_Sips(Makeprofile(fakeicontrolsession)) pc2 = Protocol_Sips(Makeprofile(fakeicontrolsession)) r1 = pc.protocol_sip r2 = pc2.protocol_sip assert r1 is not r2 def test_sip_create_no_args_v11(self, fakeicontrolsession): pc = Protocol_Sips(Makeprofile(fakeicontrolsession)) with pytest.raises(MissingRequiredCreationParameter): pc.protocol_sip.create() class TestDosDeviceConfig(object): def test_create_raises(self, fakeicontrolsession): b = ManagementRoot('192.168.1.1', 'admin', 'admin') with pytest.raises(UnsupportedOperation): b.tm.security.dos.device_configs.device_config.create() def test_delete_raises(self, fakeicontrolsession): b = ManagementRoot('192.168.1.1', 'admin', 'admin') with pytest.raises(UnsupportedOperation): b.tm.security.dos.device_configs.device_config.delete() def test_load_no_args(self, FakeDeviceConfig): with pytest.raises(MissingRequiredReadParameter): FakeDeviceConfig.load()
F5Networks/f5-common-python
f5/bigip/tm/security/test/unit/test_dos.py
f5/bigip/test/unit/big_ip_mock.py
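A minimal usage sketch for the mock above, assuming the import path matches the code_path shown; the pool payload is made up for illustration:

import json
from f5.bigip.test.unit.big_ip_mock import BigIPMock

# Build a canned 200 response from an inline JSON payload.
response = BigIPMock.create_mock_response(
    200, json.dumps({'description': 'my-pool'}))
big_ip = BigIPMock(response)

# Every icr_session HTTP verb now returns the canned response.
result = big_ip.icr_session.get('https://host-abc/mgmt/tm/ltm/pool/my-pool')
assert result.status_code == 200
assert result.json()['description'] == 'my-pool'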
# coding=utf-8 # # Copyright 2015-2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """BIG-IP® Advanced Firewall Manager™ (AFM®) module. REST URI ``http://localhost/mgmt/tm/security/scrubber`` GUI Path ``Security --> Option --> Network Firewall --> External Redirection --> Scrubbing Profile`` REST Kind ``tm:security:scrubbercollectionstate:*`` """ from f5.bigip.resource import Collection from f5.bigip.resource import OrganizingCollection from f5.bigip.resource import Resource class Scrubber(OrganizingCollection): """BIG-IP® AFM® Scrubber organizing collection.""" def __init__(self, security): super(Scrubber, self).__init__(security) self._meta_data['allowed_lazy_attributes'] = [ Profile_s] class Profile_s(Collection): """BIG-IP® AFM® Scrubber Profile collection""" def __init__(self, scrubber): super(Profile_s, self).__init__(scrubber) self._meta_data['allowed_lazy_attributes'] = [Profile] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:profilestate': Profile} class Profile(Resource): """BIG-IP® AFM® Scrubber Profile resource""" def __init__(self, profile_s): super(Profile, self).__init__(profile_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:profilestate' self._meta_data['required_load_parameters'].update(('partition', 'name')) self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-rt-domain:scrubber_rt_domaincollectionstate': Scrubber_Rt_Domain_s, 'tm:security:scrubber:profile:scrubber-categories:scrubber-categoriescollectionstate': Scrubber_Categories_s, 'tm:security:scrubber:profile:scrubber-virtual-server:scrubber-virtual-servercollectionstate': Scrubber_Virtual_Server_s, 'tm:security:scrubber:profile:scrubber-netflow-protected-server:scrubber-netflow-protected-servercollectionstate': Scrubber_Netflow_Protected_Server_s} self._meta_data['allowed_lazy_attributes'] = [ Scrubber_Rt_Domain_s, Scrubber_Virtual_Server_s, Scrubber_Categories_s, Scrubber_Netflow_Protected_Server_s] class Scrubber_Rt_Domain_s(Collection): """BIG-IP® AFM® Scrubber Profile Route Domain collection""" def __init__(self, profile): super(Scrubber_Rt_Domain_s, self).__init__(profile) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Rt_Domain] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rt-domainstate': Scrubber_Rt_Domain} class Scrubber_Rt_Domain(Resource): """BIG-IP® AFM® Scrubber Profile Route Domain resource""" def __init__(self, scrubber_rt_domain_s): super(Scrubber_Rt_Domain, self).__init__(scrubber_rt_domain_s) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Rd_Network_Prefix_s] self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rt-domainstate' self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rd-network-prefix:scrubber-rd-network-prefixcollectionstate': Scrubber_Rd_Network_Prefix_s} self._meta_data['required_creation_parameters'].update(('name', 
'routeDomain')) class Scrubber_Rd_Network_Prefix_s(Collection): """BIG-IP® AFM® Scrubber Rd Network Prefix collection""" def __init__(self, scrubber_rt_domain): super(Scrubber_Rd_Network_Prefix_s, self).__init__(scrubber_rt_domain) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Rd_Network_Prefix] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rd-network-prefix:scrubber-rd-network-prefixstate': Scrubber_Rd_Network_Prefix} class Scrubber_Rd_Network_Prefix(Resource): """BIG-IP® AFM® Scrubber Rd Network Prefix resource""" def __init__(self, scrubber_rd_network_prefix_s): super(Scrubber_Rd_Network_Prefix, self).__init__(scrubber_rd_network_prefix_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-rt-domain:scrubber-rd-network-prefix:scrubber-rd-network-prefixstate' self._meta_data['required_creation_parameters'].update(('name', 'nextHop', 'dstIp', 'mask')) class Scrubber_Virtual_Server_s(Collection): """BIG-IP® AFM® Scrubber Profile Virtual Server collection""" def __init__(self, profile): super(Scrubber_Virtual_Server_s, self).__init__(profile) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Virtual_Server] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-virtual-server:scrubber-virtual-serverstate': Scrubber_Virtual_Server} class Scrubber_Virtual_Server(Resource): """BIG-IP® AFM® Scrubber Profile Virtual Server resource""" def __init__(self, scrubber_virtual_server_s): super(Scrubber_Virtual_Server, self).__init__(scrubber_virtual_server_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-virtual-server:scrubber-virtual-serverstate' self._meta_data['required_creation_parameters'].update(('name', 'vsName')) class Scrubber_Categories_s(Collection): """BIG-IP® AFM® Scrubber Profile Categories collection""" def __init__(self, profile): super(Scrubber_Categories_s, self).__init__(profile) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Categories] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-categories:scrubber-categoriesstate': Scrubber_Categories} class Scrubber_Categories(Resource): """BIG-IP® AFM® Scrubber Profile Categories resource""" def __init__(self, scrubber_categories_s): super(Scrubber_Categories, self).__init__(scrubber_categories_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-categories:scrubber-categoriesstate' self._meta_data['required_creation_parameters'].update(('name', 'blacklistCategory', 'routeDomainName')) class Scrubber_Netflow_Protected_Server_s(Collection): """BIG-IP® AFM® Scrubber Profile Netflow Protected Server collection""" def __init__(self, profile): super(Scrubber_Netflow_Protected_Server_s, self).__init__(profile) self._meta_data['allowed_lazy_attributes'] = [Scrubber_Netflow_Protected_Server] self._meta_data['attribute_registry'] = \ {'tm:security:scrubber:profile:scrubber-netflow-protected-server:scrubber-netflow-protected-serverstate': Scrubber_Netflow_Protected_Server} class Scrubber_Netflow_Protected_Server(Resource): """BIG-IP® AFM® Scrubber Profile Netflow Protected Server resource""" def __init__(self, scrubber_netflow_protected_server_s): super(Scrubber_Netflow_Protected_Server, self).__init__(scrubber_netflow_protected_server_s) self._meta_data['required_json_kind'] = \ 'tm:security:scrubber:profile:scrubber-netflow-protected-server:scrubber-netflow-protected-serverstate' 
self._meta_data['required_creation_parameters'].update(('name', 'npsName'))
F5Networks/f5-common-python
f5/bigip/tm/security/test/unit/test_dos.py
f5/bigip/tm/security/scrubber.py
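For context, a sketch of how the scrubber resources above might be driven through ManagementRoot, in the same style as the DoS tests. The lazy attribute names (profile_s, scrubber_rt_domain_s) follow the SDK's class-name-to-attribute convention but are assumptions; the creation parameters come from the required_creation_parameters declared above:

from f5.bigip import ManagementRoot

mgmt = ManagementRoot('192.168.1.1', 'admin', 'admin')

# Organizing collection -> collection -> resource, per the kind strings above.
profile = mgmt.tm.security.scrubber.profile_s.profile.create(
    name='testscrubber', partition='Common')

# Subcollections hang off the profile; 'routeDomain' is required at creation.
rt_domain = profile.scrubber_rt_domain_s.scrubber_rt_domain.create(
    name='rd0', routeDomain='/Common/0')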
from __future__ import absolute_import from __future__ import unicode_literals class OperationFailedError(Exception): def __init__(self, reason): self.msg = reason class StreamParseError(RuntimeError): def __init__(self, reason): self.msg = reason
from __future__ import absolute_import from __future__ import unicode_literals import os import platform import docker import pytest import compose from compose.cli import errors from compose.cli.docker_client import docker_client from compose.cli.docker_client import tls_config_from_options from tests import mock from tests import unittest class DockerClientTestCase(unittest.TestCase): def test_docker_client_no_home(self): with mock.patch.dict(os.environ): del os.environ['HOME'] docker_client(os.environ) @mock.patch.dict(os.environ) def test_docker_client_with_custom_timeout(self): os.environ['COMPOSE_HTTP_TIMEOUT'] = '123' client = docker_client(os.environ) assert client.timeout == 123 @mock.patch.dict(os.environ) def test_custom_timeout_error(self): os.environ['COMPOSE_HTTP_TIMEOUT'] = '123' client = docker_client(os.environ) with mock.patch('compose.cli.errors.log') as fake_log: with pytest.raises(errors.ConnectionError): with errors.handle_connection_errors(client): raise errors.RequestsConnectionError( errors.ReadTimeoutError(None, None, None)) assert fake_log.error.call_count == 1 assert '123' in fake_log.error.call_args[0][0] with mock.patch('compose.cli.errors.log') as fake_log: with pytest.raises(errors.ConnectionError): with errors.handle_connection_errors(client): raise errors.ReadTimeout() assert fake_log.error.call_count == 1 assert '123' in fake_log.error.call_args[0][0] def test_user_agent(self): client = docker_client(os.environ) expected = "docker-compose/{0} docker-py/{1} {2}/{3}".format( compose.__version__, docker.__version__, platform.system(), platform.release() ) self.assertEqual(client.headers['User-Agent'], expected) class TLSConfigTestCase(unittest.TestCase): ca_cert = 'tests/fixtures/tls/ca.pem' client_cert = 'tests/fixtures/tls/cert.pem' key = 'tests/fixtures/tls/key.key' def test_simple_tls(self): options = {'--tls': True} result = tls_config_from_options(options) assert result is True def test_tls_ca_cert(self): options = { '--tlscacert': self.ca_cert, '--tlsverify': True } result = tls_config_from_options(options) assert isinstance(result, docker.tls.TLSConfig) assert result.ca_cert == options['--tlscacert'] assert result.verify is True def test_tls_ca_cert_explicit(self): options = { '--tlscacert': self.ca_cert, '--tls': True, '--tlsverify': True } result = tls_config_from_options(options) assert isinstance(result, docker.tls.TLSConfig) assert result.ca_cert == options['--tlscacert'] assert result.verify is True def test_tls_client_cert(self): options = { '--tlscert': self.client_cert, '--tlskey': self.key } result = tls_config_from_options(options) assert isinstance(result, docker.tls.TLSConfig) assert result.cert == (options['--tlscert'], options['--tlskey']) def test_tls_client_cert_explicit(self): options = { '--tlscert': self.client_cert, '--tlskey': self.key, '--tls': True } result = tls_config_from_options(options) assert isinstance(result, docker.tls.TLSConfig) assert result.cert == (options['--tlscert'], options['--tlskey']) def test_tls_client_and_ca(self): options = { '--tlscert': self.client_cert, '--tlskey': self.key, '--tlsverify': True, '--tlscacert': self.ca_cert } result = tls_config_from_options(options) assert isinstance(result, docker.tls.TLSConfig) assert result.cert == (options['--tlscert'], options['--tlskey']) assert result.ca_cert == options['--tlscacert'] assert result.verify is True def test_tls_client_and_ca_explicit(self): options = { '--tlscert': self.client_cert, '--tlskey': self.key, '--tlsverify': True, '--tlscacert': 
self.ca_cert, '--tls': True } result = tls_config_from_options(options) assert isinstance(result, docker.tls.TLSConfig) assert result.cert == (options['--tlscert'], options['--tlskey']) assert result.ca_cert == options['--tlscacert'] assert result.verify is True def test_tls_client_missing_key(self): options = {'--tlscert': self.client_cert} with pytest.raises(docker.errors.TLSParameterError): tls_config_from_options(options) options = {'--tlskey': self.key} with pytest.raises(docker.errors.TLSParameterError): tls_config_from_options(options) def test_assert_hostname_explicit_skip(self): options = {'--tlscacert': self.ca_cert, '--skip-hostname-check': True} result = tls_config_from_options(options) assert isinstance(result, docker.tls.TLSConfig) assert result.assert_hostname is False def test_tls_client_and_ca_quoted_paths(self): options = { '--tlscacert': '"{0}"'.format(self.ca_cert), '--tlscert': '"{0}"'.format(self.client_cert), '--tlskey': '"{0}"'.format(self.key), '--tlsverify': True } result = tls_config_from_options(options) assert isinstance(result, docker.tls.TLSConfig) assert result.cert == (self.client_cert, self.key) assert result.ca_cert == self.ca_cert assert result.verify is True
dbdd4us/compose
tests/unit/cli/docker_client_test.py
compose/errors.py
from __future__ import absolute_import
from __future__ import unicode_literals

import logging
import operator
import sys
from threading import Thread

from docker.errors import APIError
from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue

from compose.cli.signals import ShutdownException
from compose.errors import OperationFailedError
from compose.utils import get_output_stream

log = logging.getLogger(__name__)

STOP = object()


def parallel_execute(objects, func, get_name, msg, get_deps=None):
    """Runs func on objects in parallel while ensuring that func is
    run on an object only after it has run on all of its dependencies.

    get_deps, called on an object, must return a collection of its
    dependencies. get_name, called on an object, must return its name.
    """
    objects = list(objects)
    stream = get_output_stream(sys.stderr)
    writer = ParallelStreamWriter(stream, msg)

    for obj in objects:
        writer.initialize(get_name(obj))

    events = parallel_execute_iter(objects, func, get_deps)

    errors = {}
    results = []
    error_to_reraise = None

    for obj, result, exception in events:
        if exception is None:
            writer.write(get_name(obj), 'done')
            results.append(result)
        elif isinstance(exception, APIError):
            errors[get_name(obj)] = exception.explanation
            writer.write(get_name(obj), 'error')
        elif isinstance(exception, OperationFailedError):
            errors[get_name(obj)] = exception.msg
            writer.write(get_name(obj), 'error')
        elif isinstance(exception, UpstreamError):
            writer.write(get_name(obj), 'error')
        else:
            errors[get_name(obj)] = exception
            error_to_reraise = exception

    for obj_name, error in errors.items():
        stream.write("\nERROR: for {} {}\n".format(obj_name, error))

    if error_to_reraise:
        raise error_to_reraise

    return results, errors


def _no_deps(x):
    return []


class State(object):
    """
    Holds the state of a partially-complete parallel operation.

    state.started:  objects being processed
    state.finished: objects which have been processed
    state.failed:   objects which either failed or whose dependencies failed
    """
    def __init__(self, objects):
        self.objects = objects

        self.started = set()
        self.finished = set()
        self.failed = set()

    def is_done(self):
        return len(self.finished) + len(self.failed) >= len(self.objects)

    def pending(self):
        return set(self.objects) - self.started - self.finished - self.failed


def parallel_execute_iter(objects, func, get_deps):
    """
    Runs func on objects in parallel while ensuring that func is
    run on an object only after it has run on all of its dependencies.

    Returns an iterator of tuples which look like:

    # if func returned normally when run on object
    (object, result, None)

    # if func raised an exception when run on object
    (object, None, exception)

    # if func raised an exception when run on one of object's dependencies
    (object, None, UpstreamError())
    """
    if get_deps is None:
        get_deps = _no_deps

    results = Queue()
    state = State(objects)

    while True:
        feed_queue(objects, func, get_deps, results, state)

        try:
            event = results.get(timeout=0.1)
        except Empty:
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()

        if event is STOP:
            break

        obj, _, exception = event
        if exception is None:
            log.debug('Finished processing: {}'.format(obj))
            state.finished.add(obj)
        else:
            log.debug('Failed: {}'.format(obj))
            state.failed.add(obj)

        yield event


def producer(obj, func, results):
    """
    The entry point for a producer thread which runs func on a single object.
    Places a tuple on the results queue once func has either returned or
    raised.
    """
    try:
        result = func(obj)
        results.put((obj, result, None))
    except Exception as e:
        results.put((obj, None, e))


def feed_queue(objects, func, get_deps, results, state):
    """
    Starts producer threads for any objects which are ready to be processed
    (i.e. they have no dependencies which haven't been successfully
    processed).

    Shortcuts any objects whose dependencies have failed and places an
    (object, None, UpstreamError()) tuple on the results queue.
    """
    pending = state.pending()
    log.debug('Pending: {}'.format(pending))

    for obj in pending:
        deps = get_deps(obj)

        if any(dep in state.failed for dep in deps):
            log.debug('{} has upstream errors - not processing'.format(obj))
            results.put((obj, None, UpstreamError()))
            state.failed.add(obj)
        elif all(
            dep not in objects or dep in state.finished
            for dep in deps
        ):
            log.debug('Starting producer thread for {}'.format(obj))
            t = Thread(target=producer, args=(obj, func, results))
            t.daemon = True
            t.start()
            state.started.add(obj)

    if state.is_done():
        results.put(STOP)


class UpstreamError(Exception):
    pass


class ParallelStreamWriter(object):
    """Write out messages for operations happening in parallel.

    Each operation has its own line, and ANSI code characters are used
    to jump to the correct line, and write over the line.
    """

    def __init__(self, stream, msg):
        self.stream = stream
        self.msg = msg
        self.lines = []

    def initialize(self, obj_index):
        if self.msg is None:
            return
        self.lines.append(obj_index)
        self.stream.write("{} {} ... \r\n".format(self.msg, obj_index))
        self.stream.flush()

    def write(self, obj_index, status):
        if self.msg is None:
            return
        position = self.lines.index(obj_index)
        diff = len(self.lines) - position
        # move up
        self.stream.write("%c[%dA" % (27, diff))
        # erase
        self.stream.write("%c[2K\r" % 27)
        self.stream.write("{} {} ... {}\r".format(self.msg, obj_index, status))
        # move back down
        self.stream.write("%c[%dB" % (27, diff))
        self.stream.flush()


def parallel_operation(containers, operation, options, message):
    parallel_execute(
        containers,
        operator.methodcaller(operation, **options),
        operator.attrgetter('name'),
        message)


def parallel_remove(containers, options):
    stopped_containers = [c for c in containers if not c.is_running]
    parallel_operation(stopped_containers, 'remove', options, 'Removing')


def parallel_start(containers, options):
    parallel_operation(containers, 'start', options, 'Starting')


def parallel_pause(containers, options):
    parallel_operation(containers, 'pause', options, 'Pausing')


def parallel_unpause(containers, options):
    parallel_operation(containers, 'unpause', options, 'Unpausing')


def parallel_kill(containers, options):
    parallel_operation(containers, 'kill', options, 'Killing')


def parallel_restart(containers, options):
    parallel_operation(containers, 'restart', options, 'Restarting')
dbdd4us/compose
tests/unit/cli/docker_client_test.py
compose/parallel.py
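A minimal, self-contained sketch of parallel_execute with a dependency graph; the service names and start function are made up for illustration:

from compose.parallel import parallel_execute

# 'web' depends on 'db', so start() runs on 'web' only after 'db' finishes.
deps = {'db': [], 'web': ['db']}


def start(name):
    return '{} started'.format(name)

results, errors = parallel_execute(
    objects=['db', 'web'],
    func=start,
    get_name=lambda name: name,
    msg='Starting',
    get_deps=lambda name: deps[name],
)
assert not errors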
from __future__ import division

import sys

import theano.tensor as tt
# pylint: disable=unused-import
import theano
from theano.tensor import (
    constant, flatten, zeros_like, ones_like, stack, concatenate, sum, prod,
    lt, gt, le, ge, eq, neq, switch, clip, where, and_, or_, abs_, exp, log,
    cos, sin, tan, cosh, sinh, tanh, sqr, sqrt, erf, erfc, erfinv, erfcinv,
    dot, maximum, minimum, sgn, ceil, floor)
from theano.tensor.nlinalg import det, matrix_inverse, extract_diag, matrix_dot, trace
import theano.tensor.slinalg
import theano.sparse
from theano.tensor.nnet import sigmoid
from theano.gof import Op, Apply
import numpy as np
import scipy as sp
import scipy.sparse
from scipy.linalg import block_diag as scipy_block_diag
from pymc3.theanof import floatX, largest_common_dtype, ix_
# pylint: enable=unused-import


def tround(*args, **kwargs):
    """
    Temporary function to silence round warning in Theano. Please remove
    when the warning disappears.
    """
    kwargs['mode'] = 'half_to_even'
    return tt.round(*args, **kwargs)


def logsumexp(x, axis=None):
    # Adapted from https://github.com/Theano/Theano/issues/1563
    x_max = tt.max(x, axis=axis, keepdims=True)
    return tt.log(tt.sum(tt.exp(x - x_max), axis=axis, keepdims=True)) + x_max


def logaddexp(a, b):
    diff = b - a
    return tt.switch(diff > 0,
                     b + tt.log1p(tt.exp(-diff)),
                     a + tt.log1p(tt.exp(diff)))


def invlogit(x, eps=sys.float_info.epsilon):
    return (1. - 2. * eps) / (1. + tt.exp(-x)) + eps


def logit(p):
    return tt.log(p / (floatX(1) - p))


def flatten_list(tensors):
    return tt.concatenate([var.ravel() for var in tensors])


class LogDet(Op):
    """Compute the logarithm of the absolute determinant of a square
    matrix M, log(abs(det(M))) on the CPU. Avoids det(M) overflow/
    underflow.

    Note
    ----
    Once PR #3959 (https://github.com/Theano/Theano/pull/3959/) by harpone
    is merged, this must be removed.
    """
    def make_node(self, x):
        x = theano.tensor.as_tensor_variable(x)
        o = theano.tensor.scalar(dtype=x.dtype)
        return Apply(self, [x], [o])

    def perform(self, node, inputs, outputs, params=None):
        try:
            (x,) = inputs
            (z,) = outputs
            s = np.linalg.svd(x, compute_uv=False)
            log_det = np.sum(np.log(np.abs(s)))
            z[0] = np.asarray(log_det, dtype=x.dtype)
        except Exception:
            print('Failed to compute logdet of {}.'.format(x))
            raise

    def grad(self, inputs, g_outputs):
        [gz] = g_outputs
        [x] = inputs
        return [gz * matrix_inverse(x).T]

    def __str__(self):
        return "LogDet"

logdet = LogDet()


def probit(p):
    return -sqrt(2.) * erfcinv(2. * p)


def invprobit(x):
    return .5 * erfc(-x / sqrt(2.))


def expand_packed_triangular(n, packed, lower=True, diagonal_only=False):
    R"""Convert a packed triangular matrix into a two dimensional array.

    Triangular matrices can be stored with better space efficiency by
    storing the non-zero values in a one-dimensional array. We number
    the elements by row like this (for lower or upper triangular matrices)::

        [[0 - - -]     [[0 1 2 3]
         [1 2 - -]      [- 4 5 6]
         [3 4 5 -]      [- - 7 8]
         [6 7 8 9]]     [- - - 9]]

    Parameters
    ----------
    n : int
        The number of rows of the triangular matrix.
    packed : theano.vector
        The matrix in packed format.
    lower : bool, default=True
        If true, assume that the matrix is lower triangular.
    diagonal_only : bool
        If true, return only the diagonal of the matrix.
    """
    if packed.ndim != 1:
        raise ValueError('Packed triangular is not one dimensional.')
    if not isinstance(n, int):
        raise TypeError('n must be an integer')

    if diagonal_only and lower:
        diag_idxs = np.arange(1, n + 1).cumsum() - 1
        return packed[diag_idxs]
    elif diagonal_only and not lower:
        diag_idxs = np.arange(2, n + 2)[::-1].cumsum() - n - 1
        return packed[diag_idxs]
    elif lower:
        out = tt.zeros((n, n), dtype=theano.config.floatX)
        idxs = np.tril_indices(n)
        return tt.set_subtensor(out[idxs], packed)
    elif not lower:
        out = tt.zeros((n, n), dtype=theano.config.floatX)
        idxs = np.triu_indices(n)
        return tt.set_subtensor(out[idxs], packed)


class BatchedDiag(tt.Op):
    """
    Fast BatchedDiag allocation
    """
    __props__ = ()

    def make_node(self, diag):
        diag = tt.as_tensor_variable(diag)
        if diag.type.ndim != 2:
            raise TypeError('data argument must be a matrix', diag.type)

        return tt.Apply(self, [diag], [tt.tensor3(dtype=diag.dtype)])

    def perform(self, node, ins, outs, params=None):
        (C,) = ins
        (z,) = outs

        bc = C.shape[0]
        dim = C.shape[-1]
        Cd = np.zeros((bc, dim, dim), C.dtype)
        bidx = np.repeat(np.arange(bc), dim)
        didx = np.tile(np.arange(dim), bc)
        Cd[bidx, didx, didx] = C.flatten()
        z[0] = Cd

    def grad(self, inputs, gout):
        (gz,) = gout
        idx = tt.arange(gz.shape[-1])
        return [gz[..., idx, idx]]

    def infer_shape(self, nodes, shapes):
        return [(shapes[0][0], ) + (shapes[0][1],) * 2]


def batched_diag(C):
    C = tt.as_tensor(C)
    dim = C.shape[-1]
    if C.ndim == 2:
        # diag -> matrices
        return BatchedDiag()(C)
    elif C.ndim == 3:
        # matrices -> diag
        idx = tt.arange(dim)
        return C[..., idx, idx]
    else:
        raise ValueError('Input should be 2 or 3 dimensional')


class BlockDiagonalMatrix(Op):
    __props__ = ('sparse', 'format')

    def __init__(self, sparse=False, format='csr'):
        if format not in ('csr', 'csc'):
            raise ValueError("format must be one of: 'csr', 'csc', got {}".format(format))
        self.sparse = sparse
        self.format = format

    def make_node(self, *matrices):
        if not matrices:
            raise ValueError('no matrices to allocate')
        matrices = list(map(tt.as_tensor, matrices))
        if any(mat.type.ndim != 2 for mat in matrices):
            raise TypeError('all data arguments must be matrices')
        if self.sparse:
            out_type = theano.sparse.matrix(self.format, dtype=largest_common_dtype(matrices))
        else:
            out_type = theano.tensor.matrix(dtype=largest_common_dtype(matrices))
        return tt.Apply(self, matrices, [out_type])

    def perform(self, node, inputs, output_storage, params=None):
        dtype = largest_common_dtype(inputs)
        if self.sparse:
            output_storage[0][0] = sp.sparse.block_diag(
                inputs,
                self.format,
                dtype
            )
        else:
            output_storage[0][0] = scipy_block_diag(*inputs).astype(dtype)

    def grad(self, inputs, gout):
        shapes = tt.stack([i.shape for i in inputs])
        index_end = shapes.cumsum(0)
        index_begin = index_end - shapes
        slices = [ix_(tt.arange(index_begin[i, 0], index_end[i, 0]),
                      tt.arange(index_begin[i, 1], index_end[i, 1])
                      ) for i in range(len(inputs))]
        return [gout[0][slc] for slc in slices]

    def infer_shape(self, nodes, shapes):
        first, second = zip(*shapes)
        return [(tt.add(*first), tt.add(*second))]


def block_diagonal(matrices, sparse=False, format='csr'):
    """See scipy.sparse.block_diag or scipy.linalg.block_diag for reference

    Parameters
    ----------
    matrices : tensors
    format : str (default 'csr')
        must be one of: 'csr', 'csc'
    sparse : bool (default False)
        if True return sparse format

    Returns
    -------
    matrix
    """
    if len(matrices) == 1:  # graph optimization
        return matrices[0]
    return BlockDiagonalMatrix(sparse=sparse, format=format)(*matrices)
import pytest import pymc3 as pm import numpy as np from numpy.testing import assert_almost_equal from .helpers import SeededTest from pymc3.distributions.transforms import Transform class TestTransformName(object): cases = [ ('var', 'var_test__'), ('var_test_', 'var_test__test__') ] transform_name = 'test' def test_get_transformed_name(self): test_transform = Transform() test_transform.name = self.transform_name for name, transformed in self.cases: assert pm.util.get_transformed_name(name, test_transform) == transformed def test_is_transformed_name(self): for name, transformed in self.cases: assert pm.util.is_transformed_name(transformed) assert not pm.util.is_transformed_name(name) def test_get_untransformed_name(self): for name, transformed in self.cases: assert pm.util.get_untransformed_name(transformed) == name with pytest.raises(ValueError): pm.util.get_untransformed_name(name) class TestUpdateStartVals(SeededTest): def setup_method(self): super(TestUpdateStartVals, self).setup_method() def test_soft_update_all_present(self): start = {'a': 1, 'b': 2} test_point = {'a': 3, 'b': 4} pm.util.update_start_vals(start, test_point, model=None) assert start == {'a': 1, 'b': 2} def test_soft_update_one_missing(self): start = {'a': 1, } test_point = {'a': 3, 'b': 4} pm.util.update_start_vals(start, test_point, model=None) assert start == {'a': 1, 'b': 4} def test_soft_update_empty(self): start = {} test_point = {'a': 3, 'b': 4} pm.util.update_start_vals(start, test_point, model=None) assert start == test_point def test_soft_update_transformed(self): with pm.Model() as model: pm.Exponential('a', 1) start = {'a': 2.} test_point = {'a_log__': 0} pm.util.update_start_vals(start, test_point, model) assert_almost_equal(np.exp(start['a_log__']), start['a']) def test_soft_update_parent(self): with pm.Model() as model: a = pm.Uniform('a', lower=0., upper=1.) b = pm.Uniform('b', lower=2., upper=3.) pm.Uniform('lower', lower=a, upper=3.) pm.Uniform('upper', lower=0., upper=b) pm.Uniform('interv', lower=a, upper=b) start = {'a': .3, 'b': 2.1, 'lower': 1.4, 'upper': 1.4, 'interv':1.4} test_point = {'lower_interval__': -0.3746934494414109, 'upper_interval__': 0.693147180559945, 'interv_interval__': 0.4519851237430569} pm.util.update_start_vals(start, model.test_point, model) assert_almost_equal(start['lower_interval__'], test_point['lower_interval__']) assert_almost_equal(start['upper_interval__'], test_point['upper_interval__']) assert_almost_equal(start['interv_interval__'], test_point['interv_interval__'])
springcoil/pymc3
pymc3/tests/test_util.py
pymc3/math.py
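A quick sketch of expand_packed_triangular, matching the numbering diagram in its docstring:

import numpy as np
import theano
import theano.tensor as tt

from pymc3.math import expand_packed_triangular

packed = tt.vector('packed')
full = theano.function(
    [packed], expand_packed_triangular(3, packed, lower=True))

# Six packed values fill a 3x3 lower-triangular matrix row by row:
# [[1. 0. 0.]
#  [2. 3. 0.]
#  [4. 5. 6.]]
print(full(np.arange(1., 7., dtype=theano.config.floatX)))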
from .advi import advi, sample_vp from .advi_minibatch import advi_minibatch # commonly used from . import updates from .updates import ( sgd, apply_momentum, momentum, apply_nesterov_momentum, adagrad_window, nesterov_momentum, adagrad, rmsprop, adadelta, adam, adamax, norm_constraint, total_norm_constraint ) from . import inference from .inference import ( ADVI, FullRankADVI, SVGD, ASVGD, NFVI, Inference, KLqp, ImplicitGradient, fit ) from . import approximations from .approximations import ( MeanField, FullRank, Empirical, NormalizingFlow, sample_approx ) from . import opvi from .opvi import ( Group, Approximation ) # special from .stein import Stein from . import flows from . import operators from . import test_functions from . import callbacks
springcoil/pymc3
pymc3/tests/test_util.py
pymc3/variational/__init__.py
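The re-exports above surface the high-level entry points as pymc3.fit, pymc3.ADVI, and so on. A minimal ADVI sketch on synthetic data (model and sizes are arbitrary):

import numpy as np
import pymc3 as pm

data = np.random.randn(100)

with pm.Model():
    mu = pm.Normal('mu', mu=0., sd=10.)
    pm.Normal('obs', mu=mu, sd=1., observed=data)

    # KLqp/ADVI through the `fit` entry point re-exported above.
    approx = pm.fit(n=10000, method='advi')

# Draw posterior samples from the fitted MeanField approximation.
trace = approx.sample(500)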
# This model was presented by Jake Vanderplas in his blog post about
# comparing different MCMC packages
# http://jakevdp.github.io/blog/2014/06/14/frequentism-and-bayesianism-4-bayesian-in-python/
#
# While at the core it's just a linear regression, it's a nice
# illustration of using Jeffrey priors and custom density
# distributions in PyMC3.
#
# Adapted to PyMC3 by Thomas Wiecki

import matplotlib.pyplot as plt
import numpy as np

import pymc3 as pm
import theano.tensor as tt

np.random.seed(42)
theta_true = (25, 0.5)
xdata = 100 * np.random.random(20)
ydata = theta_true[0] + theta_true[1] * xdata

# add scatter to points
xdata = np.random.normal(xdata, 10)
ydata = np.random.normal(ydata, 10)
data = {'x': xdata, 'y': ydata}


# define the loglikelihoods outside of the model context, otherwise njobs
# won't work: lambdas defined in a local namespace are not picklable (see
# issue #1995)
def loglike1(value):
    return -1.5 * tt.log(1 + value**2)


def loglike2(value):
    return -tt.log(tt.abs_(value))

with pm.Model() as model:
    alpha = pm.Normal('intercept', mu=0, sd=100)

    # Create custom densities
    beta = pm.DensityDist('slope', loglike1, testval=0)
    sigma = pm.DensityDist('sigma', loglike2, testval=1)

    # Create likelihood
    like = pm.Normal('y_est', mu=alpha + beta * xdata, sd=sigma,
                     observed=ydata)

    trace = pm.sample(2000, njobs=2)


#################################################
# Create some convenience routines for plotting
# All functions below written by Jake Vanderplas


def compute_sigma_level(trace1, trace2, nbins=20):
    """From a set of traces, bin by number of standard deviations"""
    L, xbins, ybins = np.histogram2d(trace1, trace2, nbins)
    L[L == 0] = 1E-16

    shape = L.shape
    L = L.ravel()

    # obtain the indices to sort and unsort the flattened array
    i_sort = np.argsort(L)[::-1]
    i_unsort = np.argsort(i_sort)

    L_cumsum = L[i_sort].cumsum()
    L_cumsum /= L_cumsum[-1]

    xbins = 0.5 * (xbins[1:] + xbins[:-1])
    ybins = 0.5 * (ybins[1:] + ybins[:-1])

    return xbins, ybins, L_cumsum[i_unsort].reshape(shape)


def plot_MCMC_trace(ax, xdata, ydata, trace, scatter=False, **kwargs):
    """Plot traces and contours"""
    xbins, ybins, sigma = compute_sigma_level(trace[0], trace[1])
    ax.contour(xbins, ybins, sigma.T, levels=[0.683, 0.955], **kwargs)
    if scatter:
        ax.plot(trace[0], trace[1], ',k', alpha=0.1)
    ax.set_xlabel(r'$\alpha$')
    ax.set_ylabel(r'$\beta$')


def plot_MCMC_model(ax, xdata, ydata, trace):
    """Plot the linear model and 2sigma contours"""
    ax.plot(xdata, ydata, 'ok')

    alpha, beta = trace[:2]
    xfit = np.linspace(-20, 120, 10)
    yfit = alpha[:, None] + beta[:, None] * xfit
    mu = yfit.mean(0)
    sig = 2 * yfit.std(0)

    ax.plot(xfit, mu, '-k')
    ax.fill_between(xfit, mu - sig, mu + sig, color='lightgray')

    ax.set_xlabel('x')
    ax.set_ylabel('y')


def plot_MCMC_results(xdata, ydata, trace, colors='k'):
    """Plot both the trace and the model together"""
    _, ax = plt.subplots(1, 2, figsize=(10, 4))
    plot_MCMC_trace(ax[0], xdata, ydata, trace, True, colors=colors)
    plot_MCMC_model(ax[1], xdata, ydata, trace)


pymc3_trace = [trace['intercept'], trace['slope'], trace['sigma']]

plot_MCMC_results(xdata, ydata, pymc3_trace)
plt.show()
springcoil/pymc3
pymc3/tests/test_util.py
pymc3/examples/custom_dists.py
import theano
import theano.tensor as tt

from ..model import FreeRV
from ..theanof import gradient, floatX
from . import distribution
from ..math import logit, invlogit
from .distribution import draw_values
import numpy as np

__all__ = ['transform', 'stick_breaking', 'logodds', 'interval',
           'lowerbound', 'upperbound', 'log', 'sum_to_1', 't_stick_breaking']


class Transform(object):
    """A transformation of a random variable from one space into another.

    Attributes
    ----------
    name : str
    """
    name = ""

    def forward(self, x):
        raise NotImplementedError

    def forward_val(self, x, point):
        raise NotImplementedError

    def backward(self, z):
        raise NotImplementedError

    def jacobian_det(self, x):
        raise NotImplementedError

    def apply(self, dist):
        # avoid circular import
        return TransformedDistribution.dist(dist, self)

    def __str__(self):
        return self.name + " transform"


class ElemwiseTransform(Transform):
    def jacobian_det(self, x):
        grad = tt.reshape(gradient(tt.sum(self.backward(x)), [x]), x.shape)
        return tt.log(tt.abs_(grad))


class TransformedDistribution(distribution.Distribution):
    """A distribution that has been transformed from one space into another."""

    def __init__(self, dist, transform, *args, **kwargs):
        """
        Parameters
        ----------
        dist : Distribution
        transform : Transform
        args, kwargs
            arguments to Distribution"""
        forward = transform.forward
        testval = forward(dist.default())
        forward_val = transform.forward_val

        self.dist = dist
        self.transform_used = transform
        v = forward(FreeRV(name='v', distribution=dist))
        self.type = v.type

        super(TransformedDistribution, self).__init__(
            v.shape.tag.test_value, v.dtype,
            testval, dist.defaults, *args, **kwargs)

        if transform.name == 'stickbreaking':
            b = np.hstack(((np.atleast_1d(self.shape) == 1)[:-1], False))
            # force the last dim not broadcastable
            self.type = tt.TensorType(v.dtype, b)

    def logp(self, x):
        return (self.dist.logp(self.transform_used.backward(x)) +
                self.transform_used.jacobian_det(x))

    def logp_nojac(self, x):
        return self.dist.logp(self.transform_used.backward(x))


transform = Transform


class Log(ElemwiseTransform):
    name = "log"

    def backward(self, x):
        return tt.exp(x)

    def forward(self, x):
        return tt.log(x)

    def forward_val(self, x, point=None):
        return self.forward(x)

    def jacobian_det(self, x):
        return x

log = Log()


class LogOdds(ElemwiseTransform):
    name = "logodds"

    def __init__(self):
        pass

    def backward(self, x):
        return invlogit(x, 0.0)

    def forward(self, x):
        return logit(x)

    def forward_val(self, x, point=None):
        return self.forward(x)

logodds = LogOdds()


class Interval(ElemwiseTransform):
    """Transform from real line interval [a,b] to whole real line."""

    name = "interval"

    def __init__(self, a, b):
        self.a = tt.as_tensor_variable(a)
        self.b = tt.as_tensor_variable(b)

    def backward(self, x):
        a, b = self.a, self.b
        r = (b - a) * tt.nnet.sigmoid(x) + a
        return r

    def forward(self, x):
        a, b = self.a, self.b
        return tt.log(x - a) - tt.log(b - x)

    def forward_val(self, x, point=None):
        # 2017-06-19
        # the `self.a-0.` below is important for the testval to propagate
        # For an explanation see pull/2328#issuecomment-309303811
        a, b = draw_values([self.a-0., self.b-0.], point=point)
        return floatX(tt.log(x - a) - tt.log(b - x))

    def jacobian_det(self, x):
        s = tt.nnet.softplus(-x)
        return tt.log(self.b - self.a) - 2 * s - x

interval = Interval


class LowerBound(ElemwiseTransform):
    """Transform from real line interval [a,inf] to whole real line."""

    name = "lowerbound"

    def __init__(self, a):
        self.a = tt.as_tensor_variable(a)

    def backward(self, x):
        a = self.a
        r = tt.exp(x) + a
        return r

    def forward(self, x):
        a = self.a
        return tt.log(x - a)

    def forward_val(self, x, point=None):
        # 2017-06-19
        # the `self.a-0.` below is important for the testval to propagate
        # For an explanation see pull/2328#issuecomment-309303811
        a = draw_values([self.a-0.], point=point)[0]
        return floatX(tt.log(x - a))

    def jacobian_det(self, x):
        return x

lowerbound = LowerBound


class UpperBound(ElemwiseTransform):
    """Transform from real line interval [-inf,b] to whole real line."""

    name = "upperbound"

    def __init__(self, b):
        self.b = tt.as_tensor_variable(b)

    def backward(self, x):
        b = self.b
        r = b - tt.exp(x)
        return r

    def forward(self, x):
        b = self.b
        return tt.log(b - x)

    def forward_val(self, x, point=None):
        # 2017-06-19
        # the `self.b-0.` below is important for the testval to propagate
        # For an explanation see pull/2328#issuecomment-309303811
        b = draw_values([self.b-0.], point=point)[0]
        return floatX(tt.log(b - x))

    def jacobian_det(self, x):
        return x

upperbound = UpperBound


class SumTo1(Transform):
    """Transforms K dimensional simplex space (values in [0,1] and sum to 1)
    to K - 1 vector of values in [0,1]
    """
    name = "sumto1"

    def backward(self, y):
        return tt.concatenate([y, 1 - tt.sum(y, keepdims=True)])

    def forward(self, x):
        return x[:-1]

    def forward_val(self, x, point=None):
        return self.forward(x)

    def jacobian_det(self, x):
        return 0

sum_to_1 = SumTo1()


class StickBreaking(Transform):
    """Transforms K dimensional simplex space (values in [0,1] and sum to 1)
    to K - 1 vector of real values.

    Primarily borrowed from the STAN implementation.

    Parameters
    ----------
    eps : float, positive value
        A small value for numerical stability in invlogit.
    """

    name = "stickbreaking"

    def __init__(self, eps=floatX(np.finfo(theano.config.floatX).eps)):
        self.eps = eps

    def forward(self, x_):
        x = x_.T
        # reverse cumsum
        x0 = x[:-1]
        s = tt.extra_ops.cumsum(x0[::-1], 0)[::-1] + x[-1]
        z = x0 / s
        Km1 = x.shape[0] - 1
        k = tt.arange(Km1)[(slice(None), ) + (None, ) * (x.ndim - 1)]
        eq_share = logit(1. / (Km1 + 1 - k).astype(str(x_.dtype)))
        y = logit(z) - eq_share
        return floatX(y.T)

    def forward_val(self, x, point=None):
        return self.forward(x)

    def backward(self, y_):
        y = y_.T
        Km1 = y.shape[0]
        k = tt.arange(Km1)[(slice(None), ) + (None, ) * (y.ndim - 1)]
        eq_share = logit(1. / (Km1 + 1 - k).astype(str(y_.dtype)))
        z = invlogit(y + eq_share, self.eps)
        yl = tt.concatenate([z, tt.ones(y[:1].shape)])
        yu = tt.concatenate([tt.ones(y[:1].shape), 1 - z])
        S = tt.extra_ops.cumprod(yu, 0)
        x = S * yl
        return floatX(x.T)

    def jacobian_det(self, y_):
        y = y_.T
        Km1 = y.shape[0]
        k = tt.arange(Km1)[(slice(None), ) + (None, ) * (y.ndim - 1)]
        eq_share = logit(1. / (Km1 + 1 - k).astype(str(y_.dtype)))
        yl = y + eq_share
        yu = tt.concatenate([tt.ones(y[:1].shape), 1 - invlogit(yl, self.eps)])
        S = tt.extra_ops.cumprod(yu, 0)
        return tt.sum(tt.log(S[:-1]) - tt.log1p(tt.exp(yl)) -
                      tt.log1p(tt.exp(-yl)), 0).T

stick_breaking = StickBreaking()

t_stick_breaking = lambda eps: StickBreaking(eps)


class Circular(Transform):
    """Transforms a linear space into a circular one.
    """
    name = "circular"

    def backward(self, y):
        return tt.arctan2(tt.sin(y), tt.cos(y))

    def forward(self, x):
        return tt.as_tensor_variable(x)

    def forward_val(self, x, point=None):
        return self.forward(x)

    def jacobian_det(self, x):
        return 0

circular = Circular()


class CholeskyCovPacked(Transform):
    name = "cholesky_cov_packed"

    def __init__(self, n):
        self.diag_idxs = np.arange(1, n + 1).cumsum() - 1

    def backward(self, x):
        return tt.advanced_set_subtensor1(x, tt.exp(x[self.diag_idxs]),
                                          self.diag_idxs)

    def forward(self, y):
        return tt.advanced_set_subtensor1(y, tt.log(y[self.diag_idxs]),
                                          self.diag_idxs)

    def forward_val(self, x, point=None):
        return self.forward(x)

    def jacobian_det(self, y):
        return tt.sum(y[self.diag_idxs])
import pytest

import pymc3 as pm
import numpy as np
from numpy.testing import assert_almost_equal
from .helpers import SeededTest
from pymc3.distributions.transforms import Transform


class TestTransformName(object):
    cases = [
        ('var', 'var_test__'),
        ('var_test_', 'var_test__test__')
    ]
    transform_name = 'test'

    def test_get_transformed_name(self):
        test_transform = Transform()
        test_transform.name = self.transform_name
        for name, transformed in self.cases:
            assert pm.util.get_transformed_name(name,
                                                test_transform) == transformed

    def test_is_transformed_name(self):
        for name, transformed in self.cases:
            assert pm.util.is_transformed_name(transformed)
            assert not pm.util.is_transformed_name(name)

    def test_get_untransformed_name(self):
        for name, transformed in self.cases:
            assert pm.util.get_untransformed_name(transformed) == name
            with pytest.raises(ValueError):
                pm.util.get_untransformed_name(name)


class TestUpdateStartVals(SeededTest):
    def setup_method(self):
        super(TestUpdateStartVals, self).setup_method()

    def test_soft_update_all_present(self):
        start = {'a': 1, 'b': 2}
        test_point = {'a': 3, 'b': 4}
        pm.util.update_start_vals(start, test_point, model=None)
        assert start == {'a': 1, 'b': 2}

    def test_soft_update_one_missing(self):
        start = {'a': 1, }
        test_point = {'a': 3, 'b': 4}
        pm.util.update_start_vals(start, test_point, model=None)
        assert start == {'a': 1, 'b': 4}

    def test_soft_update_empty(self):
        start = {}
        test_point = {'a': 3, 'b': 4}
        pm.util.update_start_vals(start, test_point, model=None)
        assert start == test_point

    def test_soft_update_transformed(self):
        with pm.Model() as model:
            pm.Exponential('a', 1)
        start = {'a': 2.}
        test_point = {'a_log__': 0}
        pm.util.update_start_vals(start, test_point, model)
        assert_almost_equal(np.exp(start['a_log__']), start['a'])

    def test_soft_update_parent(self):
        with pm.Model() as model:
            a = pm.Uniform('a', lower=0., upper=1.)
            b = pm.Uniform('b', lower=2., upper=3.)
            pm.Uniform('lower', lower=a, upper=3.)
            pm.Uniform('upper', lower=0., upper=b)
            pm.Uniform('interv', lower=a, upper=b)

        start = {'a': .3, 'b': 2.1, 'lower': 1.4,
                 'upper': 1.4, 'interv': 1.4}
        test_point = {'lower_interval__': -0.3746934494414109,
                      'upper_interval__': 0.693147180559945,
                      'interv_interval__': 0.4519851237430569}
        pm.util.update_start_vals(start, model.test_point, model)
        assert_almost_equal(start['lower_interval__'],
                            test_point['lower_interval__'])
        assert_almost_equal(start['upper_interval__'],
                            test_point['upper_interval__'])
        assert_almost_equal(start['interv_interval__'],
                            test_point['interv_interval__'])
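The naming convention these tests pin down is plain string concatenation: a transformed variable is named <var>_<transform.name>__. A small interactive sketch, assuming a working pymc3 install:

import pymc3 as pm
from pymc3.distributions.transforms import Transform

t = Transform()
t.name = 'log'
print(pm.util.get_transformed_name('a', t))        # 'a_log__'
print(pm.util.is_transformed_name('a_log__'))      # True
print(pm.util.get_untransformed_name('a_log__'))   # 'a'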
springcoil/pymc3
pymc3/tests/test_util.py
pymc3/distributions/transforms.py
import six


def convert_to_bytes(resp):
    resp = convert_body_to_bytes(resp)
    return resp


def convert_to_unicode(resp):
    resp = convert_body_to_unicode(resp)
    return resp


def convert_body_to_bytes(resp):
    """
    If the request body is a string, encode it to bytes (for python3 support)

    By default yaml serializes to utf-8 encoded bytestrings.
    When this cassette is loaded by python3, it's automatically decoded
    into unicode strings. This makes sure that it stays a bytestring, since
    that's what all the internal httplib machinery is expecting.

    For more info on py3 yaml:
    http://pyyaml.org/wiki/PyYAMLDocumentation#Python3support
    """
    try:
        if not isinstance(resp['body']['string'], six.binary_type):
            resp['body']['string'] = resp['body']['string'].encode('utf-8')
    except (KeyError, TypeError, UnicodeEncodeError):
        # The thing we were converting either wasn't a dictionary or didn't
        # have the keys we were expecting. Some of the tests just serialize
        # and deserialize a string.

        # Also, sometimes the thing actually is binary, so if you can't encode
        # it, just give up.
        pass
    return resp


def convert_body_to_unicode(resp):
    """
    If the request body is bytes, decode it to a string (for python3 support)
    """
    try:
        if not isinstance(resp['body']['string'], six.text_type):
            resp['body']['string'] = resp['body']['string'].decode('utf-8')
    except (KeyError, TypeError, UnicodeDecodeError):
        # The thing we were converting either wasn't a dictionary or didn't
        # have the keys we were expecting. Some of the tests just serialize
        # and deserialize a string.

        # Also, sometimes the thing actually is binary, so if you can't decode
        # it, just give up.
        pass
    return resp
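A quick sketch of the round trip these two helpers provide. The cassette fragment below is hypothetical; only the {'body': {'string': ...}} shape matters to the functions above.

from vcr.serializers.compat import convert_to_bytes, convert_to_unicode

resp = {'body': {'string': u'hello'}}
as_bytes = convert_to_bytes(resp)                      # mutates resp in place
assert isinstance(as_bytes['body']['string'], bytes)   # encoded on python3
as_text = convert_to_unicode(as_bytes)
assert as_text['body']['string'] == u'hello'           # decoded back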
import pytest

import vcr.persist
from vcr.serializers import jsonserializer, yamlserializer


@pytest.mark.parametrize("cassette_path, serializer", [
    ('tests/fixtures/migration/old_cassette.json', jsonserializer),
    ('tests/fixtures/migration/old_cassette.yaml', yamlserializer),
])
def test_load_cassette_with_old_cassettes(cassette_path, serializer):
    with pytest.raises(ValueError) as excinfo:
        vcr.persist.load_cassette(cassette_path, serializer)
    assert "run the migration script" in excinfo.exconly()


@pytest.mark.parametrize("cassette_path, serializer", [
    ('tests/fixtures/migration/not_cassette.txt', jsonserializer),
    ('tests/fixtures/migration/not_cassette.txt', yamlserializer),
])
def test_load_cassette_with_invalid_cassettes(cassette_path, serializer):
    with pytest.raises(Exception) as excinfo:
        vcr.persist.load_cassette(cassette_path, serializer)
    assert "run the migration script" not in excinfo.exconly()
aclevy/vcrpy
tests/unit/test_persist.py
vcr/serializers/compat.py
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)

import errno
import os
import re
import subprocess
import sys

from pandas.compat import PY3


def get_keywords():
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    keywords = {"refnames": git_refnames, "full": git_full}
    return keywords


class VersioneerConfig(object):
    pass


def get_config():
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = "v"
    cfg.parentdir_prefix = "pandas-"
    cfg.versionfile_source = "pandas/_version.py"
    cfg.verbose = False
    return cfg


class NotThisMethod(Exception):
    pass


LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    def decorate(f):
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate


def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run {dispcmd}".format(dispcmd=dispcmd))
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if PY3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run {dispcmd} (error)".format(dispcmd=dispcmd))
        return None
    return stdout


def versions_from_parentdir(parentdir_prefix, root, verbose):
    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '{root}', but '{dirname}' "
                  "doesn't start with prefix '{parentdir_prefix}'".format(
                      root=root, dirname=dirname,
                      parentdir_prefix=parentdir_prefix))
        raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
    return {"version": dirname[len(parentdir_prefix):],
            "full-revisionid": None,
            "dirty": False, "error": None}


@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
        f.close()
    except EnvironmentError:
        pass
    return keywords


@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '{}', no digits".format(",".join(refs - tags)))
    if verbose:
        print("likely tags: {}".format(",".join(sorted(tags))))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking {r}".format(r=r))
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None
                    }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}


@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.

    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in {root}".format(root=root))
        raise NotThisMethod("no .git directory")

    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: "
                               "'{describe_out}'".format(
                                   describe_out=describe_out))
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '{full_tag}' doesn't start with prefix " \
                      "'{tag_prefix}'"
                print(fmt.format(full_tag=full_tag, tag_prefix=tag_prefix))
            pieces["error"] = ("tag '{full_tag}' doesn't start with "
                               "prefix '{tag_prefix}'".format(
                                   full_tag=full_tag, tag_prefix=tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    return pieces


def plus_or_dot(pieces):
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+"


def render_pep440(pieces):
    # now build up version string, with post-release "local version
    # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    # exceptions:
    # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]

    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "{:d}.g{}".format(pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.{:d}.g{}".format(pieces["distance"],
                                                pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_pre(pieces):
    # TAG[.post.devDISTANCE] . No -dirty

    # exceptions:
    # 1: no tags. 0.post.devDISTANCE

    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered


def render_pep440_post(pieces):
    # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
    # .dev0 sorts backwards (a dirty tree will appear "older" than the
    # corresponding clean one), but you shouldn't be releasing software with
    # -dirty anyways.

    # exceptions:
    # 1: no tags. 0.postDISTANCE[.dev0]

    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post{:d}".format(pieces["distance"])
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g{}".format(pieces["short"])
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g{}".format(pieces["short"])
    return rendered


def render_pep440_old(pieces):
    # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.

    # exceptions:
    # 1: no tags. 0.postDISTANCE[.dev0]

    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered


def render_git_describe(pieces):
    # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
    # --always'

    # exceptions:
    # 1: no tags. HEX[-dirty]  (note: no 'g' prefix)

    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-{:d}-g{}".format(pieces["distance"],
                                           pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always -long'. The distance/hash is unconditional.

    # exceptions:
    # 1: no tags. HEX[-dirty]  (note: no 'g' prefix)

    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render(pieces, style):
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '{style}'".format(style=style))

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}


def get_versions():
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.

    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}

    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
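A worked example of the rendering rules, feeding a hand-built pieces dict (three commits past a clean v0.23.0 tag; the tag and hash are made up) to the renderers above:

pieces = {"closest-tag": "0.23.0", "distance": 3, "short": "abc1234",
          "dirty": False, "error": None}
print(render_pep440(pieces))         # 0.23.0+3.gabc1234
print(render_pep440_post(pieces))    # 0.23.0.post3+gabc1234
print(render_git_describe(pieces))   # 0.23.0-3-gabc1234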
# -*- coding: utf-8 -*-
# pylint: disable=E1101

import datetime as dt
import io
import gzip
import os
import struct
import warnings
from collections import OrderedDict
from datetime import datetime

import numpy as np
import pytest

import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
from pandas.compat import iterkeys
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import (InvalidColumnName, PossiblePrecisionLoss,
                             StataMissingValue, StataReader, read_stata)


@pytest.fixture
def dirpath(datapath):
    return datapath("io", "data")


@pytest.fixture
def parsed_114(dirpath):
    dta14_114 = os.path.join(dirpath, 'stata5_114.dta')
    parsed_114 = read_stata(dta14_114, convert_dates=True)
    parsed_114.index.name = 'index'
    return parsed_114


class TestStata(object):

    @pytest.fixture(autouse=True)
    def setup_method(self, datapath):
        self.dirpath = datapath("io", "data")
        self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
        self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
        self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
        self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
        self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
        self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
        self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
        self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
        self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
        self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
        self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
        self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
        self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
        self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
        self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
        self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
        self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
        self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
        self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
        self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
        self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
        self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
        self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
        self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
        self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
        self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
        self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
        self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
        self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
        self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
        self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
        self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
        self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
        self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
        self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
        self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
        self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
        self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
        self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta')
        self.dta23 = os.path.join(self.dirpath, 'stata15.dta')
        self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta')
        self.dta25_118 = os.path.join(self.dirpath, 'stata16_118.dta')
        self.stata_dates = os.path.join(self.dirpath, 'stata13_dates.dta')

    def read_dta(self, file):
        # Legacy default reader configuration
        return read_stata(file, convert_dates=True)

    def read_csv(self, file):
        return read_csv(file, parse_dates=True)

    @pytest.mark.parametrize('version', [114, 117])
    def test_read_empty_dta(self, version):
        empty_ds = DataFrame(columns=['unit'])
        # GH 7369, make sure can read a 0-obs dta file
        with tm.ensure_clean() as path:
            empty_ds.to_stata(path, write_index=False, version=version)
            empty_ds2 = read_stata(path)
            tm.assert_frame_equal(empty_ds, empty_ds2)

    def test_data_method(self):
        # Minimal testing of legacy data method
        with StataReader(self.dta1_114) as rdr:
            with tm.assert_produces_warning(UserWarning):
                parsed_114_data = rdr.data()

        with StataReader(self.dta1_114) as rdr:
            parsed_114_read = rdr.read()
        tm.assert_frame_equal(parsed_114_data, parsed_114_read)

    @pytest.mark.parametrize(
        'file', ['dta1_114', 'dta1_117'])
    def test_read_dta1(self, file):

        file = getattr(self, file)
        parsed = self.read_dta(file)

        # Pandas uses np.nan as missing value.
        # Thus, all columns will be of type float, regardless of their name.
        expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
                             columns=['float_miss', 'double_miss',
                                      'byte_miss', 'int_miss', 'long_miss'])

        # this is an oddity as really the nan should be float64, but
        # the casting doesn't fail so need to match stata here
        expected['float_miss'] = expected['float_miss'].astype(np.float32)

        tm.assert_frame_equal(parsed, expected)

    def test_read_dta2(self):
        expected = DataFrame.from_records(
            [
                (
                    datetime(2006, 11, 19, 23, 13, 20),
                    1479596223000,
                    datetime(2010, 1, 20),
                    datetime(2010, 1, 8),
                    datetime(2010, 1, 1),
                    datetime(1974, 7, 1),
                    datetime(2010, 1, 1),
                    datetime(2010, 1, 1)
                ),
                (
                    datetime(1959, 12, 31, 20, 3, 20),
                    -1479590,
                    datetime(1953, 10, 2),
                    datetime(1948, 6, 10),
                    datetime(1955, 1, 1),
                    datetime(1955, 7, 1),
                    datetime(1955, 1, 1),
                    datetime(2, 1, 1)
                ),
                (
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                    pd.NaT,
                )
            ],
            columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
                     'monthly_date', 'quarterly_date', 'half_yearly_date',
                     'yearly_date']
        )
        expected['yearly_date'] = expected['yearly_date'].astype('O')

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            parsed_114 = self.read_dta(self.dta2_114)
            parsed_115 = self.read_dta(self.dta2_115)
            parsed_117 = self.read_dta(self.dta2_117)
            # 113 is buggy due to limits of date format support in Stata
            # parsed_113 = self.read_dta(self.dta2_113)

            # Remove resource warnings
            w = [x for x in w if x.category is UserWarning]

            # should get warning for each call to read_dta
            assert len(w) == 3

        # buggy test because of the NaT comparison on certain platforms
        # Format 113 test fails since it does not support tc and tC formats
        # tm.assert_frame_equal(parsed_113, expected)
        tm.assert_frame_equal(parsed_114, expected,
                              check_datetimelike_compat=True)
        tm.assert_frame_equal(parsed_115, expected,
                              check_datetimelike_compat=True)
        tm.assert_frame_equal(parsed_117, expected,
                              check_datetimelike_compat=True)

    @pytest.mark.parametrize(
        'file', ['dta3_113', 'dta3_114', 'dta3_115', 'dta3_117'])
    def test_read_dta3(self, file):

        file = getattr(self, file)
        parsed = self.read_dta(file)

        # match stata here
        expected = self.read_csv(self.csv3)
        expected = expected.astype(np.float32)
        expected['year'] = expected['year'].astype(np.int16)
        expected['quarter'] = expected['quarter'].astype(np.int8)

        tm.assert_frame_equal(parsed, expected)

    @pytest.mark.parametrize(
        'file', ['dta4_113', 'dta4_114', 'dta4_115', 'dta4_117'])
    def test_read_dta4(self, file):

        file = getattr(self, file)
        parsed = self.read_dta(file)

        expected = DataFrame.from_records(
            [
                ["one", "ten", "one", "one", "one"],
                ["two", "nine", "two", "two", "two"],
                ["three", "eight", "three", "three", "three"],
                ["four", "seven", 4, "four", "four"],
                ["five", "six", 5, np.nan, "five"],
                ["six", "five", 6, np.nan, "six"],
                ["seven", "four", 7, np.nan, "seven"],
                ["eight", "three", 8, np.nan, "eight"],
                ["nine", "two", 9, np.nan, "nine"],
                ["ten", "one", "ten", np.nan, "ten"]
            ],
            columns=['fully_labeled', 'fully_labeled2',
                     'incompletely_labeled', 'labeled_with_missings',
                     'float_labelled'])

        # these are all categoricals
        expected = pd.concat([expected[col].astype('category')
                              for col in expected], axis=1)

        # stata doesn't save .category metadata
        tm.assert_frame_equal(parsed, expected, check_categorical=False)

    # File containing strls
    def test_read_dta12(self):
        parsed_117 = self.read_dta(self.dta21_117)
        expected = DataFrame.from_records(
            [
                [1, "abc", "abcdefghi"],
                [3, "cba", "qwertywertyqwerty"],
                [93, "", "strl"],
            ],
            columns=['x', 'y', 'z'])

        tm.assert_frame_equal(parsed_117, expected, check_dtype=False)

    def test_read_dta18(self):
        parsed_118 = self.read_dta(self.dta22_118)
        parsed_118["Bytes"] = parsed_118["Bytes"].astype('O')
        expected = DataFrame.from_records(
            [['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0],
             ['Dog', 'Boston', u'Uzunköprü', np.nan, np.nan, np.nan, np.nan],
             ['Plane', 'Rome', u'Tromsø', 0, 0.0, 'option a', 0.0],
             ['Potato', 'Tokyo', u'Elâzığ', -4, 4.0, 4, 4],
             ['', '', '', 0, 0.3332999, 'option a', 1 / 3.]
             ],
            columns=['Things', 'Cities', 'Unicode_Cities_Strl',
                     'Ints', 'Floats', 'Bytes', 'Longs'])
        expected["Floats"] = expected["Floats"].astype(np.float32)
        for col in parsed_118.columns:
            tm.assert_almost_equal(parsed_118[col], expected[col])

        with StataReader(self.dta22_118) as rdr:
            vl = rdr.variable_labels()
            vl_expected = {u'Unicode_Cities_Strl':
                           u'Here are some strls with Ünicode chars',
                           u'Longs': u'long data',
                           u'Things': u'Here are some things',
                           u'Bytes': u'byte data',
                           u'Ints': u'int data',
                           u'Cities': u'Here are some cities',
                           u'Floats': u'float data'}
            tm.assert_dict_equal(vl, vl_expected)

            assert rdr.data_label == u'This is a Ünicode data label'

    def test_read_write_dta5(self):
        original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
                             columns=['float_miss', 'double_miss',
                                      'byte_miss', 'int_miss', 'long_miss'])
        original.index.name = 'index'

        with tm.ensure_clean() as path:
            original.to_stata(path, None)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  original)

    def test_write_dta6(self):
        original = self.read_csv(self.csv3)
        original.index.name = 'index'
        original.index = original.index.astype(np.int32)
        original['year'] = original['year'].astype(np.int32)
        original['quarter'] = original['quarter'].astype(np.int32)

        with tm.ensure_clean() as path:
            original.to_stata(path, None)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  original, check_index_type=False)

    @pytest.mark.parametrize('version', [114, 117])
    def test_read_write_dta10(self, version):
        original = DataFrame(data=[["string", "object", 1, 1.1,
                                    np.datetime64('2003-12-25')]],
                             columns=['string', 'object', 'integer',
                                      'floating', 'datetime'])
        original["object"] = Series(original["object"], dtype=object)
        original.index.name = 'index'
        original.index = original.index.astype(np.int32)
        original['integer'] = original['integer'].astype(np.int32)

        with tm.ensure_clean() as path:
            original.to_stata(path, {'datetime': 'tc'}, version=version)
            written_and_read_again = self.read_dta(path)
            # original.index is np.int32, read index is np.int64
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  original, check_index_type=False)

    def test_stata_doc_examples(self):
        with tm.ensure_clean() as path:
            df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
            df.to_stata(path)

    def test_write_preserves_original(self):
        # 9795
        np.random.seed(423)
        df = pd.DataFrame(np.random.randn(5, 4), columns=list('abcd'))
        df.loc[2, 'a':'c'] = np.nan
        df_copy = df.copy()
        with tm.ensure_clean() as path:
            df.to_stata(path, write_index=False)
        tm.assert_frame_equal(df, df_copy)

    @pytest.mark.parametrize('version', [114, 117])
    def test_encoding(self, version):

        # GH 4626, proper encoding handling
        raw = read_stata(self.dta_encoding)
        with tm.assert_produces_warning(FutureWarning):
            encoded = read_stata(self.dta_encoding, encoding='latin-1')
        result = encoded.kreis1849[0]

        expected = raw.kreis1849[0]
        assert result == expected
        assert isinstance(result, compat.string_types)

        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(FutureWarning):
                encoded.to_stata(path, write_index=False, version=version,
                                 encoding='latin-1')
            reread_encoded = read_stata(path)
            tm.assert_frame_equal(encoded, reread_encoded)

    def test_read_write_dta11(self):
        original = DataFrame([(1, 2, 3, 4)],
                             columns=['good', compat.u('b\u00E4d'), '8number',
                                      'astringwithmorethan32characters______'])
        formatted = DataFrame([(1, 2, 3, 4)],
                              columns=['good', 'b_d', '_8number',
                                       'astringwithmorethan32characters_'])
        formatted.index.name = 'index'
        formatted = formatted.astype(np.int32)

        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(pd.io.stata.InvalidColumnName):
                original.to_stata(path, None)

            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(
                written_and_read_again.set_index('index'), formatted)

    @pytest.mark.parametrize('version', [114, 117])
    def test_read_write_dta12(self, version):
        original = DataFrame([(1, 2, 3, 4, 5, 6)],
                             columns=['astringwithmorethan32characters_1',
                                      'astringwithmorethan32characters_2',
                                      '+', '-', 'short', 'delete'])
        formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
                              columns=['astringwithmorethan32characters_',
                                       '_0astringwithmorethan32character',
                                       '_', '_1_', '_short', '_delete'])
        formatted.index.name = 'index'
        formatted = formatted.astype(np.int32)

        with tm.ensure_clean() as path:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always', InvalidColumnName)
                original.to_stata(path, None, version=version)
                # should get a warning for that format.
                assert len(w) == 1

            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(
                written_and_read_again.set_index('index'), formatted)

    def test_read_write_dta13(self):
        s1 = Series(2 ** 9, dtype=np.int16)
        s2 = Series(2 ** 17, dtype=np.int32)
        s3 = Series(2 ** 33, dtype=np.int64)
        original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
        original.index.name = 'index'

        formatted = original
        formatted['int64'] = formatted['int64'].astype(np.float64)

        with tm.ensure_clean() as path:
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  formatted)

    @pytest.mark.parametrize('version', [114, 117])
    @pytest.mark.parametrize(
        'file', ['dta14_113', 'dta14_114', 'dta14_115', 'dta14_117'])
    def test_read_write_reread_dta14(self, file, parsed_114, version):
        file = getattr(self, file)
        parsed = self.read_dta(file)
        parsed.index.name = 'index'

        expected = self.read_csv(self.csv14)
        cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
        for col in cols:
            expected[col] = expected[col]._convert(datetime=True,
                                                   numeric=True)
        expected['float_'] = expected['float_'].astype(np.float32)
        expected['date_td'] = pd.to_datetime(
            expected['date_td'], errors='coerce')

        tm.assert_frame_equal(parsed_114, parsed)

        with tm.ensure_clean() as path:
            parsed_114.to_stata(path, {'date_td': 'td'}, version=version)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(
                written_and_read_again.set_index('index'), parsed_114)

    @pytest.mark.parametrize(
        'file', ['dta15_113', 'dta15_114', 'dta15_115', 'dta15_117'])
    def test_read_write_reread_dta15(self, file):

        expected = self.read_csv(self.csv15)
        expected['byte_'] = expected['byte_'].astype(np.int8)
        expected['int_'] = expected['int_'].astype(np.int16)
        expected['long_'] = expected['long_'].astype(np.int32)
        expected['float_'] = expected['float_'].astype(np.float32)
        expected['double_'] = expected['double_'].astype(np.float64)
        expected['date_td'] = expected['date_td'].apply(
            datetime.strptime, args=('%Y-%m-%d',))

        file = getattr(self, file)
        parsed = self.read_dta(file)

        tm.assert_frame_equal(expected, parsed)

    @pytest.mark.parametrize('version', [114, 117])
    def test_timestamp_and_label(self, version):
        original = DataFrame([(1,)], columns=['variable'])
        time_stamp = datetime(2000, 2, 29, 14, 21)
        data_label = 'This is a data file.'
        with tm.ensure_clean() as path:
            original.to_stata(path, time_stamp=time_stamp,
                              data_label=data_label,
                              version=version)

            with StataReader(path) as reader:
                assert reader.time_stamp == '29 Feb 2000 14:21'
                assert reader.data_label == data_label

    @pytest.mark.parametrize('version', [114, 117])
    def test_invalid_timestamp(self, version):
        original = DataFrame([(1,)], columns=['variable'])
        time_stamp = '01 Jan 2000, 00:00:00'
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError):
                original.to_stata(path, time_stamp=time_stamp,
                                  version=version)

    def test_numeric_column_names(self):
        original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
        original.index.name = 'index'
        with tm.ensure_clean() as path:
            # should get a warning for that format.
            with tm.assert_produces_warning(InvalidColumnName):
                original.to_stata(path)

            written_and_read_again = self.read_dta(path)
            written_and_read_again = written_and_read_again.set_index('index')
            columns = list(written_and_read_again.columns)
            convert_col_name = lambda x: int(x[1])
            written_and_read_again.columns = map(convert_col_name, columns)
            tm.assert_frame_equal(original, written_and_read_again)

    @pytest.mark.parametrize('version', [114, 117])
    def test_nan_to_missing_value(self, version):
        s1 = Series(np.arange(4.0), dtype=np.float32)
        s2 = Series(np.arange(4.0), dtype=np.float64)
        s1[::2] = np.nan
        s2[1::2] = np.nan
        original = DataFrame({'s1': s1, 's2': s2})
        original.index.name = 'index'
        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)
            written_and_read_again = written_and_read_again.set_index('index')
            tm.assert_frame_equal(written_and_read_again, original)

    def test_no_index(self):
        columns = ['x', 'y']
        original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
                             columns=columns)
        original.index.name = 'index_not_written'
        with tm.ensure_clean() as path:
            original.to_stata(path, write_index=False)
            written_and_read_again = self.read_dta(path)
            pytest.raises(
                KeyError, lambda: written_and_read_again['index_not_written'])

    def test_string_no_dates(self):
        s1 = Series(['a', 'A longer string'])
        s2 = Series([1.0, 2.0], dtype=np.float64)
        original = DataFrame({'s1': s1, 's2': s2})
        original.index.name = 'index'
        with tm.ensure_clean() as path:
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  original)

    def test_large_value_conversion(self):
        s0 = Series([1, 99], dtype=np.int8)
        s1 = Series([1, 127], dtype=np.int8)
        s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
        s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
        original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
        original.index.name = 'index'
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(PossiblePrecisionLoss):
                original.to_stata(path)

            written_and_read_again = self.read_dta(path)
            modified = original.copy()
            modified['s1'] = Series(modified['s1'], dtype=np.int16)
            modified['s2'] = Series(modified['s2'], dtype=np.int32)
            modified['s3'] = Series(modified['s3'], dtype=np.float64)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  modified)

    def test_dates_invalid_column(self):
        original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
        original.index.name = 'index'
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(InvalidColumnName):
                original.to_stata(path, {0: 'tc'})

            written_and_read_again = self.read_dta(path)
            modified = original.copy()
            modified.columns = ['_0']
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  modified)

    def test_105(self):
        # Data obtained from:
        # http://go.worldbank.org/ZXY29PVJ21
        dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta')
        df = pd.read_stata(dpath)
        df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
        df0 = pd.DataFrame(df0)
        df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
        df0['clustnum'] = df0["clustnum"].astype(np.int16)
        df0['pri_schl'] = df0["pri_schl"].astype(np.int8)
        df0['psch_num'] = df0["psch_num"].astype(np.int8)
        df0['psch_dis'] = df0["psch_dis"].astype(np.float32)
        tm.assert_frame_equal(df.head(3), df0)

    def test_value_labels_old_format(self):
        # GH 19417
        #
        # Test that value_labels() returns an empty dict if the file format
        # predates supporting value labels.
        dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta')
        reader = StataReader(dpath)
        assert reader.value_labels() == {}
        reader.close()

    def test_date_export_formats(self):
        columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
        conversions = {c: c for c in columns}
        data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
        original = DataFrame([data], columns=columns)
        original.index.name = 'index'
        expected_values = [datetime(2006, 11, 20, 23, 13, 20),  # Time
                           datetime(2006, 11, 20),  # Day
                           datetime(2006, 11, 19),  # Week
                           datetime(2006, 11, 1),  # Month
                           datetime(2006, 10, 1),  # Quarter year
                           datetime(2006, 7, 1),  # Half year
                           datetime(2006, 1, 1)]  # Year

        expected = DataFrame([expected_values], columns=columns)
        expected.index.name = 'index'
        with tm.ensure_clean() as path:
            original.to_stata(path, conversions)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  expected)

    def test_write_missing_strings(self):
        original = DataFrame([["1"], [None]], columns=["foo"])
        expected = DataFrame([["1"], [""]], columns=["foo"])
        expected.index.name = 'index'
        with tm.ensure_clean() as path:
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  expected)

    @pytest.mark.parametrize('version', [114, 117])
    @pytest.mark.parametrize('byteorder', ['>', '<'])
    def test_bool_uint(self, byteorder, version):
        s0 = Series([0, 1, True], dtype=np.bool)
        s1 = Series([0, 1, 100], dtype=np.uint8)
        s2 = Series([0, 1, 255], dtype=np.uint8)
        s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
        s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
        s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
        s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)

        original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
                              's4': s4, 's5': s5, 's6': s6})
        original.index.name = 'index'
        expected = original.copy()
        expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
                          np.int32, np.float64)
        for c, t in zip(expected.columns, expected_types):
            expected[c] = expected[c].astype(t)

        with tm.ensure_clean() as path:
            original.to_stata(path, byteorder=byteorder, version=version)
            written_and_read_again = self.read_dta(path)
            written_and_read_again = written_and_read_again.set_index('index')
            tm.assert_frame_equal(written_and_read_again, expected)

    def test_variable_labels(self):
        with StataReader(self.dta16_115) as rdr:
            sr_115 = rdr.variable_labels()
        with StataReader(self.dta16_117) as rdr:
            sr_117 = rdr.variable_labels()
        keys = ('var1', 'var2', 'var3')
        labels = ('label1', 'label2', 'label3')
        for k, v in compat.iteritems(sr_115):
            assert k in sr_117
            assert v == sr_117[k]
            assert k in keys
            assert v in labels

    def test_minimal_size_col(self):
        str_lens = (1, 100, 244)
        s = {}
        for str_len in str_lens:
            s['s' + str(str_len)] = Series(['a' * str_len,
                                            'b' * str_len, 'c' * str_len])
        original = DataFrame(s)
        with tm.ensure_clean() as path:
            original.to_stata(path, write_index=False)

            with StataReader(path) as sr:
                typlist = sr.typlist
                variables = sr.varlist
                formats = sr.fmtlist
                for variable, fmt, typ in zip(variables, formats, typlist):
                    assert int(variable[1:]) == int(fmt[1:-1])
                    assert int(variable[1:]) == typ

    def test_excessively_long_string(self):
        str_lens = (1, 244, 500)
        s = {}
        for str_len in str_lens:
            s['s' + str(str_len)] = Series(['a' * str_len,
                                            'b' * str_len, 'c' * str_len])
        original = DataFrame(s)
        with pytest.raises(ValueError):
            with tm.ensure_clean() as path:
                original.to_stata(path)

    def test_missing_value_generator(self):
        types = ('b', 'h', 'l')
        df = DataFrame([[0.0]], columns=['float_'])
        with tm.ensure_clean() as path:
            df.to_stata(path)
            with StataReader(path) as rdr:
                valid_range = rdr.VALID_RANGE
        expected_values = ['.' + chr(97 + i) for i in range(26)]
        expected_values.insert(0, '.')
        for t in types:
            offset = valid_range[t][1]
            for i in range(0, 27):
                val = StataMissingValue(offset + 1 + i)
                assert val.string == expected_values[i]

        # Test extremes for floats
        val = StataMissingValue(struct.unpack('<f', b'\x00\x00\x00\x7f')[0])
        assert val.string == '.'
        val = StataMissingValue(struct.unpack('<f', b'\x00\xd0\x00\x7f')[0])
        assert val.string == '.z'

        # Test extremes for floats
        val = StataMissingValue(struct.unpack(
            '<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
        assert val.string == '.'
        val = StataMissingValue(struct.unpack(
            '<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
        assert val.string == '.z'

    @pytest.mark.parametrize(
        'file', ['dta17_113', 'dta17_115', 'dta17_117'])
    def test_missing_value_conversion(self, file):
        columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
        smv = StataMissingValue(101)
        keys = [key for key in iterkeys(smv.MISSING_VALUES)]
        keys.sort()
        data = []
        for i in range(27):
            row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
            data.append(row)
        expected = DataFrame(data, columns=columns)

        parsed = read_stata(getattr(self, file), convert_missing=True)
        tm.assert_frame_equal(parsed, expected)

    def test_big_dates(self):
        yr = [1960, 2000, 9999, 100, 2262, 1677]
        mo = [1, 1, 12, 1, 4, 9]
        dd = [1, 1, 31, 1, 22, 23]
        hr = [0, 0, 23, 0, 0, 0]
        mm = [0, 0, 59, 0, 0, 0]
        ss = [0, 0, 59, 0, 0, 0]
        expected = []
        for i in range(len(yr)):
            row = []
            for j in range(7):
                if j == 0:
                    row.append(
                        datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
                elif j == 6:
                    row.append(datetime(yr[i], 1, 1))
                else:
                    row.append(datetime(yr[i], mo[i], dd[i]))
            expected.append(row)
        expected.append([pd.NaT] * 7)
        columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
                   'date_th', 'date_ty']

        # Fixes for weekly, quarterly, half, year
        expected[2][2] = datetime(9999, 12, 24)
        expected[2][3] = datetime(9999, 12, 1)
        expected[2][4] = datetime(9999, 10, 1)
        expected[2][5] = datetime(9999, 7, 1)
        expected[4][2] = datetime(2262, 4, 16)
        expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
        expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
        expected[5][2] = expected[5][3] = expected[
            5][4] = datetime(1677, 10, 1)
        expected[5][5] = expected[5][6] = datetime(1678, 1, 1)

        expected = DataFrame(expected, columns=columns, dtype=np.object)
        parsed_115 = read_stata(self.dta18_115)
        parsed_117 = read_stata(self.dta18_117)
        tm.assert_frame_equal(expected, parsed_115,
                              check_datetimelike_compat=True)
        tm.assert_frame_equal(expected, parsed_117,
                              check_datetimelike_compat=True)

        date_conversion = {c: c[-2:] for c in columns}
        with tm.ensure_clean() as path:
            expected.index.name = 'index'
            expected.to_stata(path, date_conversion)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(written_and_read_again.set_index('index'),
                                  expected,
                                  check_datetimelike_compat=True)

    def test_dtype_conversion(self):
        expected = self.read_csv(self.csv15)
        expected['byte_'] = expected['byte_'].astype(np.int8)
        expected['int_'] = expected['int_'].astype(np.int16)
        expected['long_'] = expected['long_'].astype(np.int32)
        expected['float_'] = expected['float_'].astype(np.float32)
        expected['double_'] = expected['double_'].astype(np.float64)
        expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                        args=('%Y-%m-%d',))

        no_conversion = read_stata(self.dta15_117, convert_dates=True)
        tm.assert_frame_equal(expected, no_conversion)

        conversion = read_stata(self.dta15_117,
                                convert_dates=True,
                                preserve_dtypes=False)

        # read_csv types are the same
        expected = self.read_csv(self.csv15)
        expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                        args=('%Y-%m-%d',))

        tm.assert_frame_equal(expected, conversion)

    def test_drop_column(self):
        expected = self.read_csv(self.csv15)
        expected['byte_'] = expected['byte_'].astype(np.int8)
        expected['int_'] = expected['int_'].astype(np.int16)
        expected['long_'] = expected['long_'].astype(np.int32)
        expected['float_'] = expected['float_'].astype(np.float32)
        expected['double_'] = expected['double_'].astype(np.float64)
        expected['date_td'] = expected['date_td'].apply(datetime.strptime,
                                                        args=('%Y-%m-%d',))

        columns = ['byte_', 'int_', 'long_']
        expected = expected[columns]
        dropped = read_stata(self.dta15_117, convert_dates=True,
                             columns=columns)
        tm.assert_frame_equal(expected, dropped)

        # See PR 10757
        columns = ['int_', 'long_', 'byte_']
        expected = expected[columns]
        reordered = read_stata(self.dta15_117, convert_dates=True,
                               columns=columns)
        tm.assert_frame_equal(expected, reordered)

        with pytest.raises(ValueError):
            columns = ['byte_', 'byte_']
            read_stata(self.dta15_117, convert_dates=True, columns=columns)

        with pytest.raises(ValueError):
            columns = ['byte_', 'int_', 'long_', 'not_found']
            read_stata(self.dta15_117, convert_dates=True, columns=columns)

    @pytest.mark.parametrize('version', [114, 117])
    @pytest.mark.filterwarnings(
        "ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch"
    )
    def test_categorical_writing(self, version):
        original = DataFrame.from_records(
            [
                ["one", "ten", "one", "one", "one", 1],
                ["two", "nine", "two", "two", "two", 2],
                ["three", "eight", "three", "three", "three", 3],
                ["four", "seven", 4, "four", "four", 4],
                ["five", "six", 5, np.nan, "five", 5],
                ["six", "five", 6, np.nan, "six", 6],
                ["seven", "four", 7, np.nan, "seven", 7],
                ["eight", "three", 8, np.nan, "eight", 8],
                ["nine", "two", 9, np.nan, "nine", 9],
                ["ten", "one", "ten", np.nan, "ten", 10]
            ],
            columns=['fully_labeled', 'fully_labeled2',
                     'incompletely_labeled', 'labeled_with_missings',
                     'float_labelled', 'unlabeled'])
        expected = original.copy()

        # these are all categoricals
        original = pd.concat([original[col].astype('category')
                              for col in original], axis=1)

        expected['incompletely_labeled'] = expected[
            'incompletely_labeled'].apply(str)
        expected['unlabeled'] = expected['unlabeled'].apply(str)
        expected = pd.concat([expected[col].astype('category')
                              for col in expected], axis=1)
        expected.index.name = 'index'

        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)
            res = written_and_read_again.set_index('index')
            tm.assert_frame_equal(res, expected, check_categorical=False)

    def test_categorical_warnings_and_errors(self):
        # Warning for non-string labels
        # Error for labels too long
        original = pd.DataFrame.from_records(
            [['a' * 10000],
             ['b' * 10000],
             ['c' * 10000],
             ['d' * 10000]],
            columns=['Too_long'])

        original = pd.concat([original[col].astype('category')
                              for col in original], axis=1)
        with tm.ensure_clean() as path:
            pytest.raises(ValueError, original.to_stata, path)

        original = pd.DataFrame.from_records(
            [['a'], ['b'], ['c'], ['d'], [1]],
            columns=['Too_long'])
        original = pd.concat([original[col].astype('category')
                              for col in original], axis=1)

        with tm.assert_produces_warning(pd.io.stata.ValueLabelTypeMismatch):
            original.to_stata(path)
            # should get a warning for mixed content

    @pytest.mark.parametrize('version', [114, 117])
    def test_categorical_with_stata_missing_values(self, version):
        values = [['a' + str(i)] for i in range(120)]
        values.append([np.nan])
        original = pd.DataFrame.from_records(values, columns=['many_labels'])
        original = pd.concat([original[col].astype('category')
                              for col in original], axis=1)
        original.index.name = 'index'
        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)
            res = written_and_read_again.set_index('index')
            tm.assert_frame_equal(res, original, check_categorical=False)

    @pytest.mark.parametrize(
        'file', ['dta19_115', 'dta19_117'])
    def test_categorical_order(self, file):
        # Directly construct using expected codes
        # Format is is_cat, col_name, labels (in order), underlying data
        expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'],
                     np.arange(5)),
                    (True, 'reverse', ['a', 'b', 'c', 'd', 'e'],
                     np.arange(5)[::-1]),
                    (True, 'noorder', ['a', 'b', 'c', 'd', 'e'],
                     np.array([2, 1, 4, 0, 3])),
                    (True, 'floating', ['a', 'b', 'c', 'd', 'e'],
                     np.arange(0, 5)),
                    (True, 'float_missing', ['a', 'd', 'e'],
                     np.array([0, 1, 2, -1, -1])),
                    (False, 'nolabel', [1.0, 2.0, 3.0, 4.0, 5.0],
                     np.arange(5)),
                    (True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'],
                     np.arange(5))]
        cols = []
        for is_cat, col, labels, codes in expected:
            if is_cat:
                cols.append((col, pd.Categorical.from_codes(codes, labels)))
            else:
                cols.append((col, pd.Series(labels, dtype=np.float32)))
        expected = DataFrame.from_dict(OrderedDict(cols))

        # Read with and without categoricals, ensure order is identical
        file = getattr(self, file)
        parsed = read_stata(file)
        tm.assert_frame_equal(expected, parsed, check_categorical=False)

        # Check identity of codes
        for col in expected:
            if is_categorical_dtype(expected[col]):
                tm.assert_series_equal(expected[col].cat.codes,
                                       parsed[col].cat.codes)
                tm.assert_index_equal(expected[col].cat.categories,
                                      parsed[col].cat.categories)

    @pytest.mark.parametrize(
        'file', ['dta20_115', 'dta20_117'])
    def test_categorical_sorting(self, file):
        parsed = read_stata(getattr(self, file))

        # Sort based on codes, not strings
        parsed = parsed.sort_values("srh", na_position='first')

        # Don't sort index
        parsed.index = np.arange(parsed.shape[0])
        codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
        categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
        cat = pd.Categorical.from_codes(codes=codes, categories=categories)
        expected = pd.Series(cat, name='srh')
        tm.assert_series_equal(expected, parsed["srh"],
                               check_categorical=False)

    @pytest.mark.parametrize(
        'file', ['dta19_115', 'dta19_117'])
    def test_categorical_ordering(self, file):
        file = getattr(self, file)
        parsed = read_stata(file)

        parsed_unordered = read_stata(file, order_categoricals=False)
        for col in parsed:
            if not is_categorical_dtype(parsed[col]):
                continue
            assert parsed[col].cat.ordered
            assert not parsed_unordered[col].cat.ordered

    @pytest.mark.parametrize(
        'file', ['dta1_117', 'dta2_117', 'dta3_117', 'dta4_117', 'dta14_117',
                 'dta15_117', 'dta16_117', 'dta17_117', 'dta18_117',
                 'dta19_117', 'dta20_117'])
    @pytest.mark.parametrize(
        'chunksize', [1, 2])
    @pytest.mark.parametrize(
        'convert_categoricals', [False, True])
    @pytest.mark.parametrize(
        'convert_dates', [False, True])
    def test_read_chunks_117(self, file, chunksize,
                             convert_categoricals, convert_dates):
        fname = getattr(self, file)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            parsed = read_stata(
                fname,
                convert_categoricals=convert_categoricals,
                convert_dates=convert_dates)
        itr = read_stata(
            fname, iterator=True,
            convert_categoricals=convert_categoricals,
            convert_dates=convert_dates)

        pos = 0
        for j in range(5):
            with warnings.catch_warnings(record=True) as w:  # noqa
                warnings.simplefilter("always")
                try:
                    chunk = itr.read(chunksize)
                except StopIteration:
                    break
                from_frame = parsed.iloc[pos:pos + chunksize, :]
                tm.assert_frame_equal(
                    from_frame, chunk, check_dtype=False,
                    check_datetimelike_compat=True,
                    check_categorical=False)

            pos += chunksize
        itr.close()

    def test_iterator(self):

        fname = self.dta3_117

        parsed = read_stata(fname)

        with read_stata(fname, iterator=True) as itr:
            chunk = itr.read(5)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        with read_stata(fname, chunksize=5) as itr:
            chunk = list(itr)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])

        with read_stata(fname, iterator=True) as itr:
            chunk = itr.get_chunk(5)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        with read_stata(fname, chunksize=5) as itr:
            chunk = itr.get_chunk()
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        # GH12153
        with read_stata(fname, chunksize=4) as itr:
            from_chunks = pd.concat(itr)
        tm.assert_frame_equal(parsed, from_chunks)

    @pytest.mark.parametrize(
        'file', ['dta2_115', 'dta3_115', 'dta4_115', 'dta14_115',
                 'dta15_115', 'dta16_115', 'dta17_115', 'dta18_115',
                 'dta19_115', 'dta20_115'])
    @pytest.mark.parametrize(
        'chunksize', [1, 2])
    @pytest.mark.parametrize(
        'convert_categoricals', [False, True])
    @pytest.mark.parametrize(
        'convert_dates', [False, True])
    def test_read_chunks_115(self, file, chunksize,
                             convert_categoricals, convert_dates):
        fname = getattr(self, file)

        # Read the whole file
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            parsed = read_stata(
                fname,
                convert_categoricals=convert_categoricals,
                convert_dates=convert_dates)

        # Compare to what we get when reading by chunk
        itr = read_stata(
            fname, iterator=True,
            convert_dates=convert_dates,
            convert_categoricals=convert_categoricals)
        pos = 0
        for j in range(5):
            with warnings.catch_warnings(record=True) as w:  # noqa
                warnings.simplefilter("always")
                try:
                    chunk = itr.read(chunksize)
                except StopIteration:
                    break
                from_frame = parsed.iloc[pos:pos + chunksize, :]
                tm.assert_frame_equal(
                    from_frame, chunk, check_dtype=False,
                    check_datetimelike_compat=True,
                    check_categorical=False)

            pos += chunksize
        itr.close()

    def test_read_chunks_columns(self):
        fname = self.dta3_117
        columns = ['quarter', 'cpi', 'm1']
        chunksize = 2

        parsed = read_stata(fname, columns=columns)
        with read_stata(fname, iterator=True) as itr:
            pos = 0
            for j in range(5):
                chunk = itr.read(chunksize, columns=columns)
                if chunk is None:
                    break
                from_frame = parsed.iloc[pos:pos + chunksize, :]
                tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
                pos += chunksize

    @pytest.mark.parametrize('version', [114, 117])
    def test_write_variable_labels(self, version):
        # GH 13631, add support for writing variable labels
        original = pd.DataFrame({'a': [1, 2, 3, 4],
                                 'b': [1.0, 3.0, 27.0, 81.0],
                                 'c': ['Atlanta', 'Birmingham',
                                       'Cincinnati', 'Detroit']})
        original.index.name = 'index'
        variable_labels = {'a': 'City Rank', 'b': 'City Exponent',
                           'c': 'City'}
        with tm.ensure_clean() as path:
            original.to_stata(path,
                              variable_labels=variable_labels,
                              version=version)
            with StataReader(path) as sr:
                read_labels = sr.variable_labels()
            expected_labels = {'index': '',
                               'a': 'City Rank',
                               'b': 'City Exponent',
                               'c': 'City'}
            assert read_labels == expected_labels

        variable_labels['index'] = 'The Index'
        with tm.ensure_clean() as path:
            original.to_stata(path,
                              variable_labels=variable_labels,
                              version=version)
            with StataReader(path) as sr:
                read_labels = sr.variable_labels()
            assert read_labels == variable_labels

    @pytest.mark.parametrize('version', [114, 117])
    def test_invalid_variable_labels(self, version):
        original = pd.DataFrame({'a': [1, 2, 3, 4],
                                 'b': [1.0, 3.0, 27.0, 81.0],
                                 'c': ['Atlanta', 'Birmingham',
                                       'Cincinnati', 'Detroit']})
        original.index.name = 'index'
        variable_labels = {'a': 'very long' * 10,
                           'b': 'City Exponent',
                           'c': 'City'}
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError):
                original.to_stata(path,
                                  variable_labels=variable_labels,
                                  version=version)

        variable_labels['a'] = u'invalid character Œ'
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError):
                original.to_stata(path,
                                  variable_labels=variable_labels,
                                  version=version)

    def test_write_variable_label_errors(self):
        original = pd.DataFrame({'a': [1, 2, 3, 4],
                                 'b': [1.0, 3.0, 27.0, 81.0],
                                 'c': ['Atlanta', 'Birmingham',
                                       'Cincinnati', 'Detroit']})
        values = [u'\u03A1', u'\u0391',
                  u'\u039D', u'\u0394',
                  u'\u0391', u'\u03A3']

        variable_labels_utf8 = {'a': 'City Rank',
                                'b': 'City Exponent',
                                'c': u''.join(values)}

        with pytest.raises(ValueError):
            with tm.ensure_clean() as path:
                original.to_stata(path,
                                  variable_labels=variable_labels_utf8)

        variable_labels_long = {'a': 'City Rank',
                                'b': 'City Exponent',
                                'c': 'A very, very, very long variable label '
                                     'that is too long for Stata which means '
                                     'that it has more than 80 characters'}

        with pytest.raises(ValueError):
            with tm.ensure_clean() as path:
                original.to_stata(path,
                                  variable_labels=variable_labels_long)

    def test_default_date_conversion(self):
        # GH 12259
        dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
                 dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
                 dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
        original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
                                 'strs': ['apple', 'banana', 'cherry'],
                                 'dates': dates})

        with tm.ensure_clean() as path:
            original.to_stata(path, write_index=False)
            reread = read_stata(path, convert_dates=True)
            tm.assert_frame_equal(original, reread)

            original.to_stata(path,
                              write_index=False,
                              convert_dates={'dates': 'tc'})
            direct = read_stata(path, convert_dates=True)
            tm.assert_frame_equal(reread, direct)

            dates_idx = original.columns.tolist().index('dates')
            original.to_stata(path,
                              write_index=False,
                              convert_dates={dates_idx: 'tc'})
            direct = read_stata(path, convert_dates=True)
            tm.assert_frame_equal(reread, direct)

    def test_unsupported_type(self):
        original = pd.DataFrame({'a': [1 + 2j, 2 + 4j]})

        with pytest.raises(NotImplementedError):
            with tm.ensure_clean() as path:
                original.to_stata(path)

    def test_unsupported_datetype(self):
        dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
                 dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
                 dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
        original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
                                 'strs': ['apple', 'banana', 'cherry'],
                                 'dates': dates})

        with pytest.raises(NotImplementedError):
            with tm.ensure_clean() as path:
                original.to_stata(path, convert_dates={'dates': 'tC'})

        dates = pd.date_range('1-1-1990', periods=3, tz='Asia/Hong_Kong')
        original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
                                 'strs': ['apple', 'banana', 'cherry'],
                                 'dates': dates})
        with pytest.raises(NotImplementedError):
            with tm.ensure_clean() as path:
                original.to_stata(path)

    def test_repeated_column_labels(self):
        # GH 13923
        with pytest.raises(ValueError) as cm:
            read_stata(self.dta23, convert_categoricals=True)
        assert 'wolof' in str(cm.value)

    def test_stata_111(self):
        # 111 is an old version but still used by current versions of
        # SAS when exporting to Stata format. We do not know of any
        # on-line documentation for this version.
        df = read_stata(self.dta24_111)
        original = pd.DataFrame({'y': [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0],
                                 'x': [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6],
                                 'w': [2, np.NaN, 5, 2, 4, 4, 3, 1, 2, 3],
                                 'z': ['a', 'b', 'c', 'd', 'e', '', 'g', 'h',
                                       'i', 'j']})
        original = original[['y', 'x', 'w', 'z']]
        tm.assert_frame_equal(original, df)

    def test_out_of_range_double(self):
        # GH 14618
        df = DataFrame({'ColumnOk': [0.0,
                                     np.finfo(np.double).eps,
                                     4.49423283715579e+307],
                        'ColumnTooBig': [0.0,
                                         np.finfo(np.double).eps,
                                         np.finfo(np.double).max]})
        with pytest.raises(ValueError) as cm:
            with tm.ensure_clean() as path:
                df.to_stata(path)
        assert 'ColumnTooBig' in str(cm.value)

        df.loc[2, 'ColumnTooBig'] = np.inf
        with pytest.raises(ValueError) as cm:
            with tm.ensure_clean() as path:
                df.to_stata(path)
        assert 'ColumnTooBig' in str(cm.value)
        assert 'infinity' in str(cm.value)

    def test_out_of_range_float(self):
        original = DataFrame({'ColumnOk': [0.0,
                                           np.finfo(np.float32).eps,
                                           np.finfo(np.float32).max / 10.0],
                              'ColumnTooBig': [0.0,
                                               np.finfo(np.float32).eps,
                                               np.finfo(np.float32).max]})
        original.index.name = 'index'
        for col in original:
            original[col] = original[col].astype(np.float32)

        with tm.ensure_clean() as path:
            original.to_stata(path)
            reread = read_stata(path)
            original['ColumnTooBig'] = original['ColumnTooBig'].astype(
                np.float64)
            tm.assert_frame_equal(original, reread.set_index('index'))

        original.loc[2, 'ColumnTooBig'] = np.inf
        with pytest.raises(ValueError) as cm:
            with tm.ensure_clean() as path:
                original.to_stata(path)
        assert 'ColumnTooBig' in str(cm.value)
        assert 'infinity' in str(cm.value)

    def test_path_pathlib(self):
        df = tm.makeDataFrame()
        df.index.name = 'index'
        reader = lambda x: read_stata(x).set_index('index')
        result = tm.round_trip_pathlib(df.to_stata, reader)
        tm.assert_frame_equal(df, result)

    def test_pickle_path_localpath(self):
        df = tm.makeDataFrame()
        df.index.name = 'index'
        reader = lambda x: read_stata(x).set_index('index')
        result = tm.round_trip_localpath(df.to_stata, reader)
        tm.assert_frame_equal(df, result)

    @pytest.mark.parametrize(
        'write_index', [True, False])
    def test_value_labels_iterator(self, write_index):
        # GH 16923
        d = {'A': ['B', 'E', 'C', 'A', 'E']}
        df = pd.DataFrame(data=d)
        df['A'] = df['A'].astype('category')
        with tm.ensure_clean() as path:
            df.to_stata(path, write_index=write_index)

            with pd.read_stata(path, iterator=True) as dta_iter:
                value_labels = dta_iter.value_labels()
        assert value_labels == {'A': {0: 'A', 1: 'B', 2: 'C', 3: 'E'}}

    def test_set_index(self):
        # GH 17328
        df = tm.makeDataFrame()
        df.index.name = 'index'
        with tm.ensure_clean() as path:
            df.to_stata(path)
            reread = pd.read_stata(path, index_col='index')
        tm.assert_frame_equal(df, reread)

    @pytest.mark.parametrize(
        'column', ['ms', 'day', 'week', 'month', 'qtr', 'half', 'yr'])
    def test_date_parsing_ignores_format_details(self, column):
        # GH 17797
        #
        # Test that display formats are ignored when determining if a numeric
        # column is a date value.
        #
        # All date types are stored as numbers and format associated with the
        # column denotes both the type of the date and the display format.
        #
        # STATA supports 9 date types which each have distinct units. We test
        # 7 of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc
        # that accounts for leap seconds and %tb relies on STATAs business
        # calendar.
df = read_stata(self.stata_dates) unformatted = df.loc[0, column] formatted = df.loc[0, column + "_fmt"] assert unformatted == formatted def test_writer_117(self): original = DataFrame(data=[['string', 'object', 1, 1, 1, 1.1, 1.1, np.datetime64('2003-12-25'), 'a', 'a' * 2045, 'a' * 5000, 'a'], ['string-1', 'object-1', 1, 1, 1, 1.1, 1.1, np.datetime64('2003-12-26'), 'b', 'b' * 2045, '', ''] ], columns=['string', 'object', 'int8', 'int16', 'int32', 'float32', 'float64', 'datetime', 's1', 's2045', 'srtl', 'forced_strl']) original['object'] = Series(original['object'], dtype=object) original['int8'] = Series(original['int8'], dtype=np.int8) original['int16'] = Series(original['int16'], dtype=np.int16) original['int32'] = original['int32'].astype(np.int32) original['float32'] = Series(original['float32'], dtype=np.float32) original.index.name = 'index' original.index = original.index.astype(np.int32) copy = original.copy() with tm.ensure_clean() as path: original.to_stata(path, convert_dates={'datetime': 'tc'}, convert_strl=['forced_strl'], version=117) written_and_read_again = self.read_dta(path) # original.index is np.int32, read index is np.int64 tm.assert_frame_equal(written_and_read_again.set_index('index'), original, check_index_type=False) tm.assert_frame_equal(original, copy) def test_convert_strl_name_swap(self): original = DataFrame([['a' * 3000, 'A', 'apple'], ['b' * 1000, 'B', 'banana']], columns=['long1' * 10, 'long', 1]) original.index.name = 'index' with tm.assert_produces_warning(pd.io.stata.InvalidColumnName): with tm.ensure_clean() as path: original.to_stata(path, convert_strl=['long', 1], version=117) reread = self.read_dta(path) reread = reread.set_index('index') reread.columns = original.columns tm.assert_frame_equal(reread, original, check_index_type=False) def test_invalid_date_conversion(self): # GH 12259 dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000), dt.datetime(2012, 12, 21, 12, 21, 12, 21000), dt.datetime(1776, 7, 4, 7, 4, 7, 4000)] original = pd.DataFrame({'nums': [1.0, 2.0, 3.0], 'strs': ['apple', 'banana', 'cherry'], 'dates': dates}) with tm.ensure_clean() as path: with pytest.raises(ValueError): original.to_stata(path, convert_dates={'wrong_name': 'tc'}) @pytest.mark.parametrize('version', [114, 117]) def test_nonfile_writing(self, version): # GH 21041 bio = io.BytesIO() df = tm.makeDataFrame() df.index.name = 'index' with tm.ensure_clean() as path: df.to_stata(bio, version=version) bio.seek(0) with open(path, 'wb') as dta: dta.write(bio.read()) reread = pd.read_stata(path, index_col='index') tm.assert_frame_equal(df, reread) def test_gzip_writing(self): # writing version 117 requires seek and cannot be used with gzip df = tm.makeDataFrame() df.index.name = 'index' with tm.ensure_clean() as path: with gzip.GzipFile(path, 'wb') as gz: df.to_stata(gz, version=114) with gzip.GzipFile(path, 'rb') as gz: reread = pd.read_stata(gz, index_col='index') tm.assert_frame_equal(df, reread) def test_unicode_dta_118(self): unicode_df = self.read_dta(self.dta25_118) columns = ['utf8', 'latin1', 'ascii', 'utf8_strl', 'ascii_strl'] values = [[u'ραηδας', u'PÄNDÄS', 'p', u'ραηδας', 'p'], [u'ƤĀńĐąŜ', u'Ö', 'a', u'ƤĀńĐąŜ', 'a'], [u'ᴘᴀᴎᴅᴀS', u'Ü', 'n', u'ᴘᴀᴎᴅᴀS', 'n'], [' ', ' ', 'd', ' ', 'd'], [' ', '', 'a', ' ', 'a'], ['', '', 's', '', 's'], ['', '', ' ', '', ' ']] expected = pd.DataFrame(values, columns=columns) tm.assert_frame_equal(unicode_df, expected)
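# ---------------------------------------------------------------------
# Usage sketch (not part of the test suite above): test_iterator and
# test_read_chunks_115/117 all drive the same public protocol --
# read_stata(..., chunksize=...) returns a reader that supports
# iteration and the context-manager interface, and concatenating its
# chunks reproduces a whole-file read (cf. GH12153). 'example.dta' is
# a hypothetical file name used only for illustration.
import pandas as pd

def read_dta_in_chunks(path, chunksize=1000):
    # Read chunk by chunk; pd.concat over the iterator rebuilds the
    # frame that a single read_stata(path) call would return, which is
    # exactly what the GH12153 check in test_iterator asserts.
    with pd.read_stata(path, chunksize=chunksize) as itr:
        return pd.concat(itr)

# whole = pd.read_stata('example.dta')
# tm.assert_frame_equal(whole, read_dta_in_chunks('example.dta'))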
dsm054/pandas
pandas/tests/io/test_stata.py
pandas/_version.py
""" Read SAS7BDAT files Based on code written by Jared Hobbs: https://bitbucket.org/jaredhobbs/sas7bdat See also: https://github.com/BioStatMatt/sas7bdat Partial documentation of the file format: https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf Reference for binary data compression: http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm """ from datetime import datetime import struct import numpy as np from pandas.errors import EmptyDataError import pandas as pd from pandas import compat from pandas.io.common import BaseIterator, get_filepath_or_buffer from pandas.io.sas._sas import Parser import pandas.io.sas.sas_constants as const class _subheader_pointer(object): pass class _column(object): pass # SAS7BDAT represents a SAS data file in SAS7BDAT format. class SAS7BDATReader(BaseIterator): """ Read SAS files in SAS7BDAT format. Parameters ---------- path_or_buf : path name or buffer Name of SAS file or file-like object pointing to SAS file contents. index : column identifier, defaults to None Column to use as index. convert_dates : boolean, defaults to True Attempt to convert dates to Pandas datetime values. Note that some rarely used SAS date formats may be unsupported. blank_missing : boolean, defaults to True Convert empty strings to missing values (SAS uses blanks to indicate missing character variables). chunksize : int, defaults to None Return SAS7BDATReader object for iterations, returns chunks with given number of lines. encoding : string, defaults to None String encoding. convert_text : bool, defaults to True If False, text variables are left as raw bytes. convert_header_text : bool, defaults to True If False, header text, including column names, are left as raw bytes. """ def __init__(self, path_or_buf, index=None, convert_dates=True, blank_missing=True, chunksize=None, encoding=None, convert_text=True, convert_header_text=True): self.index = index self.convert_dates = convert_dates self.blank_missing = blank_missing self.chunksize = chunksize self.encoding = encoding self.convert_text = convert_text self.convert_header_text = convert_header_text self.default_encoding = "latin-1" self.compression = "" self.column_names_strings = [] self.column_names = [] self.column_formats = [] self.columns = [] self._current_page_data_subheader_pointers = [] self._cached_page = None self._column_data_lengths = [] self._column_data_offsets = [] self._column_types = [] self._current_row_in_file_index = 0 self._current_row_on_page_index = 0 self._current_row_in_file_index = 0 self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf) if isinstance(self._path_or_buf, compat.string_types): self._path_or_buf = open(self._path_or_buf, 'rb') self.handle = self._path_or_buf self._get_properties() self._parse_metadata() def column_data_lengths(self): """Return a numpy int64 array of the column data lengths""" return np.asarray(self._column_data_lengths, dtype=np.int64) def column_data_offsets(self): """Return a numpy int64 array of the column offsets""" return np.asarray(self._column_data_offsets, dtype=np.int64) def column_types(self): """Returns a numpy character array of the column types: s (string) or d (double)""" return np.asarray(self._column_types, dtype=np.dtype('S1')) def close(self): try: self.handle.close() except AttributeError: pass def _get_properties(self): # Check magic number self._path_or_buf.seek(0) self._cached_page = self._path_or_buf.read(288) if self._cached_page[0:len(const.magic)] != const.magic: 
self.close() raise ValueError("magic number mismatch (not a SAS file?)") # Get alignment information align1, align2 = 0, 0 buf = self._read_bytes(const.align_1_offset, const.align_1_length) if buf == const.u64_byte_checker_value: align2 = const.align_2_value self.U64 = True self._int_length = 8 self._page_bit_offset = const.page_bit_offset_x64 self._subheader_pointer_length = const.subheader_pointer_length_x64 else: self.U64 = False self._page_bit_offset = const.page_bit_offset_x86 self._subheader_pointer_length = const.subheader_pointer_length_x86 self._int_length = 4 buf = self._read_bytes(const.align_2_offset, const.align_2_length) if buf == const.align_1_checker_value: align1 = const.align_2_value total_align = align1 + align2 # Get endianness information buf = self._read_bytes(const.endianness_offset, const.endianness_length) if buf == b'\x01': self.byte_order = "<" else: self.byte_order = ">" # Get encoding information buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0] if buf in const.encoding_names: self.file_encoding = const.encoding_names[buf] else: self.file_encoding = "unknown (code=%s)" % str(buf) # Get platform information buf = self._read_bytes(const.platform_offset, const.platform_length) if buf == b'1': self.platform = "unix" elif buf == b'2': self.platform = "windows" else: self.platform = "unknown" buf = self._read_bytes(const.dataset_offset, const.dataset_length) self.name = buf.rstrip(b'\x00 ') if self.convert_header_text: self.name = self.name.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.file_type_offset, const.file_type_length) self.file_type = buf.rstrip(b'\x00 ') if self.convert_header_text: self.file_type = self.file_type.decode( self.encoding or self.default_encoding) # Timestamp is epoch 01/01/1960 epoch = datetime(1960, 1, 1) x = self._read_float(const.date_created_offset + align1, const.date_created_length) self.date_created = epoch + pd.to_timedelta(x, unit='s') x = self._read_float(const.date_modified_offset + align1, const.date_modified_length) self.date_modified = epoch + pd.to_timedelta(x, unit='s') self.header_length = self._read_int(const.header_size_offset + align1, const.header_size_length) # Read the rest of the header into cached_page. 
buf = self._path_or_buf.read(self.header_length - 288) self._cached_page += buf if len(self._cached_page) != self.header_length: self.close() raise ValueError("The SAS7BDAT file appears to be truncated.") self._page_length = self._read_int(const.page_size_offset + align1, const.page_size_length) self._page_count = self._read_int(const.page_count_offset + align1, const.page_count_length) buf = self._read_bytes(const.sas_release_offset + total_align, const.sas_release_length) self.sas_release = buf.rstrip(b'\x00 ') if self.convert_header_text: self.sas_release = self.sas_release.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.sas_server_type_offset + total_align, const.sas_server_type_length) self.server_type = buf.rstrip(b'\x00 ') if self.convert_header_text: self.server_type = self.server_type.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.os_version_number_offset + total_align, const.os_version_number_length) self.os_version = buf.rstrip(b'\x00 ') if self.convert_header_text: self.os_version = self.os_version.decode( self.encoding or self.default_encoding) buf = self._read_bytes(const.os_name_offset + total_align, const.os_name_length) buf = buf.rstrip(b'\x00 ') if len(buf) > 0: self.os_name = buf.decode(self.encoding or self.default_encoding) else: buf = self._read_bytes(const.os_maker_offset + total_align, const.os_maker_length) self.os_name = buf.rstrip(b'\x00 ') if self.convert_header_text: self.os_name = self.os_name.decode( self.encoding or self.default_encoding) def __next__(self): da = self.read(nrows=self.chunksize or 1) if da is None: raise StopIteration return da # Read a single float of the given width (4 or 8). def _read_float(self, offset, width): if width not in (4, 8): self.close() raise ValueError("invalid float width") buf = self._read_bytes(offset, width) fd = "f" if width == 4 else "d" return struct.unpack(self.byte_order + fd, buf)[0] # Read a single signed integer of the given width (1, 2, 4 or 8). def _read_int(self, offset, width): if width not in (1, 2, 4, 8): self.close() raise ValueError("invalid int width") buf = self._read_bytes(offset, width) it = {1: "b", 2: "h", 4: "l", 8: "q"}[width] iv = struct.unpack(self.byte_order + it, buf)[0] return iv def _read_bytes(self, offset, length): if self._cached_page is None: self._path_or_buf.seek(offset) buf = self._path_or_buf.read(length) if len(buf) < length: self.close() msg = "Unable to read {:d} bytes from file position {:d}." 
raise ValueError(msg.format(length, offset)) return buf else: if offset + length > len(self._cached_page): self.close() raise ValueError("The cached page is too small.") return self._cached_page[offset:offset + length] def _parse_metadata(self): done = False while not done: self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: break if len(self._cached_page) != self._page_length: self.close() raise ValueError( "Failed to read a meta data page from the SAS file.") done = self._process_page_meta() def _process_page_meta(self): self._read_page_header() pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types if self._current_page_type in pt: self._process_page_metadata() is_data_page = self._current_page_type & const.page_data_type is_mix_page = self._current_page_type in const.page_mix_types return (is_data_page or is_mix_page or self._current_page_data_subheader_pointers != []) def _read_page_header(self): bit_offset = self._page_bit_offset tx = const.page_type_offset + bit_offset self._current_page_type = self._read_int(tx, const.page_type_length) tx = const.block_count_offset + bit_offset self._current_page_block_count = self._read_int( tx, const.block_count_length) tx = const.subheader_count_offset + bit_offset self._current_page_subheaders_count = ( self._read_int(tx, const.subheader_count_length)) def _process_page_metadata(self): bit_offset = self._page_bit_offset for i in range(self._current_page_subheaders_count): pointer = self._process_subheader_pointers( const.subheader_pointers_offset + bit_offset, i) if pointer.length == 0: continue if pointer.compression == const.truncated_subheader_id: continue subheader_signature = self._read_subheader_signature( pointer.offset) subheader_index = ( self._get_subheader_index(subheader_signature, pointer.compression, pointer.ptype)) self._process_subheader(subheader_index, pointer) def _get_subheader_index(self, signature, compression, ptype): index = const.subheader_signature_to_index.get(signature) if index is None: f1 = ((compression == const.compressed_subheader_id) or (compression == 0)) f2 = (ptype == const.compressed_subheader_type) if (self.compression != "") and f1 and f2: index = const.SASIndex.data_subheader_index else: self.close() raise ValueError("Unknown subheader signature") return index def _process_subheader_pointers(self, offset, subheader_pointer_index): subheader_pointer_length = self._subheader_pointer_length total_offset = (offset + subheader_pointer_length * subheader_pointer_index) subheader_offset = self._read_int(total_offset, self._int_length) total_offset += self._int_length subheader_length = self._read_int(total_offset, self._int_length) total_offset += self._int_length subheader_compression = self._read_int(total_offset, 1) total_offset += 1 subheader_type = self._read_int(total_offset, 1) x = _subheader_pointer() x.offset = subheader_offset x.length = subheader_length x.compression = subheader_compression x.ptype = subheader_type return x def _read_subheader_signature(self, offset): subheader_signature = self._read_bytes(offset, self._int_length) return subheader_signature def _process_subheader(self, subheader_index, pointer): offset = pointer.offset length = pointer.length if subheader_index == const.SASIndex.row_size_index: processor = self._process_rowsize_subheader elif subheader_index == const.SASIndex.column_size_index: processor = self._process_columnsize_subheader elif subheader_index == const.SASIndex.column_text_index: processor = 
self._process_columntext_subheader elif subheader_index == const.SASIndex.column_name_index: processor = self._process_columnname_subheader elif subheader_index == const.SASIndex.column_attributes_index: processor = self._process_columnattributes_subheader elif subheader_index == const.SASIndex.format_and_label_index: processor = self._process_format_subheader elif subheader_index == const.SASIndex.column_list_index: processor = self._process_columnlist_subheader elif subheader_index == const.SASIndex.subheader_counts_index: processor = self._process_subheader_counts elif subheader_index == const.SASIndex.data_subheader_index: self._current_page_data_subheader_pointers.append(pointer) return else: raise ValueError("unknown subheader index") processor(offset, length) def _process_rowsize_subheader(self, offset, length): int_len = self._int_length lcs_offset = offset lcp_offset = offset if self.U64: lcs_offset += 682 lcp_offset += 706 else: lcs_offset += 354 lcp_offset += 378 self.row_length = self._read_int( offset + const.row_length_offset_multiplier * int_len, int_len) self.row_count = self._read_int( offset + const.row_count_offset_multiplier * int_len, int_len) self.col_count_p1 = self._read_int( offset + const.col_count_p1_multiplier * int_len, int_len) self.col_count_p2 = self._read_int( offset + const.col_count_p2_multiplier * int_len, int_len) mx = const.row_count_on_mix_page_offset_multiplier * int_len self._mix_page_row_count = self._read_int(offset + mx, int_len) self._lcs = self._read_int(lcs_offset, 2) self._lcp = self._read_int(lcp_offset, 2) def _process_columnsize_subheader(self, offset, length): int_len = self._int_length offset += int_len self.column_count = self._read_int(offset, int_len) if (self.col_count_p1 + self.col_count_p2 != self.column_count): print("Warning: column count mismatch (%d + %d != %d)\n", self.col_count_p1, self.col_count_p2, self.column_count) # Unknown purpose def _process_subheader_counts(self, offset, length): pass def _process_columntext_subheader(self, offset, length): offset += self._int_length text_block_size = self._read_int(offset, const.text_block_size_length) buf = self._read_bytes(offset, text_block_size) cname_raw = buf[0:text_block_size].rstrip(b"\x00 ") cname = cname_raw if self.convert_header_text: cname = cname.decode(self.encoding or self.default_encoding) self.column_names_strings.append(cname) if len(self.column_names_strings) == 1: compression_literal = "" for cl in const.compression_literals: if cl in cname_raw: compression_literal = cl self.compression = compression_literal offset -= self._int_length offset1 = offset + 16 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) compression_literal = buf.rstrip(b"\x00") if compression_literal == "": self._lcs = 0 offset1 = offset + 32 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) self.creator_proc = buf[0:self._lcp] elif compression_literal == const.rle_compression: offset1 = offset + 40 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) self.creator_proc = buf[0:self._lcp] elif self._lcs > 0: self._lcp = 0 offset1 = offset + 16 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcs) self.creator_proc = buf[0:self._lcp] if self.convert_header_text: if hasattr(self, "creator_proc"): self.creator_proc = self.creator_proc.decode( self.encoding or self.default_encoding) def _process_columnname_subheader(self, offset, length): int_len = self._int_length offset += int_len column_name_pointers_count = (length - 2 * 
int_len - 12) // 8 for i in range(column_name_pointers_count): text_subheader = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_text_subheader_offset col_name_offset = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_offset_offset col_name_length = offset + const.column_name_pointer_length * \ (i + 1) + const.column_name_length_offset idx = self._read_int( text_subheader, const.column_name_text_subheader_length) col_offset = self._read_int( col_name_offset, const.column_name_offset_length) col_len = self._read_int( col_name_length, const.column_name_length_length) name_str = self.column_names_strings[idx] self.column_names.append(name_str[col_offset:col_offset + col_len]) def _process_columnattributes_subheader(self, offset, length): int_len = self._int_length column_attributes_vectors_count = ( length - 2 * int_len - 12) // (int_len + 8) for i in range(column_attributes_vectors_count): col_data_offset = (offset + int_len + const.column_data_offset_offset + i * (int_len + 8)) col_data_len = (offset + 2 * int_len + const.column_data_length_offset + i * (int_len + 8)) col_types = (offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)) x = self._read_int(col_data_offset, int_len) self._column_data_offsets.append(x) x = self._read_int(col_data_len, const.column_data_length_length) self._column_data_lengths.append(x) x = self._read_int(col_types, const.column_type_length) self._column_types.append(b'd' if x == 1 else b's') def _process_columnlist_subheader(self, offset, length): # unknown purpose pass def _process_format_subheader(self, offset, length): int_len = self._int_length text_subheader_format = ( offset + const.column_format_text_subheader_index_offset + 3 * int_len) col_format_offset = (offset + const.column_format_offset_offset + 3 * int_len) col_format_len = (offset + const.column_format_length_offset + 3 * int_len) text_subheader_label = ( offset + const.column_label_text_subheader_index_offset + 3 * int_len) col_label_offset = (offset + const.column_label_offset_offset + 3 * int_len) col_label_len = offset + const.column_label_length_offset + 3 * int_len x = self._read_int(text_subheader_format, const.column_format_text_subheader_index_length) format_idx = min(x, len(self.column_names_strings) - 1) format_start = self._read_int( col_format_offset, const.column_format_offset_length) format_len = self._read_int( col_format_len, const.column_format_length_length) label_idx = self._read_int( text_subheader_label, const.column_label_text_subheader_index_length) label_idx = min(label_idx, len(self.column_names_strings) - 1) label_start = self._read_int( col_label_offset, const.column_label_offset_length) label_len = self._read_int(col_label_len, const.column_label_length_length) label_names = self.column_names_strings[label_idx] column_label = label_names[label_start: label_start + label_len] format_names = self.column_names_strings[format_idx] column_format = format_names[format_start: format_start + format_len] current_column_number = len(self.columns) col = _column() col.col_id = current_column_number col.name = self.column_names[current_column_number] col.label = column_label col.format = column_format col.ctype = self._column_types[current_column_number] col.length = self._column_data_lengths[current_column_number] self.column_formats.append(column_format) self.columns.append(col) def read(self, nrows=None): if (nrows is None) and (self.chunksize is not None): nrows = self.chunksize elif nrows is None: nrows = 
self.row_count if len(self._column_types) == 0: self.close() raise EmptyDataError("No columns to parse from file") if self._current_row_in_file_index >= self.row_count: return None m = self.row_count - self._current_row_in_file_index if nrows > m: nrows = m nd = self._column_types.count(b'd') ns = self._column_types.count(b's') self._string_chunk = np.empty((ns, nrows), dtype=np.object) self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8) self._current_row_in_chunk_index = 0 p = Parser(self) p.read(nrows) rslt = self._chunk_to_dataframe() if self.index is not None: rslt = rslt.set_index(self.index) return rslt def _read_next_page(self): self._current_page_data_subheader_pointers = [] self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: return True elif len(self._cached_page) != self._page_length: self.close() msg = ("failed to read complete page from file " "(read {:d} of {:d} bytes)") raise ValueError(msg.format(len(self._cached_page), self._page_length)) self._read_page_header() page_type = self._current_page_type if page_type == const.page_meta_type: self._process_page_metadata() is_data_page = page_type & const.page_data_type pt = [const.page_meta_type] + const.page_mix_types if not is_data_page and self._current_page_type not in pt: return self._read_next_page() return False def _chunk_to_dataframe(self): n = self._current_row_in_chunk_index m = self._current_row_in_file_index ix = range(m - n, m) rslt = pd.DataFrame(index=ix) js, jb = 0, 0 for j in range(self.column_count): name = self.column_names[j] if self._column_types[j] == b'd': rslt[name] = self._byte_chunk[jb, :].view( dtype=self.byte_order + 'd') rslt[name] = np.asarray(rslt[name], dtype=np.float64) if self.convert_dates: unit = None if self.column_formats[j] in const.sas_date_formats: unit = 'd' elif self.column_formats[j] in const.sas_datetime_formats: unit = 's' if unit: rslt[name] = pd.to_datetime(rslt[name], unit=unit, origin="1960-01-01") jb += 1 elif self._column_types[j] == b's': rslt[name] = self._string_chunk[js, :] if self.convert_text and (self.encoding is not None): rslt[name] = rslt[name].str.decode( self.encoding or self.default_encoding) if self.blank_missing: ii = rslt[name].str.len() == 0 rslt.loc[ii, name] = np.nan js += 1 else: self.close() raise ValueError("unknown column type %s" % self._column_types[j]) return rslt
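# ---------------------------------------------------------------------
# Usage sketch (an assumption-labelled example, not part of the module
# above): __next__ delegates to read(nrows=chunksize), and read()
# returns None once _current_row_in_file_index reaches row_count, so a
# reader constructed with chunksize can be consumed as a plain Python
# iterator (BaseIterator supplies __iter__). 'data.sas7bdat' is a
# hypothetical path.
from pandas.io.sas.sas7bdat import SAS7BDATReader

def count_sas_rows(path, chunksize=10000):
    rdr = SAS7BDATReader(path, chunksize=chunksize, convert_dates=True)
    total = 0
    try:
        for chunk in rdr:       # each chunk is a DataFrame of <= chunksize rows
            total += len(chunk)
    finally:
        rdr.close()             # release the underlying file handle
    return total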
# -*- coding: utf-8 -*- # pylint: disable=E1101 import datetime as dt import io import gzip import os import struct import warnings from collections import OrderedDict from datetime import datetime import numpy as np import pytest import pandas as pd import pandas.util.testing as tm import pandas.compat as compat from pandas.compat import iterkeys from pandas.core.dtypes.common import is_categorical_dtype from pandas.core.frame import DataFrame, Series from pandas.io.parsers import read_csv from pandas.io.stata import (InvalidColumnName, PossiblePrecisionLoss, StataMissingValue, StataReader, read_stata) @pytest.fixture def dirpath(datapath): return datapath("io", "data") @pytest.fixture def parsed_114(dirpath): dta14_114 = os.path.join(dirpath, 'stata5_114.dta') parsed_114 = read_stata(dta14_114, convert_dates=True) parsed_114.index.name = 'index' return parsed_114 class TestStata(object): @pytest.fixture(autouse=True) def setup_method(self, datapath): self.dirpath = datapath("io", "data") self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta') self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta') self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta') self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta') self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta') self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta') self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta') self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta') self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta') self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta') self.csv3 = os.path.join(self.dirpath, 'stata3.csv') self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta') self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta') self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta') self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta') self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta') self.csv14 = os.path.join(self.dirpath, 'stata5.csv') self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta') self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta') self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta') self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta') self.csv15 = os.path.join(self.dirpath, 'stata6.csv') self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta') self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta') self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta') self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta') self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta') self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta') self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta') self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta') self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta') self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta') self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta') self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta') self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta') self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta') self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta') self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta') self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta') self.dta23 = os.path.join(self.dirpath, 'stata15.dta') self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta') self.dta25_118 = 
os.path.join(self.dirpath, 'stata16_118.dta') self.stata_dates = os.path.join(self.dirpath, 'stata13_dates.dta') def read_dta(self, file): # Legacy default reader configuration return read_stata(file, convert_dates=True) def read_csv(self, file): return read_csv(file, parse_dates=True) @pytest.mark.parametrize('version', [114, 117]) def test_read_empty_dta(self, version): empty_ds = DataFrame(columns=['unit']) # GH 7369, make sure can read a 0-obs dta file with tm.ensure_clean() as path: empty_ds.to_stata(path, write_index=False, version=version) empty_ds2 = read_stata(path) tm.assert_frame_equal(empty_ds, empty_ds2) def test_data_method(self): # Minimal testing of legacy data method with StataReader(self.dta1_114) as rdr: with tm.assert_produces_warning(UserWarning): parsed_114_data = rdr.data() with StataReader(self.dta1_114) as rdr: parsed_114_read = rdr.read() tm.assert_frame_equal(parsed_114_data, parsed_114_read) @pytest.mark.parametrize( 'file', ['dta1_114', 'dta1_117']) def test_read_dta1(self, file): file = getattr(self, file) parsed = self.read_dta(file) # Pandas uses np.nan as missing value. # Thus, all columns will be of type float, regardless of their name. expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)], columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) # this is an oddity as really the nan should be float64, but # the casting doesn't fail so need to match stata here expected['float_miss'] = expected['float_miss'].astype(np.float32) tm.assert_frame_equal(parsed, expected) def test_read_dta2(self): expected = DataFrame.from_records( [ ( datetime(2006, 11, 19, 23, 13, 20), 1479596223000, datetime(2010, 1, 20), datetime(2010, 1, 8), datetime(2010, 1, 1), datetime(1974, 7, 1), datetime(2010, 1, 1), datetime(2010, 1, 1) ), ( datetime(1959, 12, 31, 20, 3, 20), -1479590, datetime(1953, 10, 2), datetime(1948, 6, 10), datetime(1955, 1, 1), datetime(1955, 7, 1), datetime(1955, 1, 1), datetime(2, 1, 1) ), ( pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, ) ], columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date', 'monthly_date', 'quarterly_date', 'half_yearly_date', 'yearly_date'] ) expected['yearly_date'] = expected['yearly_date'].astype('O') with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") parsed_114 = self.read_dta(self.dta2_114) parsed_115 = self.read_dta(self.dta2_115) parsed_117 = self.read_dta(self.dta2_117) # 113 is buggy due to limits of date format support in Stata # parsed_113 = self.read_dta(self.dta2_113) # Remove resource warnings w = [x for x in w if x.category is UserWarning] # should get warning for each call to read_dta assert len(w) == 3 # buggy test because of the NaT comparison on certain platforms # Format 113 test fails since it does not support tc and tC formats # tm.assert_frame_equal(parsed_113, expected) tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True) tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True) tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True) @pytest.mark.parametrize( 'file', ['dta3_113', 'dta3_114', 'dta3_115', 'dta3_117']) def test_read_dta3(self, file): file = getattr(self, file) parsed = self.read_dta(file) # match stata here expected = self.read_csv(self.csv3) expected = expected.astype(np.float32) expected['year'] = expected['year'].astype(np.int16) expected['quarter'] = expected['quarter'].astype(np.int8) tm.assert_frame_equal(parsed, expected) @pytest.mark.parametrize( 
'file', ['dta4_113', 'dta4_114', 'dta4_115', 'dta4_117']) def test_read_dta4(self, file): file = getattr(self, file) parsed = self.read_dta(file) expected = DataFrame.from_records( [ ["one", "ten", "one", "one", "one"], ["two", "nine", "two", "two", "two"], ["three", "eight", "three", "three", "three"], ["four", "seven", 4, "four", "four"], ["five", "six", 5, np.nan, "five"], ["six", "five", 6, np.nan, "six"], ["seven", "four", 7, np.nan, "seven"], ["eight", "three", 8, np.nan, "eight"], ["nine", "two", 9, np.nan, "nine"], ["ten", "one", "ten", np.nan, "ten"] ], columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled', 'labeled_with_missings', 'float_labelled']) # these are all categoricals expected = pd.concat([expected[col].astype('category') for col in expected], axis=1) # stata doesn't save .category metadata tm.assert_frame_equal(parsed, expected, check_categorical=False) # File containing strls def test_read_dta12(self): parsed_117 = self.read_dta(self.dta21_117) expected = DataFrame.from_records( [ [1, "abc", "abcdefghi"], [3, "cba", "qwertywertyqwerty"], [93, "", "strl"], ], columns=['x', 'y', 'z']) tm.assert_frame_equal(parsed_117, expected, check_dtype=False) def test_read_dta18(self): parsed_118 = self.read_dta(self.dta22_118) parsed_118["Bytes"] = parsed_118["Bytes"].astype('O') expected = DataFrame.from_records( [['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0], ['Dog', 'Boston', u'Uzunköprü', np.nan, np.nan, np.nan, np.nan], ['Plane', 'Rome', u'Tromsø', 0, 0.0, 'option a', 0.0], ['Potato', 'Tokyo', u'Elâzığ', -4, 4.0, 4, 4], ['', '', '', 0, 0.3332999, 'option a', 1 / 3.] ], columns=['Things', 'Cities', 'Unicode_Cities_Strl', 'Ints', 'Floats', 'Bytes', 'Longs']) expected["Floats"] = expected["Floats"].astype(np.float32) for col in parsed_118.columns: tm.assert_almost_equal(parsed_118[col], expected[col]) with StataReader(self.dta22_118) as rdr: vl = rdr.variable_labels() vl_expected = {u'Unicode_Cities_Strl': u'Here are some strls with Ünicode chars', u'Longs': u'long data', u'Things': u'Here are some things', u'Bytes': u'byte data', u'Ints': u'int data', u'Cities': u'Here are some cities', u'Floats': u'float data'} tm.assert_dict_equal(vl, vl_expected) assert rdr.data_label == u'This is a Ünicode data label' def test_read_write_dta5(self): original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)], columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) original.index.name = 'index' with tm.ensure_clean() as path: original.to_stata(path, None) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), original) def test_write_dta6(self): original = self.read_csv(self.csv3) original.index.name = 'index' original.index = original.index.astype(np.int32) original['year'] = original['year'].astype(np.int32) original['quarter'] = original['quarter'].astype(np.int32) with tm.ensure_clean() as path: original.to_stata(path, None) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), original, check_index_type=False) @pytest.mark.parametrize('version', [114, 117]) def test_read_write_dta10(self, version): original = DataFrame(data=[["string", "object", 1, 1.1, np.datetime64('2003-12-25')]], columns=['string', 'object', 'integer', 'floating', 'datetime']) original["object"] = Series(original["object"], dtype=object) original.index.name = 'index' original.index = original.index.astype(np.int32) original['integer'] = 
original['integer'].astype(np.int32) with tm.ensure_clean() as path: original.to_stata(path, {'datetime': 'tc'}, version=version) written_and_read_again = self.read_dta(path) # original.index is np.int32, read index is np.int64 tm.assert_frame_equal(written_and_read_again.set_index('index'), original, check_index_type=False) def test_stata_doc_examples(self): with tm.ensure_clean() as path: df = DataFrame(np.random.randn(10, 2), columns=list('AB')) df.to_stata(path) def test_write_preserves_original(self): # 9795 np.random.seed(423) df = pd.DataFrame(np.random.randn(5, 4), columns=list('abcd')) df.loc[2, 'a':'c'] = np.nan df_copy = df.copy() with tm.ensure_clean() as path: df.to_stata(path, write_index=False) tm.assert_frame_equal(df, df_copy) @pytest.mark.parametrize('version', [114, 117]) def test_encoding(self, version): # GH 4626, proper encoding handling raw = read_stata(self.dta_encoding) with tm.assert_produces_warning(FutureWarning): encoded = read_stata(self.dta_encoding, encoding='latin-1') result = encoded.kreis1849[0] expected = raw.kreis1849[0] assert result == expected assert isinstance(result, compat.string_types) with tm.ensure_clean() as path: with tm.assert_produces_warning(FutureWarning): encoded.to_stata(path, write_index=False, version=version, encoding='latin-1') reread_encoded = read_stata(path) tm.assert_frame_equal(encoded, reread_encoded) def test_read_write_dta11(self): original = DataFrame([(1, 2, 3, 4)], columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______']) formatted = DataFrame([(1, 2, 3, 4)], columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_']) formatted.index.name = 'index' formatted = formatted.astype(np.int32) with tm.ensure_clean() as path: with tm.assert_produces_warning(pd.io.stata.InvalidColumnName): original.to_stata(path, None) written_and_read_again = self.read_dta(path) tm.assert_frame_equal( written_and_read_again.set_index('index'), formatted) @pytest.mark.parametrize('version', [114, 117]) def test_read_write_dta12(self, version): original = DataFrame([(1, 2, 3, 4, 5, 6)], columns=['astringwithmorethan32characters_1', 'astringwithmorethan32characters_2', '+', '-', 'short', 'delete']) formatted = DataFrame([(1, 2, 3, 4, 5, 6)], columns=['astringwithmorethan32characters_', '_0astringwithmorethan32character', '_', '_1_', '_short', '_delete']) formatted.index.name = 'index' formatted = formatted.astype(np.int32) with tm.ensure_clean() as path: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always', InvalidColumnName) original.to_stata(path, None, version=version) # should get a warning for that format. 
assert len(w) == 1 written_and_read_again = self.read_dta(path) tm.assert_frame_equal( written_and_read_again.set_index('index'), formatted) def test_read_write_dta13(self): s1 = Series(2 ** 9, dtype=np.int16) s2 = Series(2 ** 17, dtype=np.int32) s3 = Series(2 ** 33, dtype=np.int64) original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3}) original.index.name = 'index' formatted = original formatted['int64'] = formatted['int64'].astype(np.float64) with tm.ensure_clean() as path: original.to_stata(path) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted) @pytest.mark.parametrize('version', [114, 117]) @pytest.mark.parametrize( 'file', ['dta14_113', 'dta14_114', 'dta14_115', 'dta14_117']) def test_read_write_reread_dta14(self, file, parsed_114, version): file = getattr(self, file) parsed = self.read_dta(file) parsed.index.name = 'index' expected = self.read_csv(self.csv14) cols = ['byte_', 'int_', 'long_', 'float_', 'double_'] for col in cols: expected[col] = expected[col]._convert(datetime=True, numeric=True) expected['float_'] = expected['float_'].astype(np.float32) expected['date_td'] = pd.to_datetime( expected['date_td'], errors='coerce') tm.assert_frame_equal(parsed_114, parsed) with tm.ensure_clean() as path: parsed_114.to_stata(path, {'date_td': 'td'}, version=version) written_and_read_again = self.read_dta(path) tm.assert_frame_equal( written_and_read_again.set_index('index'), parsed_114) @pytest.mark.parametrize( 'file', ['dta15_113', 'dta15_114', 'dta15_115', 'dta15_117']) def test_read_write_reread_dta15(self, file): expected = self.read_csv(self.csv15) expected['byte_'] = expected['byte_'].astype(np.int8) expected['int_'] = expected['int_'].astype(np.int16) expected['long_'] = expected['long_'].astype(np.int32) expected['float_'] = expected['float_'].astype(np.float32) expected['double_'] = expected['double_'].astype(np.float64) expected['date_td'] = expected['date_td'].apply( datetime.strptime, args=('%Y-%m-%d',)) file = getattr(self, file) parsed = self.read_dta(file) tm.assert_frame_equal(expected, parsed) @pytest.mark.parametrize('version', [114, 117]) def test_timestamp_and_label(self, version): original = DataFrame([(1,)], columns=['variable']) time_stamp = datetime(2000, 2, 29, 14, 21) data_label = 'This is a data file.' with tm.ensure_clean() as path: original.to_stata(path, time_stamp=time_stamp, data_label=data_label, version=version) with StataReader(path) as reader: assert reader.time_stamp == '29 Feb 2000 14:21' assert reader.data_label == data_label @pytest.mark.parametrize('version', [114, 117]) def test_invalid_timestamp(self, version): original = DataFrame([(1,)], columns=['variable']) time_stamp = '01 Jan 2000, 00:00:00' with tm.ensure_clean() as path: with pytest.raises(ValueError): original.to_stata(path, time_stamp=time_stamp, version=version) def test_numeric_column_names(self): original = DataFrame(np.reshape(np.arange(25.0), (5, 5))) original.index.name = 'index' with tm.ensure_clean() as path: # should get a warning for that format. 
with tm.assert_produces_warning(InvalidColumnName): original.to_stata(path) written_and_read_again = self.read_dta(path) written_and_read_again = written_and_read_again.set_index('index') columns = list(written_and_read_again.columns) convert_col_name = lambda x: int(x[1]) written_and_read_again.columns = map(convert_col_name, columns) tm.assert_frame_equal(original, written_and_read_again) @pytest.mark.parametrize('version', [114, 117]) def test_nan_to_missing_value(self, version): s1 = Series(np.arange(4.0), dtype=np.float32) s2 = Series(np.arange(4.0), dtype=np.float64) s1[::2] = np.nan s2[1::2] = np.nan original = DataFrame({'s1': s1, 's2': s2}) original.index.name = 'index' with tm.ensure_clean() as path: original.to_stata(path, version=version) written_and_read_again = self.read_dta(path) written_and_read_again = written_and_read_again.set_index('index') tm.assert_frame_equal(written_and_read_again, original) def test_no_index(self): columns = ['x', 'y'] original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns) original.index.name = 'index_not_written' with tm.ensure_clean() as path: original.to_stata(path, write_index=False) written_and_read_again = self.read_dta(path) pytest.raises( KeyError, lambda: written_and_read_again['index_not_written']) def test_string_no_dates(self): s1 = Series(['a', 'A longer string']) s2 = Series([1.0, 2.0], dtype=np.float64) original = DataFrame({'s1': s1, 's2': s2}) original.index.name = 'index' with tm.ensure_clean() as path: original.to_stata(path) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), original) def test_large_value_conversion(self): s0 = Series([1, 99], dtype=np.int8) s1 = Series([1, 127], dtype=np.int8) s2 = Series([1, 2 ** 15 - 1], dtype=np.int16) s3 = Series([1, 2 ** 63 - 1], dtype=np.int64) original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3}) original.index.name = 'index' with tm.ensure_clean() as path: with tm.assert_produces_warning(PossiblePrecisionLoss): original.to_stata(path) written_and_read_again = self.read_dta(path) modified = original.copy() modified['s1'] = Series(modified['s1'], dtype=np.int16) modified['s2'] = Series(modified['s2'], dtype=np.int32) modified['s3'] = Series(modified['s3'], dtype=np.float64) tm.assert_frame_equal(written_and_read_again.set_index('index'), modified) def test_dates_invalid_column(self): original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)]) original.index.name = 'index' with tm.ensure_clean() as path: with tm.assert_produces_warning(InvalidColumnName): original.to_stata(path, {0: 'tc'}) written_and_read_again = self.read_dta(path) modified = original.copy() modified.columns = ['_0'] tm.assert_frame_equal(written_and_read_again.set_index('index'), modified) def test_105(self): # Data obtained from: # http://go.worldbank.org/ZXY29PVJ21 dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta') df = pd.read_stata(dpath) df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]] df0 = pd.DataFrame(df0) df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"] df0['clustnum'] = df0["clustnum"].astype(np.int16) df0['pri_schl'] = df0["pri_schl"].astype(np.int8) df0['psch_num'] = df0["psch_num"].astype(np.int8) df0['psch_dis'] = df0["psch_dis"].astype(np.float32) tm.assert_frame_equal(df.head(3), df0) def test_value_labels_old_format(self): # GH 19417 # # Test that value_labels() returns an empty dict if the file format # predates supporting value labels. 
dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta') reader = StataReader(dpath) assert reader.value_labels() == {} reader.close() def test_date_export_formats(self): columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty'] conversions = {c: c for c in columns} data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns) original = DataFrame([data], columns=columns) original.index.name = 'index' expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time datetime(2006, 11, 20), # Day datetime(2006, 11, 19), # Week datetime(2006, 11, 1), # Month datetime(2006, 10, 1), # Quarter year datetime(2006, 7, 1), # Half year datetime(2006, 1, 1)] # Year expected = DataFrame([expected_values], columns=columns) expected.index.name = 'index' with tm.ensure_clean() as path: original.to_stata(path, conversions) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), expected) def test_write_missing_strings(self): original = DataFrame([["1"], [None]], columns=["foo"]) expected = DataFrame([["1"], [""]], columns=["foo"]) expected.index.name = 'index' with tm.ensure_clean() as path: original.to_stata(path) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), expected) @pytest.mark.parametrize('version', [114, 117]) @pytest.mark.parametrize('byteorder', ['>', '<']) def test_bool_uint(self, byteorder, version): s0 = Series([0, 1, True], dtype=np.bool) s1 = Series([0, 1, 100], dtype=np.uint8) s2 = Series([0, 1, 255], dtype=np.uint8) s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16) s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16) s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32) s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32) original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3, 's4': s4, 's5': s5, 's6': s6}) original.index.name = 'index' expected = original.copy() expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32, np.int32, np.float64) for c, t in zip(expected.columns, expected_types): expected[c] = expected[c].astype(t) with tm.ensure_clean() as path: original.to_stata(path, byteorder=byteorder, version=version) written_and_read_again = self.read_dta(path) written_and_read_again = written_and_read_again.set_index('index') tm.assert_frame_equal(written_and_read_again, expected) def test_variable_labels(self): with StataReader(self.dta16_115) as rdr: sr_115 = rdr.variable_labels() with StataReader(self.dta16_117) as rdr: sr_117 = rdr.variable_labels() keys = ('var1', 'var2', 'var3') labels = ('label1', 'label2', 'label3') for k, v in compat.iteritems(sr_115): assert k in sr_117 assert v == sr_117[k] assert k in keys assert v in labels def test_minimal_size_col(self): str_lens = (1, 100, 244) s = {} for str_len in str_lens: s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len]) original = DataFrame(s) with tm.ensure_clean() as path: original.to_stata(path, write_index=False) with StataReader(path) as sr: typlist = sr.typlist variables = sr.varlist formats = sr.fmtlist for variable, fmt, typ in zip(variables, formats, typlist): assert int(variable[1:]) == int(fmt[1:-1]) assert int(variable[1:]) == typ def test_excessively_long_string(self): str_lens = (1, 244, 500) s = {} for str_len in str_lens: s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len]) original = DataFrame(s) with pytest.raises(ValueError): with tm.ensure_clean() as path: original.to_stata(path) def test_missing_value_generator(self): types = ('b', 'h', 'l') df = 
DataFrame([[0.0]], columns=['float_']) with tm.ensure_clean() as path: df.to_stata(path) with StataReader(path) as rdr: valid_range = rdr.VALID_RANGE expected_values = ['.' + chr(97 + i) for i in range(26)] expected_values.insert(0, '.') for t in types: offset = valid_range[t][1] for i in range(0, 27): val = StataMissingValue(offset + 1 + i) assert val.string == expected_values[i] # Test extremes for floats val = StataMissingValue(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]) assert val.string == '.' val = StataMissingValue(struct.unpack('<f', b'\x00\xd0\x00\x7f')[0]) assert val.string == '.z' # Test extremes for floats val = StataMissingValue(struct.unpack( '<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]) assert val.string == '.' val = StataMissingValue(struct.unpack( '<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0]) assert val.string == '.z' @pytest.mark.parametrize( 'file', ['dta17_113', 'dta17_115', 'dta17_117']) def test_missing_value_conversion(self, file): columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_'] smv = StataMissingValue(101) keys = [key for key in iterkeys(smv.MISSING_VALUES)] keys.sort() data = [] for i in range(27): row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)] data.append(row) expected = DataFrame(data, columns=columns) parsed = read_stata(getattr(self, file), convert_missing=True) tm.assert_frame_equal(parsed, expected) def test_big_dates(self): yr = [1960, 2000, 9999, 100, 2262, 1677] mo = [1, 1, 12, 1, 4, 9] dd = [1, 1, 31, 1, 22, 23] hr = [0, 0, 23, 0, 0, 0] mm = [0, 0, 59, 0, 0, 0] ss = [0, 0, 59, 0, 0, 0] expected = [] for i in range(len(yr)): row = [] for j in range(7): if j == 0: row.append( datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i])) elif j == 6: row.append(datetime(yr[i], 1, 1)) else: row.append(datetime(yr[i], mo[i], dd[i])) expected.append(row) expected.append([pd.NaT] * 7) columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq', 'date_th', 'date_ty'] # Fixes for weekly, quarterly,half,year expected[2][2] = datetime(9999, 12, 24) expected[2][3] = datetime(9999, 12, 1) expected[2][4] = datetime(9999, 10, 1) expected[2][5] = datetime(9999, 7, 1) expected[4][2] = datetime(2262, 4, 16) expected[4][3] = expected[4][4] = datetime(2262, 4, 1) expected[4][5] = expected[4][6] = datetime(2262, 1, 1) expected[5][2] = expected[5][3] = expected[ 5][4] = datetime(1677, 10, 1) expected[5][5] = expected[5][6] = datetime(1678, 1, 1) expected = DataFrame(expected, columns=columns, dtype=np.object) parsed_115 = read_stata(self.dta18_115) parsed_117 = read_stata(self.dta18_117) tm.assert_frame_equal(expected, parsed_115, check_datetimelike_compat=True) tm.assert_frame_equal(expected, parsed_117, check_datetimelike_compat=True) date_conversion = {c: c[-2:] for c in columns} # {c : c[-2:] for c in columns} with tm.ensure_clean() as path: expected.index.name = 'index' expected.to_stata(path, date_conversion) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), expected, check_datetimelike_compat=True) def test_dtype_conversion(self): expected = self.read_csv(self.csv15) expected['byte_'] = expected['byte_'].astype(np.int8) expected['int_'] = expected['int_'].astype(np.int16) expected['long_'] = expected['long_'].astype(np.int32) expected['float_'] = expected['float_'].astype(np.float32) expected['double_'] = expected['double_'].astype(np.float64) expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',)) no_conversion = read_stata(self.dta15_117, 
convert_dates=True) tm.assert_frame_equal(expected, no_conversion) conversion = read_stata(self.dta15_117, convert_dates=True, preserve_dtypes=False) # read_csv types are the same expected = self.read_csv(self.csv15) expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',)) tm.assert_frame_equal(expected, conversion) def test_drop_column(self): expected = self.read_csv(self.csv15) expected['byte_'] = expected['byte_'].astype(np.int8) expected['int_'] = expected['int_'].astype(np.int16) expected['long_'] = expected['long_'].astype(np.int32) expected['float_'] = expected['float_'].astype(np.float32) expected['double_'] = expected['double_'].astype(np.float64) expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',)) columns = ['byte_', 'int_', 'long_'] expected = expected[columns] dropped = read_stata(self.dta15_117, convert_dates=True, columns=columns) tm.assert_frame_equal(expected, dropped) # See PR 10757 columns = ['int_', 'long_', 'byte_'] expected = expected[columns] reordered = read_stata(self.dta15_117, convert_dates=True, columns=columns) tm.assert_frame_equal(expected, reordered) with pytest.raises(ValueError): columns = ['byte_', 'byte_'] read_stata(self.dta15_117, convert_dates=True, columns=columns) with pytest.raises(ValueError): columns = ['byte_', 'int_', 'long_', 'not_found'] read_stata(self.dta15_117, convert_dates=True, columns=columns) @pytest.mark.parametrize('version', [114, 117]) @pytest.mark.filterwarnings( "ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch" ) def test_categorical_writing(self, version): original = DataFrame.from_records( [ ["one", "ten", "one", "one", "one", 1], ["two", "nine", "two", "two", "two", 2], ["three", "eight", "three", "three", "three", 3], ["four", "seven", 4, "four", "four", 4], ["five", "six", 5, np.nan, "five", 5], ["six", "five", 6, np.nan, "six", 6], ["seven", "four", 7, np.nan, "seven", 7], ["eight", "three", 8, np.nan, "eight", 8], ["nine", "two", 9, np.nan, "nine", 9], ["ten", "one", "ten", np.nan, "ten", 10] ], columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled', 'labeled_with_missings', 'float_labelled', 'unlabeled']) expected = original.copy() # these are all categoricals original = pd.concat([original[col].astype('category') for col in original], axis=1) expected['incompletely_labeled'] = expected[ 'incompletely_labeled'].apply(str) expected['unlabeled'] = expected['unlabeled'].apply(str) expected = pd.concat([expected[col].astype('category') for col in expected], axis=1) expected.index.name = 'index' with tm.ensure_clean() as path: original.to_stata(path, version=version) written_and_read_again = self.read_dta(path) res = written_and_read_again.set_index('index') tm.assert_frame_equal(res, expected, check_categorical=False) def test_categorical_warnings_and_errors(self): # Warning for non-string labels # Error for labels too long original = pd.DataFrame.from_records( [['a' * 10000], ['b' * 10000], ['c' * 10000], ['d' * 10000]], columns=['Too_long']) original = pd.concat([original[col].astype('category') for col in original], axis=1) with tm.ensure_clean() as path: pytest.raises(ValueError, original.to_stata, path) original = pd.DataFrame.from_records( [['a'], ['b'], ['c'], ['d'], [1]], columns=['Too_long']) original = pd.concat([original[col].astype('category') for col in original], axis=1) with tm.assert_produces_warning(pd.io.stata.ValueLabelTypeMismatch): original.to_stata(path) # should get a warning for mixed content 
@pytest.mark.parametrize('version', [114, 117]) def test_categorical_with_stata_missing_values(self, version): values = [['a' + str(i)] for i in range(120)] values.append([np.nan]) original = pd.DataFrame.from_records(values, columns=['many_labels']) original = pd.concat([original[col].astype('category') for col in original], axis=1) original.index.name = 'index' with tm.ensure_clean() as path: original.to_stata(path, version=version) written_and_read_again = self.read_dta(path) res = written_and_read_again.set_index('index') tm.assert_frame_equal(res, original, check_categorical=False) @pytest.mark.parametrize( 'file', ['dta19_115', 'dta19_117']) def test_categorical_order(self, file): # Directly construct using expected codes # Format: is_cat, col_name, labels (in order), underlying data expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)), (True, 'reverse', ['a', 'b', 'c', 'd', 'e'], np.arange(5)[::-1]), (True, 'noorder', ['a', 'b', 'c', 'd', 'e'], np.array([2, 1, 4, 0, 3])), (True, 'floating', [ 'a', 'b', 'c', 'd', 'e'], np.arange(0, 5)), (True, 'float_missing', [ 'a', 'd', 'e'], np.array([0, 1, 2, -1, -1])), (False, 'nolabel', [ 1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)), (True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'], np.arange(5))] cols = [] for is_cat, col, labels, codes in expected: if is_cat: cols.append((col, pd.Categorical.from_codes(codes, labels))) else: cols.append((col, pd.Series(labels, dtype=np.float32))) expected = DataFrame.from_dict(OrderedDict(cols)) # Read with and without categoricals, ensure order is identical file = getattr(self, file) parsed = read_stata(file) tm.assert_frame_equal(expected, parsed, check_categorical=False) # Check identity of codes for col in expected: if is_categorical_dtype(expected[col]): tm.assert_series_equal(expected[col].cat.codes, parsed[col].cat.codes) tm.assert_index_equal(expected[col].cat.categories, parsed[col].cat.categories) @pytest.mark.parametrize( 'file', ['dta20_115', 'dta20_117']) def test_categorical_sorting(self, file): parsed = read_stata(getattr(self, file)) # Sort based on codes, not strings parsed = parsed.sort_values("srh", na_position='first') # Don't sort index parsed.index = np.arange(parsed.shape[0]) codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4] categories = ["Poor", "Fair", "Good", "Very good", "Excellent"] cat = pd.Categorical.from_codes(codes=codes, categories=categories) expected = pd.Series(cat, name='srh') tm.assert_series_equal(expected, parsed["srh"], check_categorical=False) @pytest.mark.parametrize( 'file', ['dta19_115', 'dta19_117']) def test_categorical_ordering(self, file): file = getattr(self, file) parsed = read_stata(file) parsed_unordered = read_stata(file, order_categoricals=False) for col in parsed: if not is_categorical_dtype(parsed[col]): continue assert parsed[col].cat.ordered assert not parsed_unordered[col].cat.ordered @pytest.mark.parametrize( 'file', ['dta1_117', 'dta2_117', 'dta3_117', 'dta4_117', 'dta14_117', 'dta15_117', 'dta16_117', 'dta17_117', 'dta18_117', 'dta19_117', 'dta20_117']) @pytest.mark.parametrize( 'chunksize', [1, 2]) @pytest.mark.parametrize( 'convert_categoricals', [False, True]) @pytest.mark.parametrize( 'convert_dates', [False, True]) def test_read_chunks_117(self, file, chunksize, convert_categoricals, convert_dates): fname = getattr(self, file) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") parsed = read_stata( fname, convert_categoricals=convert_categoricals, convert_dates=convert_dates) itr = read_stata( fname,
iterator=True, convert_categoricals=convert_categoricals, convert_dates=convert_dates) pos = 0 for j in range(5): with warnings.catch_warnings(record=True) as w: # noqa warnings.simplefilter("always") try: chunk = itr.read(chunksize) except StopIteration: break from_frame = parsed.iloc[pos:pos + chunksize, :] tm.assert_frame_equal( from_frame, chunk, check_dtype=False, check_datetimelike_compat=True, check_categorical=False) pos += chunksize itr.close() def test_iterator(self): fname = self.dta3_117 parsed = read_stata(fname) with read_stata(fname, iterator=True) as itr: chunk = itr.read(5) tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) with read_stata(fname, chunksize=5) as itr: chunk = list(itr) tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0]) with read_stata(fname, iterator=True) as itr: chunk = itr.get_chunk(5) tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) with read_stata(fname, chunksize=5) as itr: chunk = itr.get_chunk() tm.assert_frame_equal(parsed.iloc[0:5, :], chunk) # GH12153 with read_stata(fname, chunksize=4) as itr: from_chunks = pd.concat(itr) tm.assert_frame_equal(parsed, from_chunks) @pytest.mark.parametrize( 'file', ['dta2_115', 'dta3_115', 'dta4_115', 'dta14_115', 'dta15_115', 'dta16_115', 'dta17_115', 'dta18_115', 'dta19_115', 'dta20_115']) @pytest.mark.parametrize( 'chunksize', [1, 2]) @pytest.mark.parametrize( 'convert_categoricals', [False, True]) @pytest.mark.parametrize( 'convert_dates', [False, True]) def test_read_chunks_115(self, file, chunksize, convert_categoricals, convert_dates): fname = getattr(self, file) # Read the whole file with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") parsed = read_stata( fname, convert_categoricals=convert_categoricals, convert_dates=convert_dates) # Compare to what we get when reading by chunk itr = read_stata( fname, iterator=True, convert_dates=convert_dates, convert_categoricals=convert_categoricals) pos = 0 for j in range(5): with warnings.catch_warnings(record=True) as w: # noqa warnings.simplefilter("always") try: chunk = itr.read(chunksize) except StopIteration: break from_frame = parsed.iloc[pos:pos + chunksize, :] tm.assert_frame_equal( from_frame, chunk, check_dtype=False, check_datetimelike_compat=True, check_categorical=False) pos += chunksize itr.close() def test_read_chunks_columns(self): fname = self.dta3_117 columns = ['quarter', 'cpi', 'm1'] chunksize = 2 parsed = read_stata(fname, columns=columns) with read_stata(fname, iterator=True) as itr: pos = 0 for j in range(5): chunk = itr.read(chunksize, columns=columns) if chunk is None: break from_frame = parsed.iloc[pos:pos + chunksize, :] tm.assert_frame_equal(from_frame, chunk, check_dtype=False) pos += chunksize @pytest.mark.parametrize('version', [114, 117]) def test_write_variable_labels(self, version): # GH 13631, add support for writing variable labels original = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1.0, 3.0, 27.0, 81.0], 'c': ['Atlanta', 'Birmingham', 'Cincinnati', 'Detroit']}) original.index.name = 'index' variable_labels = {'a': 'City Rank', 'b': 'City Exponent', 'c': 'City'} with tm.ensure_clean() as path: original.to_stata(path, variable_labels=variable_labels, version=version) with StataReader(path) as sr: read_labels = sr.variable_labels() expected_labels = {'index': '', 'a': 'City Rank', 'b': 'City Exponent', 'c': 'City'} assert read_labels == expected_labels variable_labels['index'] = 'The Index' with tm.ensure_clean() as path: original.to_stata(path, variable_labels=variable_labels, version=version) with 
StataReader(path) as sr: read_labels = sr.variable_labels() assert read_labels == variable_labels @pytest.mark.parametrize('version', [114, 117]) def test_invalid_variable_labels(self, version): original = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1.0, 3.0, 27.0, 81.0], 'c': ['Atlanta', 'Birmingham', 'Cincinnati', 'Detroit']}) original.index.name = 'index' variable_labels = {'a': 'very long' * 10, 'b': 'City Exponent', 'c': 'City'} with tm.ensure_clean() as path: with pytest.raises(ValueError): original.to_stata(path, variable_labels=variable_labels, version=version) variable_labels['a'] = u'invalid character Œ' with tm.ensure_clean() as path: with pytest.raises(ValueError): original.to_stata(path, variable_labels=variable_labels, version=version) def test_write_variable_label_errors(self): original = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1.0, 3.0, 27.0, 81.0], 'c': ['Atlanta', 'Birmingham', 'Cincinnati', 'Detroit']}) values = [u'\u03A1', u'\u0391', u'\u039D', u'\u0394', u'\u0391', u'\u03A3'] variable_labels_utf8 = {'a': 'City Rank', 'b': 'City Exponent', 'c': u''.join(values)} with pytest.raises(ValueError): with tm.ensure_clean() as path: original.to_stata(path, variable_labels=variable_labels_utf8) variable_labels_long = {'a': 'City Rank', 'b': 'City Exponent', 'c': 'A very, very, very long variable label ' 'that is too long for Stata which means ' 'that it has more than 80 characters'} with pytest.raises(ValueError): with tm.ensure_clean() as path: original.to_stata(path, variable_labels=variable_labels_long) def test_default_date_conversion(self): # GH 12259 dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000), dt.datetime(2012, 12, 21, 12, 21, 12, 21000), dt.datetime(1776, 7, 4, 7, 4, 7, 4000)] original = pd.DataFrame({'nums': [1.0, 2.0, 3.0], 'strs': ['apple', 'banana', 'cherry'], 'dates': dates}) with tm.ensure_clean() as path: original.to_stata(path, write_index=False) reread = read_stata(path, convert_dates=True) tm.assert_frame_equal(original, reread) original.to_stata(path, write_index=False, convert_dates={'dates': 'tc'}) direct = read_stata(path, convert_dates=True) tm.assert_frame_equal(reread, direct) dates_idx = original.columns.tolist().index('dates') original.to_stata(path, write_index=False, convert_dates={dates_idx: 'tc'}) direct = read_stata(path, convert_dates=True) tm.assert_frame_equal(reread, direct) def test_unsupported_type(self): original = pd.DataFrame({'a': [1 + 2j, 2 + 4j]}) with pytest.raises(NotImplementedError): with tm.ensure_clean() as path: original.to_stata(path) def test_unsupported_datetype(self): dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000), dt.datetime(2012, 12, 21, 12, 21, 12, 21000), dt.datetime(1776, 7, 4, 7, 4, 7, 4000)] original = pd.DataFrame({'nums': [1.0, 2.0, 3.0], 'strs': ['apple', 'banana', 'cherry'], 'dates': dates}) with pytest.raises(NotImplementedError): with tm.ensure_clean() as path: original.to_stata(path, convert_dates={'dates': 'tC'}) dates = pd.date_range('1-1-1990', periods=3, tz='Asia/Hong_Kong') original = pd.DataFrame({'nums': [1.0, 2.0, 3.0], 'strs': ['apple', 'banana', 'cherry'], 'dates': dates}) with pytest.raises(NotImplementedError): with tm.ensure_clean() as path: original.to_stata(path) def test_repeated_column_labels(self): # GH 13923 with pytest.raises(ValueError) as cm: read_stata(self.dta23, convert_categoricals=True) assert 'wolof' in cm.exception def test_stata_111(self): # 111 is an old version but still used by current versions of # SAS when exporting to Stata format. 
We do not know of any # on-line documentation for this version. df = read_stata(self.dta24_111) original = pd.DataFrame({'y': [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0], 'x': [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6], 'w': [2, np.NaN, 5, 2, 4, 4, 3, 1, 2, 3], 'z': ['a', 'b', 'c', 'd', 'e', '', 'g', 'h', 'i', 'j']}) original = original[['y', 'x', 'w', 'z']] tm.assert_frame_equal(original, df) def test_out_of_range_double(self): # GH 14618 df = DataFrame({'ColumnOk': [0.0, np.finfo(np.double).eps, 4.49423283715579e+307], 'ColumnTooBig': [0.0, np.finfo(np.double).eps, np.finfo(np.double).max]}) with pytest.raises(ValueError) as cm: with tm.ensure_clean() as path: df.to_stata(path) assert 'ColumnTooBig' in cm.exception df.loc[2, 'ColumnTooBig'] = np.inf with pytest.raises(ValueError) as cm: with tm.ensure_clean() as path: df.to_stata(path) assert 'ColumnTooBig' in cm.exception assert 'infinity' in cm.exception def test_out_of_range_float(self): original = DataFrame({'ColumnOk': [0.0, np.finfo(np.float32).eps, np.finfo(np.float32).max / 10.0], 'ColumnTooBig': [0.0, np.finfo(np.float32).eps, np.finfo(np.float32).max]}) original.index.name = 'index' for col in original: original[col] = original[col].astype(np.float32) with tm.ensure_clean() as path: original.to_stata(path) reread = read_stata(path) original['ColumnTooBig'] = original['ColumnTooBig'].astype( np.float64) tm.assert_frame_equal(original, reread.set_index('index')) original.loc[2, 'ColumnTooBig'] = np.inf with pytest.raises(ValueError) as cm: with tm.ensure_clean() as path: original.to_stata(path) assert 'ColumnTooBig' in cm.exception assert 'infinity' in cm.exception def test_path_pathlib(self): df = tm.makeDataFrame() df.index.name = 'index' reader = lambda x: read_stata(x).set_index('index') result = tm.round_trip_pathlib(df.to_stata, reader) tm.assert_frame_equal(df, result) def test_pickle_path_localpath(self): df = tm.makeDataFrame() df.index.name = 'index' reader = lambda x: read_stata(x).set_index('index') result = tm.round_trip_localpath(df.to_stata, reader) tm.assert_frame_equal(df, result) @pytest.mark.parametrize( 'write_index', [True, False]) def test_value_labels_iterator(self, write_index): # GH 16923 d = {'A': ['B', 'E', 'C', 'A', 'E']} df = pd.DataFrame(data=d) df['A'] = df['A'].astype('category') with tm.ensure_clean() as path: df.to_stata(path, write_index=write_index) with pd.read_stata(path, iterator=True) as dta_iter: value_labels = dta_iter.value_labels() assert value_labels == {'A': {0: 'A', 1: 'B', 2: 'C', 3: 'E'}} def test_set_index(self): # GH 17328 df = tm.makeDataFrame() df.index.name = 'index' with tm.ensure_clean() as path: df.to_stata(path) reread = pd.read_stata(path, index_col='index') tm.assert_frame_equal(df, reread) @pytest.mark.parametrize( 'column', ['ms', 'day', 'week', 'month', 'qtr', 'half', 'yr']) def test_date_parsing_ignores_format_details(self, column): # GH 17797 # # Test that display formats are ignored when determining if a numeric # column is a date value. # # All date types are stored as numbers and format associated with the # column denotes both the type of the date and the display format. # # STATA supports 9 date types which each have distinct units. We test 7 # of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that # accounts for leap seconds and %tb relies on STATAs business calendar. 
df = read_stata(self.stata_dates) unformatted = df.loc[0, column] formatted = df.loc[0, column + "_fmt"] assert unformatted == formatted def test_writer_117(self): original = DataFrame(data=[['string', 'object', 1, 1, 1, 1.1, 1.1, np.datetime64('2003-12-25'), 'a', 'a' * 2045, 'a' * 5000, 'a'], ['string-1', 'object-1', 1, 1, 1, 1.1, 1.1, np.datetime64('2003-12-26'), 'b', 'b' * 2045, '', ''] ], columns=['string', 'object', 'int8', 'int16', 'int32', 'float32', 'float64', 'datetime', 's1', 's2045', 'srtl', 'forced_strl']) original['object'] = Series(original['object'], dtype=object) original['int8'] = Series(original['int8'], dtype=np.int8) original['int16'] = Series(original['int16'], dtype=np.int16) original['int32'] = original['int32'].astype(np.int32) original['float32'] = Series(original['float32'], dtype=np.float32) original.index.name = 'index' original.index = original.index.astype(np.int32) copy = original.copy() with tm.ensure_clean() as path: original.to_stata(path, convert_dates={'datetime': 'tc'}, convert_strl=['forced_strl'], version=117) written_and_read_again = self.read_dta(path) # original.index is np.int32, read index is np.int64 tm.assert_frame_equal(written_and_read_again.set_index('index'), original, check_index_type=False) tm.assert_frame_equal(original, copy) def test_convert_strl_name_swap(self): original = DataFrame([['a' * 3000, 'A', 'apple'], ['b' * 1000, 'B', 'banana']], columns=['long1' * 10, 'long', 1]) original.index.name = 'index' with tm.assert_produces_warning(pd.io.stata.InvalidColumnName): with tm.ensure_clean() as path: original.to_stata(path, convert_strl=['long', 1], version=117) reread = self.read_dta(path) reread = reread.set_index('index') reread.columns = original.columns tm.assert_frame_equal(reread, original, check_index_type=False) def test_invalid_date_conversion(self): # GH 12259 dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000), dt.datetime(2012, 12, 21, 12, 21, 12, 21000), dt.datetime(1776, 7, 4, 7, 4, 7, 4000)] original = pd.DataFrame({'nums': [1.0, 2.0, 3.0], 'strs': ['apple', 'banana', 'cherry'], 'dates': dates}) with tm.ensure_clean() as path: with pytest.raises(ValueError): original.to_stata(path, convert_dates={'wrong_name': 'tc'}) @pytest.mark.parametrize('version', [114, 117]) def test_nonfile_writing(self, version): # GH 21041 bio = io.BytesIO() df = tm.makeDataFrame() df.index.name = 'index' with tm.ensure_clean() as path: df.to_stata(bio, version=version) bio.seek(0) with open(path, 'wb') as dta: dta.write(bio.read()) reread = pd.read_stata(path, index_col='index') tm.assert_frame_equal(df, reread) def test_gzip_writing(self): # writing version 117 requires seek and cannot be used with gzip df = tm.makeDataFrame() df.index.name = 'index' with tm.ensure_clean() as path: with gzip.GzipFile(path, 'wb') as gz: df.to_stata(gz, version=114) with gzip.GzipFile(path, 'rb') as gz: reread = pd.read_stata(gz, index_col='index') tm.assert_frame_equal(df, reread) def test_unicode_dta_118(self): unicode_df = self.read_dta(self.dta25_118) columns = ['utf8', 'latin1', 'ascii', 'utf8_strl', 'ascii_strl'] values = [[u'ραηδας', u'PÄNDÄS', 'p', u'ραηδας', 'p'], [u'ƤĀńĐąŜ', u'Ö', 'a', u'ƤĀńĐąŜ', 'a'], [u'ᴘᴀᴎᴅᴀS', u'Ü', 'n', u'ᴘᴀᴎᴅᴀS', 'n'], [' ', ' ', 'd', ' ', 'd'], [' ', '', 'a', ' ', 'a'], ['', '', 's', '', 's'], ['', '', ' ', '', ' ']] expected = pd.DataFrame(values, columns=columns) tm.assert_frame_equal(unicode_df, expected)
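A minimal side note (not part of the test file above): the date tests here, including test_date_parsing_ignores_format_details, lean on Stata's date encoding, in which every date type is stored as a number counted from the 1960-01-01 epoch and the display format (%tc, %td, ...) selects the unit. A sketch with made-up helper names:

# Stata date encoding for two of the formats exercised above.
from datetime import datetime, timedelta

STATA_EPOCH = datetime(1960, 1, 1)

def from_td(days):
    # %td stores elapsed calendar days since the epoch
    return STATA_EPOCH + timedelta(days=days)

def from_tc(ms):
    # %tc stores elapsed milliseconds since the epoch
    # (%tC is the variant that also counts leap seconds)
    return STATA_EPOCH + timedelta(milliseconds=ms)

assert from_td(0) == datetime(1960, 1, 1)
assert from_tc(86400000) == datetime(1960, 1, 2)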
repo_name: dsm054/pandas
test_path: pandas/tests/io/test_stata.py
code_path: pandas/io/sas/sas7bdat.py
from datetime import datetime from utils.appliance.implementations.ui import navigate_to NONE_GROUP = 'NONE' class Timelines(object): """ Represents the common UI page that shows generated events of different providers as a timeline. The page contains several drop-down filters for the displayed events, and this class provides methods for changing those filters. After each filter change the page reloads and the displayed events graphic changes; after each reload this class re-reads the displayed events. Its main purpose is to check whether a particular event is displayed on the timelines page. Usage: timelines.change_interval('Days') timelines.select_event_category('Application') timelines.check_detailed_events(True) timelines.contains_event('hawkular_deployment.ok') """ def __init__(self, o): self._object = o self._events = [] self.reload() def change_event_type(self, value): self.timelines_view.filter.event_type.select_by_visible_text(value) self.timelines_view.filter.apply.click() self._reload_events() def change_interval(self, value): self.timelines_view.filter.time_range.select_by_visible_text(value) self.timelines_view.filter.apply.click() self._reload_events() def change_date(self, value): self.timelines_view.filter.time_position.select_by_visible_text(value) self.timelines_view.filter.apply.click() self._reload_events() def check_detailed_events(self, value): self.timelines_view.filter.detailed_events.fill(value) self.timelines_view.filter.apply.click() self._reload_events() def select_event_category(self, value): self.timelines_view.filter.event_category.select_by_visible_text(value) self.timelines_view.filter.apply.click() self._reload_events() def contains_event(self, event_type, date_after=datetime.min): """Checks whether the list of events contains the given 'event_type' with a date not earlier than 'date_after'. If 'date_after' is not provided, datetime.min is used. """ if date_after and not isinstance(date_after, datetime): raise TypeError("'date_after' should be an instance of datetime") for event in self._events: if event.event_type == event_type and datetime.strptime( event.date_time, '%Y-%m-%d %H:%M:%S %Z') >= date_after: return True return False def reload(self): self.timelines_view = navigate_to(self._object, 'Timelines') self._reload_events() def _reload_events(self): self._events = self.timelines_view.chart.get_events()
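A hypothetical usage sketch for the class above ('vm' and the event type are placeholders, not defined in this module): narrow the timeline with the filter helpers, then assert that a given event appeared after a known time.

from datetime import datetime

timelines = Timelines(vm)  # 'vm' stands in for any object with a 'Timelines' destination
timelines.change_interval('Days')
timelines.select_event_category('Application')
assert timelines.contains_event('hawkular_deployment.ok',
                                date_after=datetime(2017, 1, 1))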
# -*- coding: utf-8 -*- """This module contains tests that exercise the canned VMware Automate stuff.""" import fauxfactory import pytest from textwrap import dedent from cfme import test_requirements from cfme.automate.buttons import ButtonGroup, Button from cfme.automate.explorer.domain import DomainCollection from cfme.common.vm import VM from cfme.infrastructure.provider.virtualcenter import VMwareProvider from cfme.web_ui import flash, toolbar from utils import testgen from utils.blockers import BZ from utils.log import logger from utils.wait import wait_for pytestmark = [ test_requirements.automate, pytest.mark.meta(server_roles="+automate"), pytest.mark.ignore_stream("upstream"), pytest.mark.tier(3)] def pytest_generate_tests(metafunc): argnames, argvalues, idlist = testgen.providers_by_class( metafunc, [VMwareProvider], required_fields=[['provisioning', 'template']]) testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope='module') @pytest.fixture(scope="module") def domain_collection(): return DomainCollection() @pytest.yield_fixture(scope="module") def domain(request, domain_collection): domain = domain_collection.create(name=fauxfactory.gen_alphanumeric(), enabled=True) yield domain if domain.exists: domain.delete() @pytest.fixture(scope="module") def cls(request, domain): original_class = domain.collection\ .instantiate(name='ManageIQ')\ .namespaces.instantiate(name='System')\ .classes.instantiate(name='Request') original_class.copy_to(domain=domain) return domain.namespaces.instantiate(name='System').classes.instantiate(name='Request') @pytest.yield_fixture(scope="module") def testing_group(request): group_desc = fauxfactory.gen_alphanumeric() group = ButtonGroup( text=group_desc, hover=group_desc, type=ButtonGroup.VM_INSTANCE ) group.create() yield group group.delete_if_exists() @pytest.yield_fixture(scope="function") def testing_vm(request, setup_provider, provider): vm = VM.factory( "test_ae_hd_{}".format(fauxfactory.gen_alphanumeric()), provider, template_name=provider.data['full_template']['name'] ) try: vm.create_on_provider(find_in_cfme=True, allow_skip="default") yield vm finally: vm.delete_from_provider() if vm.exists: vm.delete() @pytest.mark.meta(blockers=[1211627, BZ(1311221, forced_streams=['5.5'])]) def test_vmware_vimapi_hotadd_disk( request, testing_group, provider, testing_vm, domain, cls): """ Tests hot adding a disk to vmware vm. This test exercises the ``VMware_HotAdd_Disk`` method, located in ``/Integration/VMware/VimApi`` Steps: * It creates an instance in ``System/Request`` that can be accessible from eg. a button. * Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``. The button shall belong in the VM and instance button group. * After the button is created, it goes to a VM's summary page, clicks the button. * The test waits until the capacity of disks is raised. Metadata: test_flag: hotdisk, provision """ meth = cls.methods.create( name='load_value_{}'.format(fauxfactory.gen_alpha()), script=dedent('''\ # Sets the capacity of the new disk. 
$evm.root['size'] = 1 # GB exit MIQ_OK ''')) request.addfinalizer(meth.delete_if_exists) # Instance that calls the method and is accessible from the button instance = cls.instances.create( name="VMware_HotAdd_Disk_{}".format(fauxfactory.gen_alpha()), fields={ "meth4": {'value': meth.name}, # To get the value "rel5": {'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"}, }, ) request.addfinalizer(instance.delete_if_exists) # Button that will invoke the dialog and action button_name = fauxfactory.gen_alphanumeric() button = Button(group=testing_group, text=button_name, hover=button_name, system="Request", request=instance.name) request.addfinalizer(button.delete_if_exists) button.create() def _get_disk_capacity(): testing_vm.summary.reload() return testing_vm.summary.datastore_allocation_summary.total_allocation.value original_disk_capacity = _get_disk_capacity() logger.info('Initial disk allocation: %s', original_disk_capacity) toolbar.select(testing_group.text, button.text) flash.assert_no_errors() try: wait_for( lambda: _get_disk_capacity() > original_disk_capacity, num_sec=180, delay=5) finally: logger.info('End disk capacity: %s', _get_disk_capacity())
repo_name: jteehan/cfme_tests
test_path: cfme/tests/automate/test_vmware_methods.py
code_path: cfme/web_ui/timelines.py
import os import sqlalchemy as sa from asyncpgsa import create_pool, pg from dotenv import find_dotenv, load_dotenv from sqlalchemy import Table from sqlalchemy.dialects.postgresql import insert as pg_insert from sqlalchemy.dialects.postgresql.dml import Insert from . import model metadata = sa.MetaData() load_dotenv(find_dotenv()) pg_host = os.getenv('PUCKDB_DB_HOST') pg_port = int(os.getenv('PUCKDB_DB_PORT', '5432')) pg_database = os.getenv('PUCKDB_DB_DATABASE') pg_user = os.getenv('PUCKDB_DB_USER') pg_pass = os.getenv('PUCKDB_DB_PASSWORD') player_tbl = sa.Table('player', metadata, sa.Column('id', sa.Integer, primary_key=True), sa.Column('first_name', sa.String), sa.Column('last_name', sa.String), sa.Column('position', sa.Enum(model.PlayerPosition, name='player_position')), sa.Column('handedness', sa.Enum(model.PlayerHandedness, name='player_handedness')), sa.Column('height', sa.Float), sa.Column('weight', sa.SmallInteger), sa.Column('captain', sa.Boolean), sa.Column('alternate_captain', sa.Boolean), sa.Column('birth_city', sa.String), sa.Column('birth_country', sa.String), sa.Column('birth_date', sa.Date), sa.Column('birth_state_province', sa.String), sa.Column('nationality', sa.String) ) game_tbl = sa.Table('game', metadata, sa.Column('id', sa.BigInteger, primary_key=True), sa.Column('version', sa.BigInteger, primary_key=True), sa.Column('season', sa.Integer, nullable=False), sa.Column('type', sa.Enum(model.GameType, name='game_type')), sa.Column('away', sa.SmallInteger, sa.ForeignKey('team.id'), nullable=False), sa.Column('home', sa.SmallInteger, sa.ForeignKey('team.id'), nullable=False), sa.Column('date_start', sa.DateTime(timezone=True), index=True), sa.Column('date_end', sa.DateTime(timezone=True), nullable=True), sa.Column('first_star', sa.Integer, sa.ForeignKey('player.id'), nullable=True), sa.Column('second_star', sa.Integer, sa.ForeignKey('player.id'), nullable=True), sa.Column('third_star', sa.Integer, sa.ForeignKey('player.id'), nullable=True) ) team_tbl = sa.Table('team', metadata, sa.Column('id', sa.SmallInteger, primary_key=True), sa.Column('name', sa.String), sa.Column('team_name', sa.String), sa.Column('abbreviation', sa.String), sa.Column('city', sa.String) ) event_tbl = sa.Table('event', metadata, sa.Column('game', sa.BigInteger, nullable=False, primary_key=True), sa.Column('version', sa.BigInteger, primary_key=True), sa.Column('id', sa.Integer, nullable=False, primary_key=True), sa.Column('team', sa.SmallInteger, sa.ForeignKey('team.id'), nullable=False), sa.Column('type', sa.Enum(model.EventType, name='event_type'), nullable=False), sa.Column('date', sa.DateTime(timezone=True), nullable=False), sa.Column('shot_type', sa.Enum(model.ShotType, name='shot_type')), sa.Column('period', sa.SmallInteger, nullable=False), sa.Column('location_x', sa.Float, nullable=True), sa.Column('location_y', sa.Float, nullable=True), sa.ForeignKeyConstraint(('game', 'version'), ['game.id', 'game.version']) ) async def setup(database: str = None): await pg.init( host=pg_host, port=pg_port, database=database or pg_database, user=pg_user, password=pg_pass, min_size=5, max_size=10 ) async def get_pool(database: str = None): return await create_pool( host=pg_host, port=pg_port, database=database or pg_database, user=pg_user, password=pg_pass, min_size=5, max_size=10 ) def upsert(table: Table, data: dict, update_on_conflict: bool = False) -> Insert: insert_data = pg_insert(table).values( **data ) if update_on_conflict: return insert_data.on_conflict_do_update( constraint=table.primary_key, 
set_=data ) return insert_data.on_conflict_do_nothing( constraint=table.primary_key ) def drop(database: str = None): engine = sa.create_engine(get_connection_str(database)) metadata.drop_all(engine) def get_connection_str(database: str = None) -> str: return f'postgresql+pg8000://{pg_user}:{pg_pass}@{pg_host}:{pg_port}/{database or pg_database}'
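A minimal usage sketch for the upsert() helper above (the row values are placeholders): it emits an INSERT ... ON CONFLICT statement keyed on the table's primary key that either updates the conflicting row or leaves it alone.

# Update the existing team row when the primary key already exists.
stmt = upsert(team_tbl, {
    'id': 1,
    'name': 'New Jersey Devils',
    'abbreviation': 'NJD',
}, update_on_conflict=True)

# Insert-or-ignore: keep whatever row is already stored.
stmt_ignore = upsert(team_tbl, {'id': 1, 'name': 'New Jersey Devils'})

# Either statement can then be run through the asyncpgsa pool configured by
# setup()/get_pool(), e.g. (inside a coroutine): await pg.fetchrow(stmt)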
import pytest from puckdb import model class TestModelEvents: def test_type_parser(self): assert model.EventType.blocked_shot == model.parse_enum(model.EventType, 'BLOCKED_SHOT') assert model.EventType.shot == model.parse_enum(model.EventType, 'SHOT') with pytest.raises(ValueError): model.parse_enum(model.EventType, 'GAME_SCHEDULED') # not currently tracked with pytest.raises(ValueError): model.parse_enum(model.EventType, 'NON_SENSE')
repo_name: aaront/puckdb
test_path: tests/test_model.py
code_path: puckdb/db.py
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD 3 clause import itertools import numpy as np from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_warns_message from sklearn import datasets from sklearn.covariance import empirical_covariance, MinCovDet from sklearn.covariance import fast_mcd X = datasets.load_iris().data X_1d = X[:, 0] n_samples, n_features = X.shape def test_mcd(): # Tests the FastMCD algorithm implementation # Small data set # test without outliers (random independent normal data) launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80) # test with a contaminated data set (medium contamination) launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70) # test with a contaminated data set (strong contamination) launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50) # Medium data set launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540) # Large data set launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870) # 1D data set launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350) def test_fast_mcd_on_invalid_input(): X = np.arange(100) assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead', fast_mcd, X) def test_mcd_class_on_invalid_input(): X = np.arange(100) mcd = MinCovDet() assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead', mcd.fit, X) def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support): rand_gen = np.random.RandomState(0) data = rand_gen.randn(n_samples, n_features) # add some outliers outliers_index = rand_gen.permutation(n_samples)[:n_outliers] outliers_offset = 10. * \ (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5) data[outliers_index] += outliers_offset inliers_mask = np.ones(n_samples).astype(bool) inliers_mask[outliers_index] = False pure_data = data[inliers_mask] # compute MCD by fitting an object mcd_fit = MinCovDet(random_state=rand_gen).fit(data) T = mcd_fit.location_ S = mcd_fit.covariance_ H = mcd_fit.support_ # compare with the estimates learnt from the inliers error_location = np.mean((pure_data.mean(0) - T) ** 2) assert(error_location < tol_loc) error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2) assert(error_cov < tol_cov) assert(np.sum(H) >= tol_support) assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_) def test_mcd_issue1127(): # Check that the code does not break with X.shape = (3, 1) # (i.e. n_support = n_samples) rnd = np.random.RandomState(0) X = rnd.normal(size=(3, 1)) mcd = MinCovDet() mcd.fit(X) def test_mcd_issue3367(): # Check that MCD completes when the covariance matrix is singular # i.e. one of the rows and columns are all zeros rand_gen = np.random.RandomState(0) # Think of these as the values for X and Y -> 10 values between -5 and 5 data_values = np.linspace(-5, 5, 10).tolist() # Get the cartesian product of all possible coordinate pairs from above set data = np.array(list(itertools.product(data_values, data_values))) # Add a third column that's all zeros to make our data a set of point # within a plane, which means that the covariance matrix will be singular data = np.hstack((data, np.zeros((data.shape[0], 1)))) # The below line of code should raise an exception if the covariance matrix # is singular. 
As a further test, since we have points in XYZ, the # principal components (Eigenvectors) of these directly relate to the # geometry of the points. Since it's a plane, we should be able to test # that the Eigenvector that corresponds to the smallest Eigenvalue is the # plane normal, specifically [0, 0, 1], since everything is in the XY plane # (as I've set it up above). To do this one would start by: # # evals, evecs = np.linalg.eigh(mcd_fit.covariance_) # normal = evecs[:, np.argmin(evals)] # # After which we need to assert that our `normal` is equal to [0, 0, 1]. # Do note that there is floating point error associated with this, so it's # best to subtract the two and then compare against some small tolerance # (e.g. 1e-12). MinCovDet(random_state=rand_gen).fit(data) def test_mcd_support_covariance_is_zero(): # Check that MCD returns a ValueError with informative message when the # covariance of the support data is equal to 0. X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1]) X_1 = X_1.reshape(-1, 1) X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3]) X_2 = X_2.reshape(-1, 1) msg = ('The covariance matrix of the support data is equal to 0, try to ' 'increase support_fraction') for X in [X_1, X_2]: assert_raise_message(ValueError, msg, MinCovDet().fit, X) def test_mcd_increasing_det_warning(): # Check that a warning is raised if we observe increasing determinants # during the c_step. In theory the sequence of determinants should be # decreasing. Increasing determinants are likely due to ill-conditioned # covariance matrices that result in poor precision matrices. X = [[5.1, 3.5, 1.4, 0.2], [4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2], [4.6, 3.1, 1.5, 0.2], [5.0, 3.6, 1.4, 0.2], [4.6, 3.4, 1.4, 0.3], [5.0, 3.4, 1.5, 0.2], [4.4, 2.9, 1.4, 0.2], [4.9, 3.1, 1.5, 0.1], [5.4, 3.7, 1.5, 0.2], [4.8, 3.4, 1.6, 0.2], [4.8, 3.0, 1.4, 0.1], [4.3, 3.0, 1.1, 0.1], [5.1, 3.5, 1.4, 0.3], [5.7, 3.8, 1.7, 0.3], [5.4, 3.4, 1.7, 0.2], [4.6, 3.6, 1.0, 0.2], [5.0, 3.0, 1.6, 0.2], [5.2, 3.5, 1.5, 0.2]] mcd = MinCovDet(random_state=1) assert_warns_message(RuntimeWarning, "Determinant has increased", mcd.fit, X)
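A minimal sketch (not part of the test file above) finishing the eigenvector check that the test_mcd_issue3367 comment outlines: for points confined to the XY plane, the eigenvector of the fitted covariance with the smallest eigenvalue should be the plane normal [0, 0, 1], up to sign.

import itertools
import numpy as np
from sklearn.covariance import MinCovDet

vals = np.linspace(-5, 5, 10)
data = np.array(list(itertools.product(vals, vals)))
data = np.hstack((data, np.zeros((data.shape[0], 1))))  # z == 0 everywhere

mcd_fit = MinCovDet(random_state=0).fit(data)
evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
normal = evecs[:, np.argmin(evals)]
# Compare up to sign, allowing for floating point error.
assert np.allclose(np.abs(normal), [0.0, 0.0, 1.0], atol=1e-12)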
from __future__ import division import pytest import numpy as np from scipy import sparse from scipy.stats import kstest import io from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_allclose_dense_sparse from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal # make IterativeImputer available from sklearn.experimental import enable_iterative_imputer # noqa from sklearn.datasets import load_boston from sklearn.impute import MissingIndicator from sklearn.impute import SimpleImputer, IterativeImputer from sklearn.dummy import DummyRegressor from sklearn.linear_model import BayesianRidge, ARDRegression, RidgeCV from sklearn.pipeline import Pipeline from sklearn.pipeline import make_union from sklearn.model_selection import GridSearchCV from sklearn import tree from sklearn.random_projection import sparse_random_matrix def _check_statistics(X, X_true, strategy, statistics, missing_values): """Utility function for testing imputation for a given strategy. Test with dense and sparse arrays Check that: - the statistics (mean, median, mode) are correct - the missing values are imputed correctly""" err_msg = "Parameters: strategy = %s, missing_values = %s, " \ "sparse = {0}" % (strategy, missing_values) assert_ae = assert_array_equal if X.dtype.kind == 'f' or X_true.dtype.kind == 'f': assert_ae = assert_array_almost_equal # Normal matrix imputer = SimpleImputer(missing_values, strategy=strategy) X_trans = imputer.fit(X).transform(X.copy()) assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False)) assert_ae(X_trans, X_true, err_msg=err_msg.format(False)) # Sparse matrix imputer = SimpleImputer(missing_values, strategy=strategy) imputer.fit(sparse.csc_matrix(X)) X_trans = imputer.transform(sparse.csc_matrix(X.copy())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True)) assert_ae(X_trans, X_true, err_msg=err_msg.format(True)) @pytest.mark.parametrize("strategy", ['mean', 'median', 'most_frequent', "constant"]) def test_imputation_shape(strategy): # Verify the shapes of the imputed matrix for different strategies. 
X = np.random.randn(10, 2) X[::2] = np.nan imputer = SimpleImputer(strategy=strategy) X_imputed = imputer.fit_transform(sparse.csr_matrix(X)) assert X_imputed.shape == (10, 2) X_imputed = imputer.fit_transform(X) assert X_imputed.shape == (10, 2) iterative_imputer = IterativeImputer(initial_strategy=strategy) X_imputed = iterative_imputer.fit_transform(X) assert X_imputed.shape == (10, 2) @pytest.mark.parametrize("strategy", ["const", 101, None]) def test_imputation_error_invalid_strategy(strategy): X = np.ones((3, 5)) X[0, 0] = np.nan with pytest.raises(ValueError, match=str(strategy)): imputer = SimpleImputer(strategy=strategy) imputer.fit_transform(X) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) def test_imputation_deletion_warning(strategy): X = np.ones((3, 5)) X[:, 0] = np.nan with pytest.warns(UserWarning, match="Deleting"): imputer = SimpleImputer(strategy=strategy, verbose=True) imputer.fit_transform(X) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"]) def test_imputation_error_sparse_0(strategy): # check that error are raised when missing_values = 0 and input is sparse X = np.ones((3, 5)) X[0] = 0 X = sparse.csc_matrix(X) imputer = SimpleImputer(strategy=strategy, missing_values=0) with pytest.raises(ValueError, match="Provide a dense array"): imputer.fit(X) imputer.fit(X.toarray()) with pytest.raises(ValueError, match="Provide a dense array"): imputer.transform(X) def safe_median(arr, *args, **kwargs): # np.median([]) raises a TypeError for numpy >= 1.10.1 length = arr.size if hasattr(arr, 'size') else len(arr) return np.nan if length == 0 else np.median(arr, *args, **kwargs) def safe_mean(arr, *args, **kwargs): # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1 length = arr.size if hasattr(arr, 'size') else len(arr) return np.nan if length == 0 else np.mean(arr, *args, **kwargs) def test_imputation_mean_median(): # Test imputation using the mean and median strategies, when # missing_values != 0. 
rng = np.random.RandomState(0) dim = 10 dec = 10 shape = (dim * dim, dim + dec) zeros = np.zeros(shape[0]) values = np.arange(1, shape[0] + 1) values[4::2] = - values[4::2] tests = [("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))), ("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v))))] for strategy, test_missing_values, true_value_fun in tests: X = np.empty(shape) X_true = np.empty(shape) true_statistics = np.empty(shape[1]) # Create a matrix X with columns # - with only zeros, # - with only missing values # - with zeros, missing values and values # And a matrix X_true containing all true values for j in range(shape[1]): nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1) nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0) nb_values = shape[0] - nb_zeros - nb_missing_values z = zeros[:nb_zeros] p = np.repeat(test_missing_values, nb_missing_values) v = values[rng.permutation(len(values))[:nb_values]] true_statistics[j] = true_value_fun(z, v, p) # Create the columns X[:, j] = np.hstack((v, z, p)) if 0 == test_missing_values: X_true[:, j] = np.hstack((v, np.repeat( true_statistics[j], nb_missing_values + nb_zeros))) else: X_true[:, j] = np.hstack((v, z, np.repeat(true_statistics[j], nb_missing_values))) # Shuffle them the same way np.random.RandomState(j).shuffle(X[:, j]) np.random.RandomState(j).shuffle(X_true[:, j]) # Mean doesn't support columns containing NaNs, median does if strategy == "median": cols_to_keep = ~np.isnan(X_true).any(axis=0) else: cols_to_keep = ~np.isnan(X_true).all(axis=0) X_true = X_true[:, cols_to_keep] _check_statistics(X, X_true, strategy, true_statistics, test_missing_values) def test_imputation_median_special_cases(): # Test median imputation with sparse boundary cases X = np.array([ [0, np.nan, np.nan], # odd: implicit zero [5, np.nan, np.nan], # odd: explicit nonzero [0, 0, np.nan], # even: average two zeros [-5, 0, np.nan], # even: avg zero and neg [0, 5, np.nan], # even: avg zero and pos [4, 5, np.nan], # even: avg nonzeros [-4, -5, np.nan], # even: avg negatives [-1, 2, np.nan], # even: crossing neg and pos ]).transpose() X_imputed_median = np.array([ [0, 0, 0], [5, 5, 5], [0, 0, 0], [-5, 0, -2.5], [0, 5, 2.5], [4, 5, 4.5], [-4, -5, -4.5], [-1, 2, .5], ]).transpose() statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5] _check_statistics(X, X_imputed_median, "median", statistics_median, np.nan) @pytest.mark.parametrize("strategy", ["mean", "median"]) @pytest.mark.parametrize("dtype", [None, object, str]) def test_imputation_mean_median_error_invalid_type(strategy, dtype): X = np.array([["a", "b", 3], [4, "e", 6], ["g", "h", 9]], dtype=dtype) with pytest.raises(ValueError, match="non-numeric data"): imputer = SimpleImputer(strategy=strategy) imputer.fit_transform(X) @pytest.mark.parametrize("strategy", ["constant", "most_frequent"]) @pytest.mark.parametrize("dtype", [str, np.dtype('U'), np.dtype('S')]) def test_imputation_const_mostf_error_invalid_types(strategy, dtype): # Test imputation on non-numeric data using "most_frequent" and "constant" # strategy X = np.array([ [np.nan, np.nan, "a", "f"], [np.nan, "c", np.nan, "d"], [np.nan, "b", "d", np.nan], [np.nan, "c", "d", "h"], ], dtype=dtype) err_msg = "SimpleImputer does not support data" with pytest.raises(ValueError, match=err_msg): imputer = SimpleImputer(strategy=strategy) imputer.fit(X).transform(X) def test_imputation_most_frequent(): # Test imputation using the most-frequent strategy. 
X = np.array([ [-1, -1, 0, 5], [-1, 2, -1, 3], [-1, 1, 3, -1], [-1, 2, 3, 7], ]) X_true = np.array([ [2, 0, 5], [2, 3, 3], [1, 3, 3], [2, 3, 7], ]) # scipy.stats.mode, used in SimpleImputer, doesn't return the first most # frequent as promised in the doc but the lowest most frequent. When this # test will fail after an update of scipy, SimpleImputer will need to be # updated to be consistent with the new (correct) behaviour _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1) @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) def test_imputation_most_frequent_objects(marker): # Test imputation using the most-frequent strategy. X = np.array([ [marker, marker, "a", "f"], [marker, "c", marker, "d"], [marker, "b", "d", marker], [marker, "c", "d", "h"], ], dtype=object) X_true = np.array([ ["c", "a", "f"], ["c", "d", "d"], ["b", "d", "d"], ["c", "d", "h"], ], dtype=object) imputer = SimpleImputer(missing_values=marker, strategy="most_frequent") X_trans = imputer.fit(X).transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("dtype", [object, "category"]) def test_imputation_most_frequent_pandas(dtype): # Test imputation using the most frequent strategy on pandas df pd = pytest.importorskip("pandas") f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n" ",i,x,\n" "a,,y,\n" "a,j,,\n" "b,j,x,") df = pd.read_csv(f, dtype=dtype) X_true = np.array([ ["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"] ], dtype=object) imputer = SimpleImputer(strategy="most_frequent") X_trans = imputer.fit_transform(df) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1., np.nan)]) def test_imputation_constant_error_invalid_type(X_data, missing_value): # Verify that exceptions are raised on invalid fill_value type X = np.full((3, 5), X_data, dtype=float) X[0, 0] = missing_value with pytest.raises(ValueError, match="imputing numerical"): imputer = SimpleImputer(missing_values=missing_value, strategy="constant", fill_value="x") imputer.fit_transform(X) def test_imputation_constant_integer(): # Test imputation using the constant strategy on integers X = np.array([ [-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1] ]) X_true = np.array([ [0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0] ]) imputer = SimpleImputer(missing_values=-1, strategy="constant", fill_value=0) X_trans = imputer.fit_transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("array_constructor", [sparse.csr_matrix, np.asarray]) def test_imputation_constant_float(array_constructor): # Test imputation using the constant strategy on floats X = np.array([ [np.nan, 1.1, 0, np.nan], [1.2, np.nan, 1.3, np.nan], [0, 0, np.nan, np.nan], [1.4, 1.5, 0, np.nan] ]) X_true = np.array([ [-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1] ]) X = array_constructor(X) X_true = array_constructor(X_true) imputer = SimpleImputer(strategy="constant", fill_value=-1) X_trans = imputer.fit_transform(X) assert_allclose_dense_sparse(X_trans, X_true) @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) def test_imputation_constant_object(marker): # Test imputation using the constant strategy on objects X = np.array([ [marker, "a", "b", marker], ["c", marker, "d", marker], ["e", "f", marker, marker], ["g", "h", "i", marker] ], dtype=object) X_true = np.array([ ["missing", "a", "b", "missing"], ["c", "missing", "d", "missing"], ["e", "f", "missing", "missing"], ["g", "h", "i", "missing"] ], dtype=object) imputer = 
SimpleImputer(missing_values=marker, strategy="constant", fill_value="missing") X_trans = imputer.fit_transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("dtype", [object, "category"]) def test_imputation_constant_pandas(dtype): # Test imputation using the constant strategy on pandas df pd = pytest.importorskip("pandas") f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n" ",i,x,\n" "a,,y,\n" "a,j,,\n" "b,j,x,") df = pd.read_csv(f, dtype=dtype) X_true = np.array([ ["missing_value", "i", "x", "missing_value"], ["a", "missing_value", "y", "missing_value"], ["a", "j", "missing_value", "missing_value"], ["b", "j", "x", "missing_value"] ], dtype=object) imputer = SimpleImputer(strategy="constant") X_trans = imputer.fit_transform(df) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize('Imputer', (SimpleImputer, IterativeImputer)) def test_imputation_missing_value_in_test_array(Imputer): # [Non Regression Test for issue #13968] Missing value in test set should # not throw an error and return a finite dataset train = [[1], [2]] test = [[3], [np.nan]] imputer = Imputer(add_indicator=True) imputer.fit(train).transform(test) def test_imputation_pipeline_grid_search(): # Test imputation within a pipeline + gridsearch. X = sparse_random_matrix(100, 100, density=0.10) missing_values = X.data[0] pipeline = Pipeline([('imputer', SimpleImputer(missing_values=missing_values)), ('tree', tree.DecisionTreeRegressor(random_state=0))]) parameters = { 'imputer__strategy': ["mean", "median", "most_frequent"] } Y = sparse_random_matrix(100, 1, density=0.10).toarray() gs = GridSearchCV(pipeline, parameters) gs.fit(X, Y) def test_imputation_copy(): # Test imputation with copy X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0) # copy=True, dense => copy X = X_orig.copy().toarray() imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert not np.all(X == Xt) # copy=True, sparse csr => copy X = X_orig.copy() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert not np.all(X.data == Xt.data) # copy=False, dense => no copy X = X_orig.copy().toarray() imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_array_almost_equal(X, Xt) # copy=False, sparse csc => no copy X = X_orig.copy().tocsc() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_array_almost_equal(X.data, Xt.data) # copy=False, sparse csr => copy X = X_orig.copy() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert not np.all(X.data == Xt.data) # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is # made, even if copy=False. 
def test_iterative_imputer_zero_iters(): rng = np.random.RandomState(0) n = 100 d = 10 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() missing_flag = X == 0 X[missing_flag] = np.nan imputer = IterativeImputer(max_iter=0) X_imputed = imputer.fit_transform(X) # with max_iter=0, only initial imputation is performed assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) # repeat but force n_iter_ to 0 imputer = IterativeImputer(max_iter=5).fit(X) # transformed should not be equal to initial imputation assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X)) imputer.n_iter_ = 0 # now they should be equal as only initial imputation is done assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X)) def test_iterative_imputer_verbose(): rng = np.random.RandomState(0) n = 100 d = 3 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1) imputer.fit(X) imputer.transform(X) imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2) imputer.fit(X) imputer.transform(X) def test_iterative_imputer_all_missing(): n = 100 d = 3 X = np.zeros((n, d)) imputer = IterativeImputer(missing_values=0, max_iter=1) X_imputed = imputer.fit_transform(X) assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) @pytest.mark.parametrize( "imputation_order", ['random', 'roman', 'ascending', 'descending', 'arabic'] ) def test_iterative_imputer_imputation_order(imputation_order): rng = np.random.RandomState(0) n = 100 d = 10 max_iter = 2 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() X[:, 0] = 1 # this column should not be discarded by IterativeImputer imputer = IterativeImputer(missing_values=0, max_iter=max_iter, n_nearest_features=5, sample_posterior=False, min_value=0, max_value=1, verbose=1, imputation_order=imputation_order, random_state=rng) imputer.fit_transform(X) ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_] assert (len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_) if imputation_order == 'roman': assert np.all(ordered_idx[:d-1] == np.arange(1, d)) elif imputation_order == 'arabic': assert np.all(ordered_idx[:d-1] == np.arange(d-1, 0, -1)) elif imputation_order == 'random': ordered_idx_round_1 = ordered_idx[:d-1] ordered_idx_round_2 = ordered_idx[d-1:] assert ordered_idx_round_1 != ordered_idx_round_2 elif 'ending' in imputation_order: assert len(ordered_idx) == max_iter * (d - 1) @pytest.mark.parametrize( "estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()] ) def test_iterative_imputer_estimators(estimator): rng = np.random.RandomState(0) n = 100 d = 10 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer(missing_values=0, max_iter=1, estimator=estimator, random_state=rng) imputer.fit_transform(X) # check that types are correct for estimators hashes = [] for triplet in imputer.imputation_sequence_: expected_type = (type(estimator) if estimator is not None else type(BayesianRidge())) assert isinstance(triplet.estimator, expected_type) hashes.append(id(triplet.estimator)) # check that each estimator is unique assert len(set(hashes)) == len(hashes) def test_iterative_imputer_clip(): rng = np.random.RandomState(0) n = 100 d = 10 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer(missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, 
random_state=rng) Xt = imputer.fit_transform(X) assert_allclose(np.min(Xt[X == 0]), 0.1) assert_allclose(np.max(Xt[X == 0]), 0.2) assert_allclose(Xt[X != 0], X[X != 0]) def test_iterative_imputer_clip_truncnorm(): rng = np.random.RandomState(0) n = 100 d = 10 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() X[:, 0] = 1 imputer = IterativeImputer(missing_values=0, max_iter=2, n_nearest_features=5, sample_posterior=True, min_value=0.1, max_value=0.2, verbose=1, imputation_order='random', random_state=rng) Xt = imputer.fit_transform(X) assert_allclose(np.min(Xt[X == 0]), 0.1) assert_allclose(np.max(Xt[X == 0]), 0.2) assert_allclose(Xt[X != 0], X[X != 0]) def test_iterative_imputer_truncated_normal_posterior(): # test that the values that are imputed using `sample_posterior=True` # with boundaries (`min_value` and `max_value` are not None) are drawn # from a distribution that looks Gaussian via the Kolmogorov-Smirnov test. # note that starting from the wrong random seed will make this test fail # because random sampling doesn't occur at all when the imputation # is outside of the (min_value, max_value) range pytest.importorskip("scipy", minversion="0.17.0") rng = np.random.RandomState(42) X = rng.normal(size=(5, 5)) X[0][0] = np.nan imputer = IterativeImputer(min_value=0, max_value=0.5, sample_posterior=True, random_state=rng) imputer.fit_transform(X) # generate multiple imputations for the single missing value imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)]) assert all(imputations >= 0) assert all(imputations <= 0.5) mu, sigma = imputations.mean(), imputations.std() if sigma == 0: sigma += 1e-12 # guard against division by zero before standardizing ks_statistic, p_value = kstest((imputations - mu) / sigma, 'norm') # we want to fail to reject the null hypothesis # null hypothesis: the distributions are the same assert ks_statistic < 0.2 or p_value > 0.1, "The posterior does not appear to be normal" @pytest.mark.parametrize( "strategy", ["mean", "median", "most_frequent"] ) def test_iterative_imputer_missing_at_transform(strategy): rng = np.random.RandomState(0) n = 100 d = 10 X_train = rng.randint(low=0, high=3, size=(n, d)) X_test = rng.randint(low=0, high=3, size=(n, d)) X_train[:, 0] = 1 # definitely no missing values in 0th column X_test[0, 0] = 0 # definitely missing value in 0th column imputer = IterativeImputer(missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng).fit(X_train) initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train) # if there were no missing values at time of fit, then imputer will # only use the initial imputer for that feature at transform assert_allclose(imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0]) def test_iterative_imputer_transform_stochasticity(): pytest.importorskip("scipy", minversion="0.17.0") rng1 = np.random.RandomState(0) rng2 = np.random.RandomState(1) n = 100 d = 10 X = sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray() # when sample_posterior=True, two transforms shouldn't be equal imputer = IterativeImputer(missing_values=0, max_iter=1, sample_posterior=True, random_state=rng1) imputer.fit(X) X_fitted_1 = imputer.transform(X) X_fitted_2 = imputer.transform(X) # sufficient to assert that the means are not the same assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2)) # when sample_posterior=False, and n_nearest_features=None # and imputation_order is not random # the two transforms should be
identical even if rng are different imputer1 = IterativeImputer(missing_values=0, max_iter=1, sample_posterior=False, n_nearest_features=None, imputation_order='ascending', random_state=rng1) imputer2 = IterativeImputer(missing_values=0, max_iter=1, sample_posterior=False, n_nearest_features=None, imputation_order='ascending', random_state=rng2) imputer1.fit(X) imputer2.fit(X) X_fitted_1a = imputer1.transform(X) X_fitted_1b = imputer1.transform(X) X_fitted_2 = imputer2.transform(X) assert_allclose(X_fitted_1a, X_fitted_1b) assert_allclose(X_fitted_1a, X_fitted_2) def test_iterative_imputer_no_missing(): rng = np.random.RandomState(0) X = rng.rand(100, 100) X[:, 0] = np.nan m1 = IterativeImputer(max_iter=10, random_state=rng) m2 = IterativeImputer(max_iter=10, random_state=rng) pred1 = m1.fit(X).transform(X) pred2 = m2.fit_transform(X) # should exclude the first column entirely assert_allclose(X[:, 1:], pred1) # fit and fit_transform should both be identical assert_allclose(pred1, pred2) def test_iterative_imputer_rank_one(): rng = np.random.RandomState(0) d = 50 A = rng.rand(d, 1) B = rng.rand(1, d) X = np.dot(A, B) nan_mask = rng.rand(d, d) < 0.5 X_missing = X.copy() X_missing[nan_mask] = np.nan imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng) X_filled = imputer.fit_transform(X_missing) assert_allclose(X_filled, X, atol=0.02) @pytest.mark.parametrize( "rank", [3, 5] ) def test_iterative_imputer_transform_recovery(rank): rng = np.random.RandomState(0) n = 70 d = 70 A = rng.rand(n, rank) B = rng.rand(rank, d) X_filled = np.dot(A, B) nan_mask = rng.rand(n, d) < 0.5 X_missing = X_filled.copy() X_missing[nan_mask] = np.nan # split up data in half n = n // 2 X_train = X_missing[:n] X_test_filled = X_filled[n:] X_test = X_missing[n:] imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng).fit(X_train) X_test_est = imputer.transform(X_test) assert_allclose(X_test_filled, X_test_est, atol=0.1) def test_iterative_imputer_additive_matrix(): rng = np.random.RandomState(0) n = 100 d = 10 A = rng.randn(n, d) B = rng.randn(n, d) X_filled = np.zeros(A.shape) for i in range(d): for j in range(d): X_filled[:, (i+j) % d] += (A[:, i] + B[:, j]) / 2 # a quarter is randomly missing nan_mask = rng.rand(n, d) < 0.25 X_missing = X_filled.copy() X_missing[nan_mask] = np.nan # split up data n = n // 2 X_train = X_missing[:n] X_test_filled = X_filled[n:] X_test = X_missing[n:] imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train) X_test_est = imputer.transform(X_test) assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01) @pytest.mark.parametrize("max_iter, tol, error_type, warning", [ (-1, 1e-3, ValueError, 'should be a positive integer'), (1, -1e-3, ValueError, 'should be a non-negative float') ]) def test_iterative_imputer_error_param(max_iter, tol, error_type, warning): X = np.zeros((100, 2)) imputer = IterativeImputer(max_iter=max_iter, tol=tol) with pytest.raises(error_type, match=warning): imputer.fit_transform(X) def test_iterative_imputer_early_stopping(): rng = np.random.RandomState(0) n = 50 d = 5 A = rng.rand(n, 1) B = rng.rand(1, d) X = np.dot(A, B) nan_mask = rng.rand(n, d) < 0.5 X_missing = X.copy() X_missing[nan_mask] = np.nan imputer = IterativeImputer(max_iter=100, tol=1e-2, sample_posterior=False, verbose=1, random_state=rng) X_filled_100 = imputer.fit_transform(X_missing) assert len(imputer.imputation_sequence_) == d * imputer.n_iter_ imputer = IterativeImputer(max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, 
random_state=rng) X_filled_early = imputer.fit_transform(X_missing) assert_allclose(X_filled_100, X_filled_early, atol=1e-7) imputer = IterativeImputer(max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng) imputer.fit(X_missing) assert imputer.n_iter_ == imputer.max_iter def test_iterative_imputer_catch_warning(): # check that we catch a RuntimeWarning due to a division by zero when a # feature is constant in the dataset X, y = load_boston(return_X_y=True) n_samples, n_features = X.shape # simulate that a feature only contain one category during fit X[:, 3] = 1 # add some missing values rng = np.random.RandomState(0) missing_rate = 0.15 for feat in range(n_features): sample_idx = rng.choice( np.arange(n_samples), size=int(n_samples * missing_rate), replace=False ) X[sample_idx, feat] = np.nan imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True) with pytest.warns(None) as record: X_fill = imputer.fit_transform(X, y) assert not record.list assert not np.any(np.isnan(X_fill)) @pytest.mark.parametrize( "X_fit, X_trans, params, msg_err", [(np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, -1]]), {'features': 'missing-only', 'sparse': 'auto'}, 'have missing values in transform but have no missing values in fit'), (np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, 2]]), {'features': 'random', 'sparse': 'auto'}, "'features' has to be either 'missing-only' or 'all'"), (np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, 2]]), {'features': 'all', 'sparse': 'random'}, "'sparse' has to be a boolean or 'auto'"), (np.array([['a', 'b'], ['c', 'a']], dtype=str), np.array([['a', 'b'], ['c', 'a']], dtype=str), {}, "MissingIndicator does not support data with dtype")] ) def test_missing_indicator_error(X_fit, X_trans, params, msg_err): indicator = MissingIndicator(missing_values=-1) indicator.set_params(**params) with pytest.raises(ValueError, match=msg_err): indicator.fit(X_fit).transform(X_trans) @pytest.mark.parametrize( "missing_values, dtype, arr_type", [(np.nan, np.float64, np.array), (0, np.int32, np.array), (-1, np.int32, np.array), (np.nan, np.float64, sparse.csc_matrix), (-1, np.int32, sparse.csc_matrix), (np.nan, np.float64, sparse.csr_matrix), (-1, np.int32, sparse.csr_matrix), (np.nan, np.float64, sparse.coo_matrix), (-1, np.int32, sparse.coo_matrix), (np.nan, np.float64, sparse.lil_matrix), (-1, np.int32, sparse.lil_matrix), (np.nan, np.float64, sparse.bsr_matrix), (-1, np.int32, sparse.bsr_matrix) ]) @pytest.mark.parametrize( "param_features, n_features, features_indices", [('missing-only', 3, np.array([0, 1, 2])), ('all', 3, np.array([0, 1, 2]))]) def test_missing_indicator_new(missing_values, arr_type, dtype, param_features, n_features, features_indices): X_fit = np.array([[missing_values, missing_values, 1], [4, 2, missing_values]]) X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]]) X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]]) # convert the input to the right array format and right dtype X_fit = arr_type(X_fit).astype(dtype) X_trans = arr_type(X_trans).astype(dtype) X_fit_expected = X_fit_expected.astype(dtype) X_trans_expected = X_trans_expected.astype(dtype) indicator = MissingIndicator(missing_values=missing_values, features=param_features, sparse=False) X_fit_mask = indicator.fit_transform(X_fit) X_trans_mask = indicator.transform(X_trans) assert X_fit_mask.shape[1] == n_features assert X_trans_mask.shape[1] == n_features assert_array_equal(indicator.features_, 
features_indices) assert_allclose(X_fit_mask, X_fit_expected[:, features_indices]) assert_allclose(X_trans_mask, X_trans_expected[:, features_indices]) assert X_fit_mask.dtype == bool assert X_trans_mask.dtype == bool assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) indicator.set_params(sparse=True) X_fit_mask_sparse = indicator.fit_transform(X_fit) X_trans_mask_sparse = indicator.transform(X_trans) assert X_fit_mask_sparse.dtype == bool assert X_trans_mask_sparse.dtype == bool assert X_fit_mask_sparse.format == 'csc' assert X_trans_mask_sparse.format == 'csc' assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask) assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask) @pytest.mark.parametrize( "arr_type", [sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix, sparse.lil_matrix, sparse.bsr_matrix]) def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type): # test for sparse input and missing_value == 0 missing_values = 0 X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) # convert the input to the right array format X_fit_sparse = arr_type(X_fit) X_trans_sparse = arr_type(X_trans) indicator = MissingIndicator(missing_values=missing_values) with pytest.raises(ValueError, match="Sparse input with missing_values=0"): indicator.fit_transform(X_fit_sparse) indicator.fit_transform(X_fit) with pytest.raises(ValueError, match="Sparse input with missing_values=0"): indicator.transform(X_trans_sparse) @pytest.mark.parametrize("param_sparse", [True, False, 'auto']) @pytest.mark.parametrize("missing_values, arr_type", [(np.nan, np.array), (0, np.array), (np.nan, sparse.csc_matrix), (np.nan, sparse.csr_matrix), (np.nan, sparse.coo_matrix), (np.nan, sparse.lil_matrix) ]) def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse): # check the format of the output with different sparse parameter X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) X_fit = arr_type(X_fit).astype(np.float64) X_trans = arr_type(X_trans).astype(np.float64) indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse) X_fit_mask = indicator.fit_transform(X_fit) X_trans_mask = indicator.transform(X_trans) if param_sparse is True: assert X_fit_mask.format == 'csc' assert X_trans_mask.format == 'csc' elif param_sparse == 'auto' and missing_values == 0: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) elif param_sparse is False: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) else: if sparse.issparse(X_fit): assert X_fit_mask.format == 'csc' assert X_trans_mask.format == 'csc' else: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) def test_missing_indicator_string(): X = np.array([['a', 'b', 'c'], ['b', 'c', 'a']], dtype=object) indicator = MissingIndicator(missing_values='a', features='all') X_trans = indicator.fit_transform(X) assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]])) @pytest.mark.parametrize( "X, missing_values, X_trans_exp", [(np.array([['a', 'b'], ['b', 'a']], dtype=object), 'a', np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object)), (np.array([[np.nan, 1.], [1., np.nan]]), np.nan, np.array([[1., 1., True, False], [1., 1., False, True]])), (np.array([[np.nan, 
'b'], ['b', np.nan]], dtype=object), np.nan, np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object)), (np.array([[None, 'b'], ['b', None]], dtype=object), None, np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object))] ) def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp): trans = make_union( SimpleImputer(missing_values=missing_values, strategy='most_frequent'), MissingIndicator(missing_values=missing_values) ) X_trans = trans.fit_transform(X) assert_array_equal(X_trans, X_trans_exp) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) @pytest.mark.parametrize( "imputer_missing_values, missing_value, err_msg", [("NaN", np.nan, "Input contains NaN"), ("-1", -1, "types are expected to be both numerical.")]) def test_inconsistent_dtype_X_missing_values(imputer_constructor, imputer_missing_values, missing_value, err_msg): # regression test for issue #11390. Comparison between incoherent dtype # for X and missing_values was not raising a proper error. rng = np.random.RandomState(42) X = rng.randn(10, 10) X[0, 0] = missing_value imputer = imputer_constructor(missing_values=imputer_missing_values) with pytest.raises(ValueError, match=err_msg): imputer.fit_transform(X) def test_missing_indicator_no_missing(): # check that all features are dropped if there are no missing values when # features='missing-only' (#13491) X = np.array([[1, 1], [1, 1]]) mi = MissingIndicator(features='missing-only', missing_values=-1) Xt = mi.fit_transform(X) assert Xt.shape[1] == 0 def test_missing_indicator_sparse_no_explicit_zeros(): # Check that non missing values don't become explicit zeros in the mask # generated by missing indicator when X is sparse. (#13491) X = sparse.csr_matrix([[0, 1, 2], [1, 2, 0], [2, 0, 1]]) mi = MissingIndicator(features='all', missing_values=1) Xt = mi.fit_transform(X) assert Xt.getnnz() == Xt.sum() @pytest.mark.parametrize("marker", [np.nan, -1, 0]) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) def test_imputers_add_indicator(marker, imputer_constructor): X = np.array([ [marker, 1, 5, marker, 1], [2, marker, 1, marker, 2], [6, 3, marker, marker, 3], [1, 2, 9, marker, 4] ]) X_true_indicator = np.array([ [1., 0., 0., 1.], [0., 1., 0., 1.], [0., 0., 1., 1.], [0., 0., 0., 1.] ]) imputer = imputer_constructor(missing_values=marker, add_indicator=True) X_trans = imputer.fit(X).transform(X) # The test is for testing the indicator, # that's why we're looking at the last 4 columns only. 
assert_allclose(X_trans[:, -4:], X_true_indicator) assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) def test_imputer_without_indicator(imputer_constructor): X = np.array([[1, 1], [1, 1]]) imputer = imputer_constructor() imputer.fit(X) assert imputer.indicator_ is None @pytest.mark.parametrize( "arr_type", [ sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix, sparse.lil_matrix, sparse.bsr_matrix ] ) def test_simple_imputation_add_indicator_sparse_matrix(arr_type): X_sparse = arr_type([ [np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9] ]) X_true = np.array([ [3., 1., 5., 1., 0., 0.], [2., 2., 1., 0., 1., 0.], [6., 3., 5., 0., 0., 1.], [1., 2., 9., 0., 0., 0.], ]) imputer = SimpleImputer(missing_values=np.nan, add_indicator=True) X_trans = imputer.fit_transform(X_sparse) assert sparse.issparse(X_trans) assert X_trans.shape == X_true.shape assert_allclose(X_trans.toarray(), X_true)
chrsrds/scikit-learn
sklearn/impute/tests/test_impute.py
sklearn/covariance/tests/test_robust_covariance.py
def test_old_pickle(tmpdir):
    import joblib

    # Check that a pickle that references sklearn.external.joblib can load
    f = tmpdir.join('foo.pkl')
    f.write(b'\x80\x02csklearn.externals.joblib.numpy_pickle\nNumpyArrayWrappe'
            b'r\nq\x00)\x81q\x01}q\x02(U\x05dtypeq\x03cnumpy\ndtype\nq\x04U'
            b'\x02i8q\x05K\x00K\x01\x87q\x06Rq\x07(K\x03U\x01<q\x08NNNJ\xff'
            b'\xff\xff\xffJ\xff\xff\xff\xffK\x00tq\tbU\x05shapeq\nK\x01\x85q'
            b'\x0bU\x05orderq\x0cU\x01Cq\rU\x08subclassq\x0ecnumpy\nndarray\nq'
            b'\x0fU\nallow_mmapq\x10\x88ub\x01\x00\x00\x00\x00\x00\x00\x00.',
            mode='wb')

    joblib.load(str(f))
from __future__ import division

import pytest

import numpy as np
from scipy import sparse
from scipy.stats import kstest

import io

from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal

# make IterativeImputer available
from sklearn.experimental import enable_iterative_imputer  # noqa

from sklearn.datasets import load_boston
from sklearn.impute import MissingIndicator
from sklearn.impute import SimpleImputer, IterativeImputer
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import BayesianRidge, ARDRegression, RidgeCV
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_union
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix


def _check_statistics(X, X_true,
                      strategy, statistics,
                      missing_values):
    """Utility function for testing imputation for a given strategy.

    Test with dense and sparse arrays

    Check that:
        - the statistics (mean, median, mode) are correct
        - the missing values are imputed correctly"""
    err_msg = "Parameters: strategy = %s, missing_values = %s, " \
              "sparse = {0}" % (strategy, missing_values)

    assert_ae = assert_array_equal

    if X.dtype.kind == 'f' or X_true.dtype.kind == 'f':
        assert_ae = assert_array_almost_equal

    # Normal matrix
    imputer = SimpleImputer(missing_values, strategy=strategy)
    X_trans = imputer.fit(X).transform(X.copy())
    assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False))
    assert_ae(X_trans, X_true, err_msg=err_msg.format(False))

    # Sparse matrix
    imputer = SimpleImputer(missing_values, strategy=strategy)
    imputer.fit(sparse.csc_matrix(X))
    X_trans = imputer.transform(sparse.csc_matrix(X.copy()))

    if sparse.issparse(X_trans):
        X_trans = X_trans.toarray()

    assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True))
    assert_ae(X_trans, X_true, err_msg=err_msg.format(True))


@pytest.mark.parametrize("strategy",
                         ['mean', 'median', 'most_frequent', "constant"])
def test_imputation_shape(strategy):
    # Verify the shapes of the imputed matrix for different strategies.
    X = np.random.randn(10, 2)
    X[::2] = np.nan

    imputer = SimpleImputer(strategy=strategy)
    X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
    assert X_imputed.shape == (10, 2)
    X_imputed = imputer.fit_transform(X)
    assert X_imputed.shape == (10, 2)

    iterative_imputer = IterativeImputer(initial_strategy=strategy)
    X_imputed = iterative_imputer.fit_transform(X)
    assert X_imputed.shape == (10, 2)


@pytest.mark.parametrize("strategy", ["const", 101, None])
def test_imputation_error_invalid_strategy(strategy):
    X = np.ones((3, 5))
    X[0, 0] = np.nan

    with pytest.raises(ValueError, match=str(strategy)):
        imputer = SimpleImputer(strategy=strategy)
        imputer.fit_transform(X)


@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
def test_imputation_deletion_warning(strategy):
    X = np.ones((3, 5))
    X[:, 0] = np.nan

    with pytest.warns(UserWarning, match="Deleting"):
        imputer = SimpleImputer(strategy=strategy, verbose=True)
        imputer.fit_transform(X)


@pytest.mark.parametrize("strategy", ["mean", "median",
                                      "most_frequent", "constant"])
def test_imputation_error_sparse_0(strategy):
    # check that error are raised when missing_values = 0 and input is sparse
    X = np.ones((3, 5))
    X[0] = 0
    X = sparse.csc_matrix(X)

    imputer = SimpleImputer(strategy=strategy, missing_values=0)
    with pytest.raises(ValueError, match="Provide a dense array"):
        imputer.fit(X)

    imputer.fit(X.toarray())
    with pytest.raises(ValueError, match="Provide a dense array"):
        imputer.transform(X)


def safe_median(arr, *args, **kwargs):
    # np.median([]) raises a TypeError for numpy >= 1.10.1
    length = arr.size if hasattr(arr, 'size') else len(arr)
    return np.nan if length == 0 else np.median(arr, *args, **kwargs)


def safe_mean(arr, *args, **kwargs):
    # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
    length = arr.size if hasattr(arr, 'size') else len(arr)
    return np.nan if length == 0 else np.mean(arr, *args, **kwargs)


def test_imputation_mean_median():
    # Test imputation using the mean and median strategies, when
    # missing_values != 0.
    rng = np.random.RandomState(0)

    dim = 10
    dec = 10
    shape = (dim * dim, dim + dec)

    zeros = np.zeros(shape[0])
    values = np.arange(1, shape[0] + 1)
    values[4::2] = - values[4::2]

    tests = [("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))),
             ("median", np.nan,
              lambda z, v, p: safe_median(np.hstack((z, v))))]

    for strategy, test_missing_values, true_value_fun in tests:
        X = np.empty(shape)
        X_true = np.empty(shape)
        true_statistics = np.empty(shape[1])

        # Create a matrix X with columns
        #    - with only zeros,
        #    - with only missing values
        #    - with zeros, missing values and values
        # And a matrix X_true containing all true values
        for j in range(shape[1]):
            nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
            nb_missing_values = max(shape[0] + dec * dec -
                                    (j + dec) * (j + dec), 0)
            nb_values = shape[0] - nb_zeros - nb_missing_values

            z = zeros[:nb_zeros]
            p = np.repeat(test_missing_values, nb_missing_values)
            v = values[rng.permutation(len(values))[:nb_values]]

            true_statistics[j] = true_value_fun(z, v, p)

            # Create the columns
            X[:, j] = np.hstack((v, z, p))

            if 0 == test_missing_values:
                X_true[:, j] = np.hstack((v,
                                          np.repeat(
                                              true_statistics[j],
                                              nb_missing_values + nb_zeros)))
            else:
                X_true[:, j] = np.hstack((v,
                                          z,
                                          np.repeat(true_statistics[j],
                                                    nb_missing_values)))

            # Shuffle them the same way
            np.random.RandomState(j).shuffle(X[:, j])
            np.random.RandomState(j).shuffle(X_true[:, j])

        # Mean doesn't support columns containing NaNs, median does
        if strategy == "median":
            cols_to_keep = ~np.isnan(X_true).any(axis=0)
        else:
            cols_to_keep = ~np.isnan(X_true).all(axis=0)

        X_true = X_true[:, cols_to_keep]

        _check_statistics(X, X_true, strategy,
                          true_statistics, test_missing_values)


def test_imputation_median_special_cases():
    # Test median imputation with sparse boundary cases
    X = np.array([
        [0, np.nan, np.nan],  # odd: implicit zero
        [5, np.nan, np.nan],  # odd: explicit nonzero
        [0, 0, np.nan],       # even: average two zeros
        [-5, 0, np.nan],      # even: avg zero and neg
        [0, 5, np.nan],       # even: avg zero and pos
        [4, 5, np.nan],       # even: avg nonzeros
        [-4, -5, np.nan],     # even: avg negatives
        [-1, 2, np.nan],      # even: crossing neg and pos
    ]).transpose()

    X_imputed_median = np.array([
        [0, 0, 0],
        [5, 5, 5],
        [0, 0, 0],
        [-5, 0, -2.5],
        [0, 5, 2.5],
        [4, 5, 4.5],
        [-4, -5, -4.5],
        [-1, 2, .5],
    ]).transpose()
    statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]

    _check_statistics(X, X_imputed_median, "median",
                      statistics_median, np.nan)


@pytest.mark.parametrize("strategy", ["mean", "median"])
@pytest.mark.parametrize("dtype", [None, object, str])
def test_imputation_mean_median_error_invalid_type(strategy, dtype):
    X = np.array([["a", "b", 3],
                  [4, "e", 6],
                  ["g", "h", 9]], dtype=dtype)

    with pytest.raises(ValueError, match="non-numeric data"):
        imputer = SimpleImputer(strategy=strategy)
        imputer.fit_transform(X)


@pytest.mark.parametrize("strategy", ["constant", "most_frequent"])
@pytest.mark.parametrize("dtype", [str, np.dtype('U'), np.dtype('S')])
def test_imputation_const_mostf_error_invalid_types(strategy, dtype):
    # Test imputation on non-numeric data using "most_frequent" and "constant"
    # strategy
    X = np.array([
        [np.nan, np.nan, "a", "f"],
        [np.nan, "c", np.nan, "d"],
        [np.nan, "b", "d", np.nan],
        [np.nan, "c", "d", "h"],
    ], dtype=dtype)

    err_msg = "SimpleImputer does not support data"
    with pytest.raises(ValueError, match=err_msg):
        imputer = SimpleImputer(strategy=strategy)
        imputer.fit(X).transform(X)


def test_imputation_most_frequent():
    # Test imputation using the most-frequent strategy.
    X = np.array([
        [-1, -1, 0, 5],
        [-1, 2, -1, 3],
        [-1, 1, 3, -1],
        [-1, 2, 3, 7],
    ])

    X_true = np.array([
        [2, 0, 5],
        [2, 3, 3],
        [1, 3, 3],
        [2, 3, 7],
    ])

    # scipy.stats.mode, used in SimpleImputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. When this
    # test will fail after an update of scipy, SimpleImputer will need to be
    # updated to be consistent with the new (correct) behaviour
    _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)


@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
def test_imputation_most_frequent_objects(marker):
    # Test imputation using the most-frequent strategy.
    X = np.array([
        [marker, marker, "a", "f"],
        [marker, "c", marker, "d"],
        [marker, "b", "d", marker],
        [marker, "c", "d", "h"],
    ], dtype=object)

    X_true = np.array([
        ["c", "a", "f"],
        ["c", "d", "d"],
        ["b", "d", "d"],
        ["c", "d", "h"],
    ], dtype=object)

    imputer = SimpleImputer(missing_values=marker,
                            strategy="most_frequent")
    X_trans = imputer.fit(X).transform(X)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize("dtype", [object, "category"])
def test_imputation_most_frequent_pandas(dtype):
    # Test imputation using the most frequent strategy on pandas df
    pd = pytest.importorskip("pandas")

    f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n"
                    ",i,x,\n"
                    "a,,y,\n"
                    "a,j,,\n"
                    "b,j,x,")

    df = pd.read_csv(f, dtype=dtype)

    X_true = np.array([
        ["a", "i", "x"],
        ["a", "j", "y"],
        ["a", "j", "x"],
        ["b", "j", "x"]
    ], dtype=object)

    imputer = SimpleImputer(strategy="most_frequent")
    X_trans = imputer.fit_transform(df)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1., np.nan)])
def test_imputation_constant_error_invalid_type(X_data, missing_value):
    # Verify that exceptions are raised on invalid fill_value type
    X = np.full((3, 5), X_data, dtype=float)
    X[0, 0] = missing_value

    with pytest.raises(ValueError, match="imputing numerical"):
        imputer = SimpleImputer(missing_values=missing_value,
                                strategy="constant", fill_value="x")
        imputer.fit_transform(X)


def test_imputation_constant_integer():
    # Test imputation using the constant strategy on integers
    X = np.array([
        [-1, 2, 3, -1],
        [4, -1, 5, -1],
        [6, 7, -1, -1],
        [8, 9, 0, -1]
    ])

    X_true = np.array([
        [0, 2, 3, 0],
        [4, 0, 5, 0],
        [6, 7, 0, 0],
        [8, 9, 0, 0]
    ])

    imputer = SimpleImputer(missing_values=-1, strategy="constant",
                            fill_value=0)
    X_trans = imputer.fit_transform(X)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize("array_constructor", [sparse.csr_matrix, np.asarray])
def test_imputation_constant_float(array_constructor):
    # Test imputation using the constant strategy on floats
    X = np.array([
        [np.nan, 1.1, 0, np.nan],
        [1.2, np.nan, 1.3, np.nan],
        [0, 0, np.nan, np.nan],
        [1.4, 1.5, 0, np.nan]
    ])

    X_true = np.array([
        [-1, 1.1, 0, -1],
        [1.2, -1, 1.3, -1],
        [0, 0, -1, -1],
        [1.4, 1.5, 0, -1]
    ])

    X = array_constructor(X)

    X_true = array_constructor(X_true)

    imputer = SimpleImputer(strategy="constant", fill_value=-1)
    X_trans = imputer.fit_transform(X)

    assert_allclose_dense_sparse(X_trans, X_true)


@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
def test_imputation_constant_object(marker):
    # Test imputation using the constant strategy on objects
    X = np.array([
        [marker, "a", "b", marker],
        ["c", marker, "d", marker],
        ["e", "f", marker, marker],
        ["g", "h", "i", marker]
    ], dtype=object)

    X_true = np.array([
        ["missing", "a", "b", "missing"],
        ["c", "missing", "d", "missing"],
        ["e", "f", "missing", "missing"],
        ["g", "h", "i", "missing"]
    ], dtype=object)

    imputer = SimpleImputer(missing_values=marker, strategy="constant",
                            fill_value="missing")
    X_trans = imputer.fit_transform(X)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize("dtype", [object, "category"])
def test_imputation_constant_pandas(dtype):
    # Test imputation using the constant strategy on pandas df
    pd = pytest.importorskip("pandas")

    f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n"
                    ",i,x,\n"
                    "a,,y,\n"
                    "a,j,,\n"
                    "b,j,x,")

    df = pd.read_csv(f, dtype=dtype)

    X_true = np.array([
        ["missing_value", "i", "x", "missing_value"],
        ["a", "missing_value", "y", "missing_value"],
        ["a", "j", "missing_value", "missing_value"],
        ["b", "j", "x", "missing_value"]
    ], dtype=object)

    imputer = SimpleImputer(strategy="constant")
    X_trans = imputer.fit_transform(df)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize('Imputer', (SimpleImputer, IterativeImputer))
def test_imputation_missing_value_in_test_array(Imputer):
    # [Non Regression Test for issue #13968] Missing value in test set should
    # not throw an error and return a finite dataset
    train = [[1], [2]]
    test = [[3], [np.nan]]
    imputer = Imputer(add_indicator=True)
    imputer.fit(train).transform(test)


def test_imputation_pipeline_grid_search():
    # Test imputation within a pipeline + gridsearch.
    X = sparse_random_matrix(100, 100, density=0.10)
    missing_values = X.data[0]

    pipeline = Pipeline([('imputer',
                          SimpleImputer(missing_values=missing_values)),
                         ('tree',
                          tree.DecisionTreeRegressor(random_state=0))])

    parameters = {
        'imputer__strategy': ["mean", "median", "most_frequent"]
    }

    Y = sparse_random_matrix(100, 1, density=0.10).toarray()
    gs = GridSearchCV(pipeline, parameters)
    gs.fit(X, Y)


def test_imputation_copy():
    # Test imputation with copy
    X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)

    # copy=True, dense => copy
    X = X_orig.copy().toarray()
    imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert not np.all(X == Xt)

    # copy=True, sparse csr => copy
    X = X_orig.copy()
    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
                            copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert not np.all(X.data == Xt.data)

    # copy=False, dense => no copy
    X = X_orig.copy().toarray()
    imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_array_almost_equal(X, Xt)

    # copy=False, sparse csc => no copy
    X = X_orig.copy().tocsc()
    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
                            copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_array_almost_equal(X.data, Xt.data)

    # copy=False, sparse csr => copy
    X = X_orig.copy()
    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
                            copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert not np.all(X.data == Xt.data)

    # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
    # made, even if copy=False.
chrsrds/scikit-learn
sklearn/impute/tests/test_impute.py
sklearn/tests/test_site_joblib.py
import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.datasets import make_blobs from sklearn.utils.class_weight import compute_class_weight from sklearn.utils.class_weight import compute_sample_weight from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message def test_compute_class_weight(): # Test (and demo) compute_class_weight. y = np.asarray([2, 2, 2, 3, 3, 4]) classes = np.unique(y) cw = compute_class_weight("balanced", classes, y) # total effect of samples is preserved class_counts = np.bincount(y)[2:] assert_almost_equal(np.dot(cw, class_counts), y.shape[0]) assert cw[0] < cw[1] < cw[2] def test_compute_class_weight_not_present(): # Raise error when y does not contain all class labels classes = np.arange(4) y = np.asarray([0, 0, 0, 1, 1, 2]) assert_raises(ValueError, compute_class_weight, "balanced", classes, y) # Fix exception in error message formatting when missing label is a string # https://github.com/scikit-learn/scikit-learn/issues/8312 assert_raise_message(ValueError, 'Class label label_not_present not present', compute_class_weight, {'label_not_present': 1.}, classes, y) # Raise error when y has items not in classes classes = np.arange(2) assert_raises(ValueError, compute_class_weight, "balanced", classes, y) assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y) def test_compute_class_weight_dict(): classes = np.arange(3) class_weights = {0: 1.0, 1: 2.0, 2: 3.0} y = np.asarray([0, 0, 1, 2]) cw = compute_class_weight(class_weights, classes, y) # When the user specifies class weights, compute_class_weights should just # return them. assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw) # When a class weight is specified that isn't in classes, a ValueError # should get raised msg = 'Class label 4 not present.' class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5} assert_raise_message(ValueError, msg, compute_class_weight, class_weights, classes, y) msg = 'Class label -1 not present.' class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0} assert_raise_message(ValueError, msg, compute_class_weight, class_weights, classes, y) def test_compute_class_weight_invariance(): # Test that results with class_weight="balanced" is invariant wrt # class imbalance if the number of samples is identical. # The test uses a balanced two class dataset with 100 datapoints. # It creates three versions, one where class 1 is duplicated # resulting in 150 points of class 1 and 50 of class 0, # one where there are 50 points in class 1 and 150 in class 0, # and one where there are 100 points of each class (this one is balanced # again). # With balancing class weights, all three should give the same model. 
    X, y = make_blobs(centers=2, random_state=0)
    # create dataset where class 1 is duplicated twice
    X_1 = np.vstack([X] + [X[y == 1]] * 2)
    y_1 = np.hstack([y] + [y[y == 1]] * 2)
    # create dataset where class 0 is duplicated twice
    X_0 = np.vstack([X] + [X[y == 0]] * 2)
    y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
    X_ = np.vstack([X] * 2)
    y_ = np.hstack([y] * 2)
    # results should be identical
    logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
    logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
    logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
    assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
    assert_array_almost_equal(logreg.coef_, logreg0.coef_)


def test_compute_class_weight_balanced_negative():
    # Test compute_class_weight when labels are negative
    # Test with balanced class labels.
    classes = np.array([-2, -1, 0])
    y = np.asarray([-1, -1, 0, 0, -2, -2])
    cw = compute_class_weight("balanced", classes, y)
    assert len(cw) == len(classes)
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))

    # Test with unbalanced class labels.
    y = np.asarray([-1, 0, 0, -2, -2, -2])
    cw = compute_class_weight("balanced", classes, y)
    assert len(cw) == len(classes)
    class_counts = np.bincount(y + 2)
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2. / 3, 2., 1.])


def test_compute_class_weight_balanced_unordered():
    # Test compute_class_weight when classes are unordered
    classes = np.array([1, 0, 3])
    y = np.asarray([1, 0, 0, 3, 3, 3])
    cw = compute_class_weight("balanced", classes, y)
    class_counts = np.bincount(y)[classes]
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2., 1., 2. / 3])


def test_compute_class_weight_default():
    # Test for the case where no weight is given for a present class.
    # Current behaviour is to assign the unweighted classes a weight of 1.
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(y)
    classes_len = len(classes)

    # Test for non specified weights
    cw = compute_class_weight(None, classes, y)
    assert len(cw) == classes_len
    assert_array_almost_equal(cw, np.ones(3))

    # Tests for partly specified weights
    cw = compute_class_weight({2: 1.5}, classes, y)
    assert len(cw) == classes_len
    assert_array_almost_equal(cw, [1.5, 1., 1.])

    cw = compute_class_weight({2: 1.5, 4: 0.5}, classes, y)
    assert len(cw) == classes_len
    assert_array_almost_equal(cw, [1.5, 1., 0.5])


def test_compute_sample_weight():
    # Test (and demo) compute_sample_weight.
    # Test with balanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with user-defined weights
    sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])

    # Test with column vector of balanced classes
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with unbalanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = compute_sample_weight("balanced", y)
    expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777,
                                  0.7777, 0.7777, 2.3333])
    assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)

    # Test with `None` weights
    sample_weight = compute_sample_weight(None, y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])

    # Test with multi-output of balanced classes
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with multi-output with user-defined weights
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])

    # Test with multi-output of unbalanced classes
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)


def test_compute_sample_weight_with_subsample():
    # Test compute_sample_weight with subsamples specified.
    # Test with balanced classes and all samples present
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with column vector of balanced classes and all samples present
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with a subsample
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = compute_sample_weight("balanced", y, range(4))
    assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3, 2. / 3,
                                              2., 2., 2.])

    # Test with a bootstrap subsample
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
    assert_array_almost_equal(sample_weight, expected_balanced)

    # Test with a bootstrap subsample for multi-output
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_balanced ** 2)

    # Test with a missing class
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])

    # Test with a missing class for multi-output
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])


def test_compute_sample_weight_errors():
    # Test compute_sample_weight raises errors expected.
    # Invalid preset string
    y = np.asarray([1, 1, 1, 2, 2, 2])
    y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    assert_raises(ValueError, compute_sample_weight, "ni", y)
    assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
    assert_raises(ValueError, compute_sample_weight, "ni", y_)
    assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))

    # Not "balanced" for subsample
    assert_raises(ValueError,
                  compute_sample_weight, {1: 2, 2: 1}, y, range(4))

    # Not a list or preset for multi-output
    assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)

    # Incorrect length list for multi-output
    assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)


def test_compute_sample_weight_more_than_32():
    # Non-regression smoke test for #12146
    y = np.arange(50)  # more than 32 distinct classes
    indices = np.arange(50)  # use subsampling
    weight = compute_sample_weight('balanced', y, indices=indices)
    assert_array_almost_equal(weight, np.ones(y.shape[0]))
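# Illustrative sketch (added for this edit, not part of the original test
# file): the "balanced" preset exercised above boils down to the documented
# heuristic n_samples / (n_classes * bincount(y)), and compute_sample_weight
# simply indexes those per-class weights by each sample's label.  The helper
# name below is an invention for illustration; the leading underscore keeps
# pytest from collecting it as a test.
def _demo_balanced_heuristic():
    y = np.asarray([0, 0, 0, 1])  # 3 vs. 1: unbalanced
    classes = np.unique(y)
    manual = y.shape[0] / (len(classes) * np.bincount(y).astype(np.float64))
    assert_array_almost_equal(compute_class_weight("balanced", classes, y),
                              manual)
    assert_array_almost_equal(compute_sample_weight("balanced", y), manual[y])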
from __future__ import division

import pytest

import numpy as np
from scipy import sparse
from scipy.stats import kstest

import io

from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal

# make IterativeImputer available
from sklearn.experimental import enable_iterative_imputer  # noqa

from sklearn.datasets import load_boston
from sklearn.impute import MissingIndicator
from sklearn.impute import SimpleImputer, IterativeImputer
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import BayesianRidge, ARDRegression, RidgeCV
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_union
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix


def _check_statistics(X, X_true, strategy, statistics, missing_values):
    """Utility function for testing imputation for a given strategy.

    Test with dense and sparse arrays.

    Check that:
        - the statistics (mean, median, mode) are correct
        - the missing values are imputed correctly
    """
    err_msg = "Parameters: strategy = %s, missing_values = %s, " \
              "sparse = {0}" % (strategy, missing_values)

    assert_ae = assert_array_equal
    if X.dtype.kind == 'f' or X_true.dtype.kind == 'f':
        assert_ae = assert_array_almost_equal

    # Normal matrix
    imputer = SimpleImputer(missing_values, strategy=strategy)
    X_trans = imputer.fit(X).transform(X.copy())
    assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False))
    assert_ae(X_trans, X_true, err_msg=err_msg.format(False))

    # Sparse matrix
    imputer = SimpleImputer(missing_values, strategy=strategy)
    imputer.fit(sparse.csc_matrix(X))
    X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
    if sparse.issparse(X_trans):
        X_trans = X_trans.toarray()
    assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True))
    assert_ae(X_trans, X_true, err_msg=err_msg.format(True))


@pytest.mark.parametrize("strategy",
                         ['mean', 'median', 'most_frequent', "constant"])
def test_imputation_shape(strategy):
    # Verify the shapes of the imputed matrix for different strategies.
    X = np.random.randn(10, 2)
    X[::2] = np.nan

    imputer = SimpleImputer(strategy=strategy)
    X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
    assert X_imputed.shape == (10, 2)
    X_imputed = imputer.fit_transform(X)
    assert X_imputed.shape == (10, 2)

    iterative_imputer = IterativeImputer(initial_strategy=strategy)
    X_imputed = iterative_imputer.fit_transform(X)
    assert X_imputed.shape == (10, 2)


@pytest.mark.parametrize("strategy", ["const", 101, None])
def test_imputation_error_invalid_strategy(strategy):
    X = np.ones((3, 5))
    X[0, 0] = np.nan

    with pytest.raises(ValueError, match=str(strategy)):
        imputer = SimpleImputer(strategy=strategy)
        imputer.fit_transform(X)


@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
def test_imputation_deletion_warning(strategy):
    X = np.ones((3, 5))
    X[:, 0] = np.nan

    with pytest.warns(UserWarning, match="Deleting"):
        imputer = SimpleImputer(strategy=strategy, verbose=True)
        imputer.fit_transform(X)


@pytest.mark.parametrize("strategy", ["mean", "median",
                                      "most_frequent", "constant"])
def test_imputation_error_sparse_0(strategy):
    # check that errors are raised when missing_values = 0 and input is sparse
    X = np.ones((3, 5))
    X[0] = 0
    X = sparse.csc_matrix(X)

    imputer = SimpleImputer(strategy=strategy, missing_values=0)
    with pytest.raises(ValueError, match="Provide a dense array"):
        imputer.fit(X)

    imputer.fit(X.toarray())
    with pytest.raises(ValueError, match="Provide a dense array"):
        imputer.transform(X)


def safe_median(arr, *args, **kwargs):
    # np.median([]) raises a TypeError for numpy >= 1.10.1
    length = arr.size if hasattr(arr, 'size') else len(arr)
    return np.nan if length == 0 else np.median(arr, *args, **kwargs)


def safe_mean(arr, *args, **kwargs):
    # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
    length = arr.size if hasattr(arr, 'size') else len(arr)
    return np.nan if length == 0 else np.mean(arr, *args, **kwargs)


def test_imputation_mean_median():
    # Test imputation using the mean and median strategies, when
    # missing_values != 0.
    rng = np.random.RandomState(0)

    dim = 10
    dec = 10
    shape = (dim * dim, dim + dec)

    zeros = np.zeros(shape[0])
    values = np.arange(1, shape[0] + 1)
    values[4::2] = - values[4::2]

    tests = [("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))),
             ("median", np.nan,
              lambda z, v, p: safe_median(np.hstack((z, v))))]

    for strategy, test_missing_values, true_value_fun in tests:
        X = np.empty(shape)
        X_true = np.empty(shape)
        true_statistics = np.empty(shape[1])

        # Create a matrix X with columns
        #    - with only zeros,
        #    - with only missing values
        #    - with zeros, missing values and values
        # And a matrix X_true containing all true values
        for j in range(shape[1]):
            nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
            nb_missing_values = max(shape[0] + dec * dec -
                                    (j + dec) * (j + dec), 0)
            nb_values = shape[0] - nb_zeros - nb_missing_values

            z = zeros[:nb_zeros]
            p = np.repeat(test_missing_values, nb_missing_values)
            v = values[rng.permutation(len(values))[:nb_values]]

            true_statistics[j] = true_value_fun(z, v, p)

            # Create the columns
            X[:, j] = np.hstack((v, z, p))

            if 0 == test_missing_values:
                X_true[:, j] = np.hstack(
                    (v, np.repeat(true_statistics[j],
                                  nb_missing_values + nb_zeros)))
            else:
                X_true[:, j] = np.hstack(
                    (v, z, np.repeat(true_statistics[j], nb_missing_values)))

            # Shuffle them the same way
            np.random.RandomState(j).shuffle(X[:, j])
            np.random.RandomState(j).shuffle(X_true[:, j])

        # Mean doesn't support columns containing NaNs, median does
        if strategy == "median":
            cols_to_keep = ~np.isnan(X_true).any(axis=0)
        else:
            cols_to_keep = ~np.isnan(X_true).all(axis=0)

        X_true = X_true[:, cols_to_keep]

        _check_statistics(X, X_true, strategy,
                          true_statistics, test_missing_values)


def test_imputation_median_special_cases():
    # Test median imputation with sparse boundary cases
    X = np.array([
        [0, np.nan, np.nan],  # odd: implicit zero
        [5, np.nan, np.nan],  # odd: explicit nonzero
        [0, 0, np.nan],       # even: average two zeros
        [-5, 0, np.nan],      # even: avg zero and neg
        [0, 5, np.nan],       # even: avg zero and pos
        [4, 5, np.nan],       # even: avg nonzeros
        [-4, -5, np.nan],     # even: avg negatives
        [-1, 2, np.nan],      # even: crossing neg and pos
    ]).transpose()

    X_imputed_median = np.array([
        [0, 0, 0],
        [5, 5, 5],
        [0, 0, 0],
        [-5, 0, -2.5],
        [0, 5, 2.5],
        [4, 5, 4.5],
        [-4, -5, -4.5],
        [-1, 2, .5],
    ]).transpose()
    statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]

    _check_statistics(X, X_imputed_median, "median",
                      statistics_median, np.nan)


@pytest.mark.parametrize("strategy", ["mean", "median"])
@pytest.mark.parametrize("dtype", [None, object, str])
def test_imputation_mean_median_error_invalid_type(strategy, dtype):
    X = np.array([["a", "b", 3],
                  [4, "e", 6],
                  ["g", "h", 9]], dtype=dtype)

    with pytest.raises(ValueError, match="non-numeric data"):
        imputer = SimpleImputer(strategy=strategy)
        imputer.fit_transform(X)


@pytest.mark.parametrize("strategy", ["constant", "most_frequent"])
@pytest.mark.parametrize("dtype", [str, np.dtype('U'), np.dtype('S')])
def test_imputation_const_mostf_error_invalid_types(strategy, dtype):
    # Test imputation on non-numeric data using "most_frequent" and "constant"
    # strategy
    X = np.array([
        [np.nan, np.nan, "a", "f"],
        [np.nan, "c", np.nan, "d"],
        [np.nan, "b", "d", np.nan],
        [np.nan, "c", "d", "h"],
    ], dtype=dtype)

    err_msg = "SimpleImputer does not support data"
    with pytest.raises(ValueError, match=err_msg):
        imputer = SimpleImputer(strategy=strategy)
        imputer.fit(X).transform(X)


def test_imputation_most_frequent():
    # Test imputation using the most-frequent strategy.
    X = np.array([
        [-1, -1, 0, 5],
        [-1, 2, -1, 3],
        [-1, 1, 3, -1],
        [-1, 2, 3, 7],
    ])

    X_true = np.array([
        [2, 0, 5],
        [2, 3, 3],
        [1, 3, 3],
        [2, 3, 7],
    ])

    # scipy.stats.mode, used in SimpleImputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. When this
    # test will fail after an update of scipy, SimpleImputer will need to be
    # updated to be consistent with the new (correct) behaviour
    _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)


@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
def test_imputation_most_frequent_objects(marker):
    # Test imputation using the most-frequent strategy.
    X = np.array([
        [marker, marker, "a", "f"],
        [marker, "c", marker, "d"],
        [marker, "b", "d", marker],
        [marker, "c", "d", "h"],
    ], dtype=object)

    X_true = np.array([
        ["c", "a", "f"],
        ["c", "d", "d"],
        ["b", "d", "d"],
        ["c", "d", "h"],
    ], dtype=object)

    imputer = SimpleImputer(missing_values=marker,
                            strategy="most_frequent")
    X_trans = imputer.fit(X).transform(X)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize("dtype", [object, "category"])
def test_imputation_most_frequent_pandas(dtype):
    # Test imputation using the most frequent strategy on pandas df
    pd = pytest.importorskip("pandas")

    f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n"
                    ",i,x,\n"
                    "a,,y,\n"
                    "a,j,,\n"
                    "b,j,x,")

    df = pd.read_csv(f, dtype=dtype)

    X_true = np.array([
        ["a", "i", "x"],
        ["a", "j", "y"],
        ["a", "j", "x"],
        ["b", "j", "x"]
    ], dtype=object)

    imputer = SimpleImputer(strategy="most_frequent")
    X_trans = imputer.fit_transform(df)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1., np.nan)])
def test_imputation_constant_error_invalid_type(X_data, missing_value):
    # Verify that exceptions are raised on invalid fill_value type
    X = np.full((3, 5), X_data, dtype=float)
    X[0, 0] = missing_value

    with pytest.raises(ValueError, match="imputing numerical"):
        imputer = SimpleImputer(missing_values=missing_value,
                                strategy="constant", fill_value="x")
        imputer.fit_transform(X)


def test_imputation_constant_integer():
    # Test imputation using the constant strategy on integers
    X = np.array([
        [-1, 2, 3, -1],
        [4, -1, 5, -1],
        [6, 7, -1, -1],
        [8, 9, 0, -1]
    ])

    X_true = np.array([
        [0, 2, 3, 0],
        [4, 0, 5, 0],
        [6, 7, 0, 0],
        [8, 9, 0, 0]
    ])

    imputer = SimpleImputer(missing_values=-1, strategy="constant",
                            fill_value=0)
    X_trans = imputer.fit_transform(X)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize("array_constructor",
                         [sparse.csr_matrix, np.asarray])
def test_imputation_constant_float(array_constructor):
    # Test imputation using the constant strategy on floats
    X = np.array([
        [np.nan, 1.1, 0, np.nan],
        [1.2, np.nan, 1.3, np.nan],
        [0, 0, np.nan, np.nan],
        [1.4, 1.5, 0, np.nan]
    ])

    X_true = np.array([
        [-1, 1.1, 0, -1],
        [1.2, -1, 1.3, -1],
        [0, 0, -1, -1],
        [1.4, 1.5, 0, -1]
    ])

    X = array_constructor(X)
    X_true = array_constructor(X_true)

    imputer = SimpleImputer(strategy="constant", fill_value=-1)
    X_trans = imputer.fit_transform(X)

    assert_allclose_dense_sparse(X_trans, X_true)


@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
def test_imputation_constant_object(marker):
    # Test imputation using the constant strategy on objects
    X = np.array([
        [marker, "a", "b", marker],
        ["c", marker, "d", marker],
        ["e", "f", marker, marker],
        ["g", "h", "i", marker]
    ], dtype=object)

    X_true = np.array([
        ["missing", "a", "b", "missing"],
        ["c", "missing", "d", "missing"],
        ["e", "f", "missing", "missing"],
        ["g", "h", "i", "missing"]
    ], dtype=object)

    imputer = SimpleImputer(missing_values=marker, strategy="constant",
                            fill_value="missing")
    X_trans = imputer.fit_transform(X)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize("dtype", [object, "category"])
def test_imputation_constant_pandas(dtype):
    # Test imputation using the constant strategy on pandas df
    pd = pytest.importorskip("pandas")

    f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n"
                    ",i,x,\n"
                    "a,,y,\n"
                    "a,j,,\n"
                    "b,j,x,")

    df = pd.read_csv(f, dtype=dtype)

    X_true = np.array([
        ["missing_value", "i", "x", "missing_value"],
        ["a", "missing_value", "y", "missing_value"],
        ["a", "j", "missing_value", "missing_value"],
        ["b", "j", "x", "missing_value"]
    ], dtype=object)

    imputer = SimpleImputer(strategy="constant")
    X_trans = imputer.fit_transform(df)

    assert_array_equal(X_trans, X_true)


@pytest.mark.parametrize('Imputer', (SimpleImputer, IterativeImputer))
def test_imputation_missing_value_in_test_array(Imputer):
    # [Non Regression Test for issue #13968] Missing value in test set should
    # not throw an error and return a finite dataset
    train = [[1], [2]]
    test = [[3], [np.nan]]
    imputer = Imputer(add_indicator=True)
    imputer.fit(train).transform(test)


def test_imputation_pipeline_grid_search():
    # Test imputation within a pipeline + gridsearch.
    X = sparse_random_matrix(100, 100, density=0.10)
    missing_values = X.data[0]

    pipeline = Pipeline([('imputer',
                          SimpleImputer(missing_values=missing_values)),
                         ('tree',
                          tree.DecisionTreeRegressor(random_state=0))])

    parameters = {
        'imputer__strategy': ["mean", "median", "most_frequent"]
    }

    Y = sparse_random_matrix(100, 1, density=0.10).toarray()
    gs = GridSearchCV(pipeline, parameters)
    gs.fit(X, Y)


def test_imputation_copy():
    # Test imputation with copy
    X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)

    # copy=True, dense => copy
    X = X_orig.copy().toarray()
    imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert not np.all(X == Xt)

    # copy=True, sparse csr => copy
    X = X_orig.copy()
    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
                            copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert not np.all(X.data == Xt.data)

    # copy=False, dense => no copy
    X = X_orig.copy().toarray()
    imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_array_almost_equal(X, Xt)

    # copy=False, sparse csc => no copy
    X = X_orig.copy().tocsc()
    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
                            copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_array_almost_equal(X.data, Xt.data)

    # copy=False, sparse csr => copy
    X = X_orig.copy()
    imputer = SimpleImputer(missing_values=X.data[0], strategy="mean",
                            copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert not np.all(X.data == Xt.data)

    # Note: If X is sparse and if missing_values=0, then a (dense) copy of X
    # is made, even if copy=False.
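# Illustrative sketch (added for this edit, not part of the original test
# file): the copy semantics pinned down above, in minimal form.  With
# copy=True (the default) the imputer fills a fresh array, so the input keeps
# its NaN, and the fitted per-column statistics are exposed as `statistics_`.
# The helper name is an invention; the leading underscore keeps pytest from
# collecting it as a test.
def _demo_simple_imputer_copy():
    X_demo = np.array([[1., np.nan],
                       [3., 4.]])
    imp = SimpleImputer(strategy="mean", copy=True)
    Xt_demo = imp.fit_transform(X_demo)
    assert np.isnan(X_demo[0, 1])  # original left untouched
    assert Xt_demo[0, 1] == 4.     # filled with the column mean
    assert_allclose(imp.statistics_, [2., 4.])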
def test_iterative_imputer_zero_iters():
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
    missing_flag = X == 0
    X[missing_flag] = np.nan

    imputer = IterativeImputer(max_iter=0)
    X_imputed = imputer.fit_transform(X)
    # with max_iter=0, only initial imputation is performed
    assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))

    # repeat but force n_iter_ to 0
    imputer = IterativeImputer(max_iter=5).fit(X)
    # transformed should not be equal to initial imputation
    assert not np.all(imputer.transform(X) ==
                      imputer.initial_imputer_.transform(X))

    imputer.n_iter_ = 0
    # now they should be equal as only initial imputation is done
    assert_allclose(imputer.transform(X),
                    imputer.initial_imputer_.transform(X))


def test_iterative_imputer_verbose():
    rng = np.random.RandomState(0)
    n = 100
    d = 3
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
    imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1)
    imputer.fit(X)
    imputer.transform(X)
    imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2)
    imputer.fit(X)
    imputer.transform(X)


def test_iterative_imputer_all_missing():
    n = 100
    d = 3
    X = np.zeros((n, d))
    imputer = IterativeImputer(missing_values=0, max_iter=1)
    X_imputed = imputer.fit_transform(X)
    assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))


@pytest.mark.parametrize(
    "imputation_order",
    ['random', 'roman', 'ascending', 'descending', 'arabic']
)
def test_iterative_imputer_imputation_order(imputation_order):
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    max_iter = 2
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
    X[:, 0] = 1  # this column should not be discarded by IterativeImputer

    imputer = IterativeImputer(missing_values=0,
                               max_iter=max_iter,
                               n_nearest_features=5,
                               sample_posterior=False,
                               min_value=0,
                               max_value=1,
                               verbose=1,
                               imputation_order=imputation_order,
                               random_state=rng)
    imputer.fit_transform(X)
    ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_]

    assert (len(ordered_idx) // imputer.n_iter_ ==
            imputer.n_features_with_missing_)

    if imputation_order == 'roman':
        assert np.all(ordered_idx[:d-1] == np.arange(1, d))
    elif imputation_order == 'arabic':
        assert np.all(ordered_idx[:d-1] == np.arange(d-1, 0, -1))
    elif imputation_order == 'random':
        ordered_idx_round_1 = ordered_idx[:d-1]
        ordered_idx_round_2 = ordered_idx[d-1:]
        assert ordered_idx_round_1 != ordered_idx_round_2
    elif 'ending' in imputation_order:
        assert len(ordered_idx) == max_iter * (d - 1)


@pytest.mark.parametrize(
    "estimator",
    [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()]
)
def test_iterative_imputer_estimators(estimator):
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()

    imputer = IterativeImputer(missing_values=0,
                               max_iter=1,
                               estimator=estimator,
                               random_state=rng)
    imputer.fit_transform(X)

    # check that types are correct for estimators
    hashes = []
    for triplet in imputer.imputation_sequence_:
        expected_type = (type(estimator) if estimator is not None
                         else type(BayesianRidge()))
        assert isinstance(triplet.estimator, expected_type)
        hashes.append(id(triplet.estimator))

    # check that each estimator is unique
    assert len(set(hashes)) == len(hashes)


def test_iterative_imputer_clip():
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()

    imputer = IterativeImputer(missing_values=0,
                               max_iter=1,
                               min_value=0.1,
                               max_value=0.2,
                               random_state=rng)

    Xt = imputer.fit_transform(X)
    assert_allclose(np.min(Xt[X == 0]), 0.1)
    assert_allclose(np.max(Xt[X == 0]), 0.2)
    assert_allclose(Xt[X != 0], X[X != 0])


def test_iterative_imputer_clip_truncnorm():
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
    X[:, 0] = 1

    imputer = IterativeImputer(missing_values=0,
                               max_iter=2,
                               n_nearest_features=5,
                               sample_posterior=True,
                               min_value=0.1,
                               max_value=0.2,
                               verbose=1,
                               imputation_order='random',
                               random_state=rng)
    Xt = imputer.fit_transform(X)
    assert_allclose(np.min(Xt[X == 0]), 0.1)
    assert_allclose(np.max(Xt[X == 0]), 0.2)
    assert_allclose(Xt[X != 0], X[X != 0])


def test_iterative_imputer_truncated_normal_posterior():
    # test that the values that are imputed using `sample_posterior=True`
    # with boundaries (`min_value` and `max_value` are not None) are drawn
    # from a distribution that looks gaussian via the Kolmogorov Smirnov test.
    # note that starting from the wrong random seed will make this test fail
    # because random sampling doesn't occur at all when the imputation
    # is outside of the (min_value, max_value) range
    pytest.importorskip("scipy", minversion="0.17.0")
    rng = np.random.RandomState(42)

    X = rng.normal(size=(5, 5))
    X[0][0] = np.nan

    imputer = IterativeImputer(min_value=0,
                               max_value=0.5,
                               sample_posterior=True,
                               random_state=rng)
    imputer.fit_transform(X)
    # generate multiple imputations for the single missing value
    imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])

    assert all(imputations >= 0)
    assert all(imputations <= 0.5)

    mu, sigma = imputations.mean(), imputations.std()
    # guard against a zero std before normalising, to avoid dividing by zero
    if sigma == 0:
        sigma += 1e-12
    ks_statistic, p_value = kstest((imputations - mu) / sigma, 'norm')
    # we want to fail to reject null hypothesis
    # null hypothesis: distributions are the same
    assert ks_statistic < 0.2 or p_value > 0.1, \
        "The posterior does not appear to be normal"


@pytest.mark.parametrize(
    "strategy",
    ["mean", "median", "most_frequent"]
)
def test_iterative_imputer_missing_at_transform(strategy):
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    X_train = rng.randint(low=0, high=3, size=(n, d))
    X_test = rng.randint(low=0, high=3, size=(n, d))

    X_train[:, 0] = 1  # definitely no missing values in 0th column
    X_test[0, 0] = 0  # definitely missing value in 0th column

    imputer = IterativeImputer(missing_values=0,
                               max_iter=1,
                               initial_strategy=strategy,
                               random_state=rng).fit(X_train)
    initial_imputer = SimpleImputer(missing_values=0,
                                    strategy=strategy).fit(X_train)

    # if there were no missing values at time of fit, then imputer will
    # only use the initial imputer for that feature at transform
    assert_allclose(imputer.transform(X_test)[:, 0],
                    initial_imputer.transform(X_test)[:, 0])


def test_iterative_imputer_transform_stochasticity():
    pytest.importorskip("scipy", minversion="0.17.0")
    rng1 = np.random.RandomState(0)
    rng2 = np.random.RandomState(1)
    n = 100
    d = 10
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray()

    # when sample_posterior=True, two transforms shouldn't be equal
    imputer = IterativeImputer(missing_values=0,
                               max_iter=1,
                               sample_posterior=True,
                               random_state=rng1)
    imputer.fit(X)

    X_fitted_1 = imputer.transform(X)
    X_fitted_2 = imputer.transform(X)

    # sufficient to assert that the means are not the same
    assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2))

    # when sample_posterior=False, and n_nearest_features=None
    # and imputation_order is not random, the two transforms should be
    # identical even if the rngs are different
    imputer1 = IterativeImputer(missing_values=0,
                                max_iter=1,
                                sample_posterior=False,
                                n_nearest_features=None,
                                imputation_order='ascending',
                                random_state=rng1)

    imputer2 = IterativeImputer(missing_values=0,
                                max_iter=1,
                                sample_posterior=False,
                                n_nearest_features=None,
                                imputation_order='ascending',
                                random_state=rng2)
    imputer1.fit(X)
    imputer2.fit(X)

    X_fitted_1a = imputer1.transform(X)
    X_fitted_1b = imputer1.transform(X)
    X_fitted_2 = imputer2.transform(X)

    assert_allclose(X_fitted_1a, X_fitted_1b)
    assert_allclose(X_fitted_1a, X_fitted_2)


def test_iterative_imputer_no_missing():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 100)
    X[:, 0] = np.nan
    m1 = IterativeImputer(max_iter=10, random_state=rng)
    m2 = IterativeImputer(max_iter=10, random_state=rng)
    pred1 = m1.fit(X).transform(X)
    pred2 = m2.fit_transform(X)
    # should exclude the first column entirely
    assert_allclose(X[:, 1:], pred1)
    # fit and fit_transform should both be identical
    assert_allclose(pred1, pred2)


def test_iterative_imputer_rank_one():
    rng = np.random.RandomState(0)
    d = 50
    A = rng.rand(d, 1)
    B = rng.rand(1, d)
    X = np.dot(A, B)
    nan_mask = rng.rand(d, d) < 0.5
    X_missing = X.copy()
    X_missing[nan_mask] = np.nan

    imputer = IterativeImputer(max_iter=5,
                               verbose=1,
                               random_state=rng)
    X_filled = imputer.fit_transform(X_missing)
    assert_allclose(X_filled, X, atol=0.02)


@pytest.mark.parametrize(
    "rank",
    [3, 5]
)
def test_iterative_imputer_transform_recovery(rank):
    rng = np.random.RandomState(0)
    n = 70
    d = 70
    A = rng.rand(n, rank)
    B = rng.rand(rank, d)
    X_filled = np.dot(A, B)
    nan_mask = rng.rand(n, d) < 0.5
    X_missing = X_filled.copy()
    X_missing[nan_mask] = np.nan

    # split up data in half
    n = n // 2
    X_train = X_missing[:n]
    X_test_filled = X_filled[n:]
    X_test = X_missing[n:]

    imputer = IterativeImputer(max_iter=5,
                               verbose=1,
                               random_state=rng).fit(X_train)
    X_test_est = imputer.transform(X_test)
    assert_allclose(X_test_filled, X_test_est, atol=0.1)


def test_iterative_imputer_additive_matrix():
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    A = rng.randn(n, d)
    B = rng.randn(n, d)
    X_filled = np.zeros(A.shape)
    for i in range(d):
        for j in range(d):
            X_filled[:, (i+j) % d] += (A[:, i] + B[:, j]) / 2
    # a quarter is randomly missing
    nan_mask = rng.rand(n, d) < 0.25
    X_missing = X_filled.copy()
    X_missing[nan_mask] = np.nan

    # split up data
    n = n // 2
    X_train = X_missing[:n]
    X_test_filled = X_filled[n:]
    X_test = X_missing[n:]

    imputer = IterativeImputer(max_iter=10,
                               verbose=1,
                               random_state=rng).fit(X_train)
    X_test_est = imputer.transform(X_test)
    assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01)


@pytest.mark.parametrize("max_iter, tol, error_type, warning", [
    (-1, 1e-3, ValueError, 'should be a positive integer'),
    (1, -1e-3, ValueError, 'should be a non-negative float')
])
def test_iterative_imputer_error_param(max_iter, tol, error_type, warning):
    X = np.zeros((100, 2))
    imputer = IterativeImputer(max_iter=max_iter, tol=tol)
    with pytest.raises(error_type, match=warning):
        imputer.fit_transform(X)


def test_iterative_imputer_early_stopping():
    rng = np.random.RandomState(0)
    n = 50
    d = 5
    A = rng.rand(n, 1)
    B = rng.rand(1, d)
    X = np.dot(A, B)
    nan_mask = rng.rand(n, d) < 0.5
    X_missing = X.copy()
    X_missing[nan_mask] = np.nan

    imputer = IterativeImputer(max_iter=100,
                               tol=1e-2,
                               sample_posterior=False,
                               verbose=1,
                               random_state=rng)
    X_filled_100 = imputer.fit_transform(X_missing)
    assert len(imputer.imputation_sequence_) == d * imputer.n_iter_

    imputer = IterativeImputer(max_iter=imputer.n_iter_,
                               sample_posterior=False,
                               verbose=1,
                               random_state=rng)
    X_filled_early = imputer.fit_transform(X_missing)
    assert_allclose(X_filled_100, X_filled_early, atol=1e-7)

    imputer = IterativeImputer(max_iter=100,
                               tol=0,
                               sample_posterior=False,
                               verbose=1,
                               random_state=rng)
    imputer.fit(X_missing)
    assert imputer.n_iter_ == imputer.max_iter


def test_iterative_imputer_catch_warning():
    # check that we catch a RuntimeWarning due to a division by zero when a
    # feature is constant in the dataset
    X, y = load_boston(return_X_y=True)
    n_samples, n_features = X.shape

    # simulate that a feature only contains one category during fit
    X[:, 3] = 1

    # add some missing values
    rng = np.random.RandomState(0)
    missing_rate = 0.15
    for feat in range(n_features):
        sample_idx = rng.choice(
            np.arange(n_samples), size=int(n_samples * missing_rate),
            replace=False
        )
        X[sample_idx, feat] = np.nan

    imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True)
    with pytest.warns(None) as record:
        X_fill = imputer.fit_transform(X, y)
    assert not record.list
    assert not np.any(np.isnan(X_fill))


@pytest.mark.parametrize(
    "X_fit, X_trans, params, msg_err",
    [(np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, -1]]),
      {'features': 'missing-only', 'sparse': 'auto'},
      'have missing values in transform but have no missing values in fit'),
     (np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, 2]]),
      {'features': 'random', 'sparse': 'auto'},
      "'features' has to be either 'missing-only' or 'all'"),
     (np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, 2]]),
      {'features': 'all', 'sparse': 'random'},
      "'sparse' has to be a boolean or 'auto'"),
     (np.array([['a', 'b'], ['c', 'a']], dtype=str),
      np.array([['a', 'b'], ['c', 'a']], dtype=str),
      {}, "MissingIndicator does not support data with dtype")]
)
def test_missing_indicator_error(X_fit, X_trans, params, msg_err):
    indicator = MissingIndicator(missing_values=-1)
    indicator.set_params(**params)
    with pytest.raises(ValueError, match=msg_err):
        indicator.fit(X_fit).transform(X_trans)


@pytest.mark.parametrize(
    "missing_values, dtype, arr_type",
    [(np.nan, np.float64, np.array),
     (0, np.int32, np.array),
     (-1, np.int32, np.array),
     (np.nan, np.float64, sparse.csc_matrix),
     (-1, np.int32, sparse.csc_matrix),
     (np.nan, np.float64, sparse.csr_matrix),
     (-1, np.int32, sparse.csr_matrix),
     (np.nan, np.float64, sparse.coo_matrix),
     (-1, np.int32, sparse.coo_matrix),
     (np.nan, np.float64, sparse.lil_matrix),
     (-1, np.int32, sparse.lil_matrix),
     (np.nan, np.float64, sparse.bsr_matrix),
     (-1, np.int32, sparse.bsr_matrix)
     ])
@pytest.mark.parametrize(
    "param_features, n_features, features_indices",
    [('missing-only', 3, np.array([0, 1, 2])),
     ('all', 3, np.array([0, 1, 2]))])
def test_missing_indicator_new(missing_values, arr_type, dtype,
                               param_features, n_features, features_indices):
    X_fit = np.array([[missing_values, missing_values, 1],
                      [4, 2, missing_values]])
    X_trans = np.array([[missing_values, missing_values, 1],
                        [4, 12, 10]])
    X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]])
    X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]])

    # convert the input to the right array format and right dtype
    X_fit = arr_type(X_fit).astype(dtype)
    X_trans = arr_type(X_trans).astype(dtype)
    X_fit_expected = X_fit_expected.astype(dtype)
    X_trans_expected = X_trans_expected.astype(dtype)

    indicator = MissingIndicator(missing_values=missing_values,
                                 features=param_features,
                                 sparse=False)
    X_fit_mask = indicator.fit_transform(X_fit)
    X_trans_mask = indicator.transform(X_trans)

    assert X_fit_mask.shape[1] == n_features
    assert X_trans_mask.shape[1] == n_features

    assert_array_equal(indicator.features_, features_indices)
    assert_allclose(X_fit_mask, X_fit_expected[:, features_indices])
    assert_allclose(X_trans_mask, X_trans_expected[:, features_indices])

    assert X_fit_mask.dtype == bool
    assert X_trans_mask.dtype == bool

    assert isinstance(X_fit_mask, np.ndarray)
    assert isinstance(X_trans_mask, np.ndarray)

    indicator.set_params(sparse=True)
    X_fit_mask_sparse = indicator.fit_transform(X_fit)
    X_trans_mask_sparse = indicator.transform(X_trans)

    assert X_fit_mask_sparse.dtype == bool
    assert X_trans_mask_sparse.dtype == bool

    assert X_fit_mask_sparse.format == 'csc'
    assert X_trans_mask_sparse.format == 'csc'

    assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask)
    assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask)


@pytest.mark.parametrize(
    "arr_type",
    [sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix,
     sparse.lil_matrix, sparse.bsr_matrix])
def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type):
    # test for sparse input and missing_value == 0
    missing_values = 0
    X_fit = np.array([[missing_values, missing_values, 1],
                      [4, missing_values, 2]])
    X_trans = np.array([[missing_values, missing_values, 1],
                        [4, 12, 10]])

    # convert the input to the right array format
    X_fit_sparse = arr_type(X_fit)
    X_trans_sparse = arr_type(X_trans)

    indicator = MissingIndicator(missing_values=missing_values)

    with pytest.raises(ValueError,
                       match="Sparse input with missing_values=0"):
        indicator.fit_transform(X_fit_sparse)

    indicator.fit_transform(X_fit)
    with pytest.raises(ValueError,
                       match="Sparse input with missing_values=0"):
        indicator.transform(X_trans_sparse)


@pytest.mark.parametrize("param_sparse", [True, False, 'auto'])
@pytest.mark.parametrize("missing_values, arr_type",
                         [(np.nan, np.array),
                          (0, np.array),
                          (np.nan, sparse.csc_matrix),
                          (np.nan, sparse.csr_matrix),
                          (np.nan, sparse.coo_matrix),
                          (np.nan, sparse.lil_matrix)
                          ])
def test_missing_indicator_sparse_param(arr_type, missing_values,
                                        param_sparse):
    # check the format of the output with different sparse parameter
    X_fit = np.array([[missing_values, missing_values, 1],
                      [4, missing_values, 2]])
    X_trans = np.array([[missing_values, missing_values, 1],
                        [4, 12, 10]])
    X_fit = arr_type(X_fit).astype(np.float64)
    X_trans = arr_type(X_trans).astype(np.float64)

    indicator = MissingIndicator(missing_values=missing_values,
                                 sparse=param_sparse)
    X_fit_mask = indicator.fit_transform(X_fit)
    X_trans_mask = indicator.transform(X_trans)

    if param_sparse is True:
        assert X_fit_mask.format == 'csc'
        assert X_trans_mask.format == 'csc'
    elif param_sparse == 'auto' and missing_values == 0:
        assert isinstance(X_fit_mask, np.ndarray)
        assert isinstance(X_trans_mask, np.ndarray)
    elif param_sparse is False:
        assert isinstance(X_fit_mask, np.ndarray)
        assert isinstance(X_trans_mask, np.ndarray)
    else:
        if sparse.issparse(X_fit):
            assert X_fit_mask.format == 'csc'
            assert X_trans_mask.format == 'csc'
        else:
            assert isinstance(X_fit_mask, np.ndarray)
            assert isinstance(X_trans_mask, np.ndarray)


def test_missing_indicator_string():
    X = np.array([['a', 'b', 'c'], ['b', 'c', 'a']], dtype=object)
    indicator = MissingIndicator(missing_values='a', features='all')
    X_trans = indicator.fit_transform(X)
    assert_array_equal(X_trans, np.array([[True, False, False],
                                          [False, False, True]]))


@pytest.mark.parametrize(
    "X, missing_values, X_trans_exp",
    [(np.array([['a', 'b'], ['b', 'a']], dtype=object), 'a',
      np.array([['b', 'b', True, False], ['b', 'b', False, True]],
               dtype=object)),
     (np.array([[np.nan, 1.], [1., np.nan]]), np.nan,
      np.array([[1., 1., True, False], [1., 1., False, True]])),
     (np.array([[np.nan, 'b'], ['b', np.nan]], dtype=object), np.nan,
      np.array([['b', 'b', True, False], ['b', 'b', False, True]],
               dtype=object)),
     (np.array([[None, 'b'], ['b', None]], dtype=object), None,
      np.array([['b', 'b', True, False], ['b', 'b', False, True]],
               dtype=object))]
)
def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp):
    trans = make_union(
        SimpleImputer(missing_values=missing_values,
                      strategy='most_frequent'),
        MissingIndicator(missing_values=missing_values)
    )
    X_trans = trans.fit_transform(X)
    assert_array_equal(X_trans, X_trans_exp)


@pytest.mark.parametrize("imputer_constructor",
                         [SimpleImputer, IterativeImputer])
@pytest.mark.parametrize(
    "imputer_missing_values, missing_value, err_msg",
    [("NaN", np.nan, "Input contains NaN"),
     ("-1", -1, "types are expected to be both numerical.")])
def test_inconsistent_dtype_X_missing_values(imputer_constructor,
                                             imputer_missing_values,
                                             missing_value, err_msg):
    # regression test for issue #11390. Comparison between incoherent dtype
    # for X and missing_values was not raising a proper error.
    rng = np.random.RandomState(42)
    X = rng.randn(10, 10)
    X[0, 0] = missing_value

    imputer = imputer_constructor(missing_values=imputer_missing_values)

    with pytest.raises(ValueError, match=err_msg):
        imputer.fit_transform(X)


def test_missing_indicator_no_missing():
    # check that all features are dropped if there are no missing values when
    # features='missing-only' (#13491)
    X = np.array([[1, 1],
                  [1, 1]])
    mi = MissingIndicator(features='missing-only', missing_values=-1)
    Xt = mi.fit_transform(X)

    assert Xt.shape[1] == 0


def test_missing_indicator_sparse_no_explicit_zeros():
    # Check that non missing values don't become explicit zeros in the mask
    # generated by missing indicator when X is sparse. (#13491)
    X = sparse.csr_matrix([[0, 1, 2],
                           [1, 2, 0],
                           [2, 0, 1]])
    mi = MissingIndicator(features='all', missing_values=1)
    Xt = mi.fit_transform(X)

    assert Xt.getnnz() == Xt.sum()


@pytest.mark.parametrize("marker", [np.nan, -1, 0])
@pytest.mark.parametrize("imputer_constructor",
                         [SimpleImputer, IterativeImputer])
def test_imputers_add_indicator(marker, imputer_constructor):
    X = np.array([
        [marker, 1, 5, marker, 1],
        [2, marker, 1, marker, 2],
        [6, 3, marker, marker, 3],
        [1, 2, 9, marker, 4]
    ])
    X_true_indicator = np.array([
        [1., 0., 0., 1.],
        [0., 1., 0., 1.],
        [0., 0., 1., 1.],
        [0., 0., 0., 1.]
    ])
    imputer = imputer_constructor(missing_values=marker,
                                  add_indicator=True)

    X_trans = imputer.fit(X).transform(X)
    # The test is for testing the indicator, that's why we're looking at the
    # last 4 columns only.
    assert_allclose(X_trans[:, -4:], X_true_indicator)
    assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3]))


@pytest.mark.parametrize("imputer_constructor",
                         [SimpleImputer, IterativeImputer])
def test_imputer_without_indicator(imputer_constructor):
    X = np.array([[1, 1],
                  [1, 1]])
    imputer = imputer_constructor()
    imputer.fit(X)

    assert imputer.indicator_ is None


@pytest.mark.parametrize(
    "arr_type",
    [
        sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix,
        sparse.lil_matrix, sparse.bsr_matrix
    ]
)
def test_simple_imputation_add_indicator_sparse_matrix(arr_type):
    X_sparse = arr_type([
        [np.nan, 1, 5],
        [2, np.nan, 1],
        [6, 3, np.nan],
        [1, 2, 9]
    ])
    X_true = np.array([
        [3., 1., 5., 1., 0., 0.],
        [2., 2., 1., 0., 1., 0.],
        [6., 3., 5., 0., 0., 1.],
        [1., 2., 9., 0., 0., 0.],
    ])

    imputer = SimpleImputer(missing_values=np.nan, add_indicator=True)
    X_trans = imputer.fit_transform(X_sparse)

    assert sparse.issparse(X_trans)
    assert X_trans.shape == X_true.shape
    assert_allclose(X_trans.toarray(), X_true)
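# Illustrative sketch (added for this edit, not part of the original test
# file): MissingIndicator on its own returns the boolean missingness mask,
# which is exactly the block that add_indicator=True appends to the imputed
# output in the tests above.  The helper name is an invention; the leading
# underscore keeps pytest from collecting it as a test.
def _demo_missing_indicator_mask():
    X_demo = np.array([[np.nan, 2.],
                       [3., np.nan]])
    mask = MissingIndicator(missing_values=np.nan).fit_transform(X_demo)
    assert mask.dtype == bool
    assert_array_equal(mask, np.array([[True, False],
                                       [False, True]]))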
chrsrds/scikit-learn
sklearn/impute/tests/test_impute.py
sklearn/utils/tests/test_class_weight.py
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause

import numpy


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('__check_build', parent_package, top_path)
    config.add_extension('_check_build',
                         sources=['_check_build.pyx'],
                         include_dirs=[numpy.get_include()])

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
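# Usage note (not part of the original file, hedged): a numpy.distutils
# Configuration like the one above is typically compiled in place from the
# package directory with the standard distutils command
#
#     python setup.py build_ext --inplace
#
# which builds the _check_build.pyx extension so that it can be imported;
# generating C from the .pyx source is assumed to be handled by Cython in
# the surrounding build.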
features_indices) assert_allclose(X_fit_mask, X_fit_expected[:, features_indices]) assert_allclose(X_trans_mask, X_trans_expected[:, features_indices]) assert X_fit_mask.dtype == bool assert X_trans_mask.dtype == bool assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) indicator.set_params(sparse=True) X_fit_mask_sparse = indicator.fit_transform(X_fit) X_trans_mask_sparse = indicator.transform(X_trans) assert X_fit_mask_sparse.dtype == bool assert X_trans_mask_sparse.dtype == bool assert X_fit_mask_sparse.format == 'csc' assert X_trans_mask_sparse.format == 'csc' assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask) assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask) @pytest.mark.parametrize( "arr_type", [sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix, sparse.lil_matrix, sparse.bsr_matrix]) def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type): # test for sparse input and missing_value == 0 missing_values = 0 X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) # convert the input to the right array format X_fit_sparse = arr_type(X_fit) X_trans_sparse = arr_type(X_trans) indicator = MissingIndicator(missing_values=missing_values) with pytest.raises(ValueError, match="Sparse input with missing_values=0"): indicator.fit_transform(X_fit_sparse) indicator.fit_transform(X_fit) with pytest.raises(ValueError, match="Sparse input with missing_values=0"): indicator.transform(X_trans_sparse) @pytest.mark.parametrize("param_sparse", [True, False, 'auto']) @pytest.mark.parametrize("missing_values, arr_type", [(np.nan, np.array), (0, np.array), (np.nan, sparse.csc_matrix), (np.nan, sparse.csr_matrix), (np.nan, sparse.coo_matrix), (np.nan, sparse.lil_matrix) ]) def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse): # check the format of the output with different sparse parameter X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) X_fit = arr_type(X_fit).astype(np.float64) X_trans = arr_type(X_trans).astype(np.float64) indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse) X_fit_mask = indicator.fit_transform(X_fit) X_trans_mask = indicator.transform(X_trans) if param_sparse is True: assert X_fit_mask.format == 'csc' assert X_trans_mask.format == 'csc' elif param_sparse == 'auto' and missing_values == 0: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) elif param_sparse is False: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) else: if sparse.issparse(X_fit): assert X_fit_mask.format == 'csc' assert X_trans_mask.format == 'csc' else: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) def test_missing_indicator_string(): X = np.array([['a', 'b', 'c'], ['b', 'c', 'a']], dtype=object) indicator = MissingIndicator(missing_values='a', features='all') X_trans = indicator.fit_transform(X) assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]])) @pytest.mark.parametrize( "X, missing_values, X_trans_exp", [(np.array([['a', 'b'], ['b', 'a']], dtype=object), 'a', np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object)), (np.array([[np.nan, 1.], [1., np.nan]]), np.nan, np.array([[1., 1., True, False], [1., 1., False, True]])), (np.array([[np.nan, 
'b'], ['b', np.nan]], dtype=object), np.nan, np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object)), (np.array([[None, 'b'], ['b', None]], dtype=object), None, np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object))] ) def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp): trans = make_union( SimpleImputer(missing_values=missing_values, strategy='most_frequent'), MissingIndicator(missing_values=missing_values) ) X_trans = trans.fit_transform(X) assert_array_equal(X_trans, X_trans_exp) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) @pytest.mark.parametrize( "imputer_missing_values, missing_value, err_msg", [("NaN", np.nan, "Input contains NaN"), ("-1", -1, "types are expected to be both numerical.")]) def test_inconsistent_dtype_X_missing_values(imputer_constructor, imputer_missing_values, missing_value, err_msg): # regression test for issue #11390. Comparison between incoherent dtype # for X and missing_values was not raising a proper error. rng = np.random.RandomState(42) X = rng.randn(10, 10) X[0, 0] = missing_value imputer = imputer_constructor(missing_values=imputer_missing_values) with pytest.raises(ValueError, match=err_msg): imputer.fit_transform(X) def test_missing_indicator_no_missing(): # check that all features are dropped if there are no missing values when # features='missing-only' (#13491) X = np.array([[1, 1], [1, 1]]) mi = MissingIndicator(features='missing-only', missing_values=-1) Xt = mi.fit_transform(X) assert Xt.shape[1] == 0 def test_missing_indicator_sparse_no_explicit_zeros(): # Check that non missing values don't become explicit zeros in the mask # generated by missing indicator when X is sparse. (#13491) X = sparse.csr_matrix([[0, 1, 2], [1, 2, 0], [2, 0, 1]]) mi = MissingIndicator(features='all', missing_values=1) Xt = mi.fit_transform(X) assert Xt.getnnz() == Xt.sum() @pytest.mark.parametrize("marker", [np.nan, -1, 0]) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) def test_imputers_add_indicator(marker, imputer_constructor): X = np.array([ [marker, 1, 5, marker, 1], [2, marker, 1, marker, 2], [6, 3, marker, marker, 3], [1, 2, 9, marker, 4] ]) X_true_indicator = np.array([ [1., 0., 0., 1.], [0., 1., 0., 1.], [0., 0., 1., 1.], [0., 0., 0., 1.] ]) imputer = imputer_constructor(missing_values=marker, add_indicator=True) X_trans = imputer.fit(X).transform(X) # The test is for testing the indicator, # that's why we're looking at the last 4 columns only. 
assert_allclose(X_trans[:, -4:], X_true_indicator) assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) def test_imputer_without_indicator(imputer_constructor): X = np.array([[1, 1], [1, 1]]) imputer = imputer_constructor() imputer.fit(X) assert imputer.indicator_ is None @pytest.mark.parametrize( "arr_type", [ sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix, sparse.lil_matrix, sparse.bsr_matrix ] ) def test_simple_imputation_add_indicator_sparse_matrix(arr_type): X_sparse = arr_type([ [np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9] ]) X_true = np.array([ [3., 1., 5., 1., 0., 0.], [2., 2., 1., 0., 1., 0.], [6., 3., 5., 0., 0., 1.], [1., 2., 9., 0., 0., 0.], ]) imputer = SimpleImputer(missing_values=np.nan, add_indicator=True) X_trans = imputer.fit_transform(X_sparse) assert sparse.issparse(X_trans) assert X_trans.shape == X_true.shape assert_allclose(X_trans.toarray(), X_true)
repo_name: chrsrds/scikit-learn
test_path: sklearn/impute/tests/test_impute.py
code_path: sklearn/__check_build/setup.py
""" Our own implementation of the Newton algorithm Unlike the scipy.optimize version, this version of the Newton conjugate gradient solver uses only one function call to retrieve the func value, the gradient value and a callable for the Hessian matvec product. If the function call is very expensive (e.g. for logistic regression with large design matrix), this approach gives very significant speedups. """ # This is a modified file from scipy.optimize # Original authors: Travis Oliphant, Eric Jones # Modifications by Gael Varoquaux, Mathieu Blondel and Tom Dupre la Tour # License: BSD import numpy as np import warnings from scipy.optimize.linesearch import line_search_wolfe2, line_search_wolfe1 from ..exceptions import ConvergenceWarning class _LineSearchError(RuntimeError): pass def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs): """ Same as line_search_wolfe1, but fall back to line_search_wolfe2 if suitable step length is not found, and raise an exception if a suitable step length is not found. Raises ------ _LineSearchError If no suitable step size is found """ ret = line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs) if ret[0] is None: # line search failed: try different one. ret = line_search_wolfe2(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs) if ret[0] is None: raise _LineSearchError() return ret def _cg(fhess_p, fgrad, maxiter, tol): """ Solve iteratively the linear system 'fhess_p . xsupi = fgrad' with a conjugate gradient descent. Parameters ---------- fhess_p : callable Function that takes the gradient as a parameter and returns the matrix product of the Hessian and gradient fgrad : ndarray, shape (n_features,) or (n_features + 1,) Gradient vector maxiter : int Number of CG iterations. tol : float Stopping criterion. Returns ------- xsupi : ndarray, shape (n_features,) or (n_features + 1,) Estimated solution """ xsupi = np.zeros(len(fgrad), dtype=fgrad.dtype) ri = fgrad psupi = -ri i = 0 dri0 = np.dot(ri, ri) while i <= maxiter: if np.sum(np.abs(ri)) <= tol: break Ap = fhess_p(psupi) # check curvature curv = np.dot(psupi, Ap) if 0 <= curv <= 3 * np.finfo(np.float64).eps: break elif curv < 0: if i > 0: break else: # fall back to steepest descent direction xsupi += dri0 / curv * psupi break alphai = dri0 / curv xsupi += alphai * psupi ri = ri + alphai * Ap dri1 = np.dot(ri, ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update np.dot(ri,ri) for next time. return xsupi def newton_cg(grad_hess, func, grad, x0, args=(), tol=1e-4, maxiter=100, maxinner=200, line_search=True, warn=True): """ Minimization of scalar function of one or more variables using the Newton-CG algorithm. Parameters ---------- grad_hess : callable Should return the gradient and a callable returning the matvec product of the Hessian. func : callable Should return the value of the function. grad : callable Should return the function value and the gradient. This is used by the linesearch functions. x0 : array of float Initial guess. args : tuple, optional Arguments passed to func_grad_hess, func and grad. tol : float Stopping criterion. The iteration will stop when ``max{|g_i | i = 1, ..., n} <= tol`` where ``g_i`` is the i-th component of the gradient. maxiter : int Number of Newton iterations. maxinner : int Number of CG iterations. line_search : boolean Whether to use a line search or not. warn : boolean Whether to warn when didn't converge. Returns ------- xk : ndarray of float Estimated minimum. 
""" x0 = np.asarray(x0).flatten() xk = x0 k = 0 if line_search: old_fval = func(x0, *args) old_old_fval = None # Outer loop: our Newton iteration while k < maxiter: # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - fgrad f(xk) starting from 0. fgrad, fhess_p = grad_hess(xk, *args) absgrad = np.abs(fgrad) if np.max(absgrad) < tol: break maggrad = np.sum(absgrad) eta = min([0.5, np.sqrt(maggrad)]) termcond = eta * maggrad # Inner loop: solve the Newton update by conjugate gradient, to # avoid inverting the Hessian xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond) alphak = 1.0 if line_search: try: alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(func, grad, xk, xsupi, fgrad, old_fval, old_old_fval, args=args) except _LineSearchError: warnings.warn('Line Search failed') break xk = xk + alphak * xsupi # upcast if necessary k += 1 if warn and k >= maxiter: warnings.warn("newton-cg failed to converge. Increase the " "number of iterations.", ConvergenceWarning) return xk, k def _check_optimize_result(solver, result, max_iter=None): """Check the OptimizeResult for successful convergence Parameters ---------- solver: str solver name. Currently only `lbfgs` is supported. result: OptimizeResult result of the scipy.optimize.minimize function max_iter: {int, None} expected maximum number of iterations Returns ------- n_iter: int number of iterations """ # handle both scipy and scikit-learn solver names if solver == "lbfgs": if result.status != 0: warnings.warn("{} failed to converge (status={}): {}. " "Increase the number of iterations." .format(solver, result.status, result.message), ConvergenceWarning) if max_iter is not None: # In scipy <= 1.0.0, nit may exceed maxiter for lbfgs. # See https://github.com/scipy/scipy/issues/7854 n_iter_i = min(result.nit, max_iter) else: n_iter_i = result.nit else: raise NotImplementedError return n_iter_i
from __future__ import division import pytest import numpy as np from scipy import sparse from scipy.stats import kstest import io from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_allclose_dense_sparse from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal # make IterativeImputer available from sklearn.experimental import enable_iterative_imputer # noqa from sklearn.datasets import load_boston from sklearn.impute import MissingIndicator from sklearn.impute import SimpleImputer, IterativeImputer from sklearn.dummy import DummyRegressor from sklearn.linear_model import BayesianRidge, ARDRegression, RidgeCV from sklearn.pipeline import Pipeline from sklearn.pipeline import make_union from sklearn.model_selection import GridSearchCV from sklearn import tree from sklearn.random_projection import sparse_random_matrix def _check_statistics(X, X_true, strategy, statistics, missing_values): """Utility function for testing imputation for a given strategy. Test with dense and sparse arrays Check that: - the statistics (mean, median, mode) are correct - the missing values are imputed correctly""" err_msg = "Parameters: strategy = %s, missing_values = %s, " \ "sparse = {0}" % (strategy, missing_values) assert_ae = assert_array_equal if X.dtype.kind == 'f' or X_true.dtype.kind == 'f': assert_ae = assert_array_almost_equal # Normal matrix imputer = SimpleImputer(missing_values, strategy=strategy) X_trans = imputer.fit(X).transform(X.copy()) assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False)) assert_ae(X_trans, X_true, err_msg=err_msg.format(False)) # Sparse matrix imputer = SimpleImputer(missing_values, strategy=strategy) imputer.fit(sparse.csc_matrix(X)) X_trans = imputer.transform(sparse.csc_matrix(X.copy())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True)) assert_ae(X_trans, X_true, err_msg=err_msg.format(True)) @pytest.mark.parametrize("strategy", ['mean', 'median', 'most_frequent', "constant"]) def test_imputation_shape(strategy): # Verify the shapes of the imputed matrix for different strategies. 
X = np.random.randn(10, 2) X[::2] = np.nan imputer = SimpleImputer(strategy=strategy) X_imputed = imputer.fit_transform(sparse.csr_matrix(X)) assert X_imputed.shape == (10, 2) X_imputed = imputer.fit_transform(X) assert X_imputed.shape == (10, 2) iterative_imputer = IterativeImputer(initial_strategy=strategy) X_imputed = iterative_imputer.fit_transform(X) assert X_imputed.shape == (10, 2) @pytest.mark.parametrize("strategy", ["const", 101, None]) def test_imputation_error_invalid_strategy(strategy): X = np.ones((3, 5)) X[0, 0] = np.nan with pytest.raises(ValueError, match=str(strategy)): imputer = SimpleImputer(strategy=strategy) imputer.fit_transform(X) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) def test_imputation_deletion_warning(strategy): X = np.ones((3, 5)) X[:, 0] = np.nan with pytest.warns(UserWarning, match="Deleting"): imputer = SimpleImputer(strategy=strategy, verbose=True) imputer.fit_transform(X) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"]) def test_imputation_error_sparse_0(strategy): # check that error are raised when missing_values = 0 and input is sparse X = np.ones((3, 5)) X[0] = 0 X = sparse.csc_matrix(X) imputer = SimpleImputer(strategy=strategy, missing_values=0) with pytest.raises(ValueError, match="Provide a dense array"): imputer.fit(X) imputer.fit(X.toarray()) with pytest.raises(ValueError, match="Provide a dense array"): imputer.transform(X) def safe_median(arr, *args, **kwargs): # np.median([]) raises a TypeError for numpy >= 1.10.1 length = arr.size if hasattr(arr, 'size') else len(arr) return np.nan if length == 0 else np.median(arr, *args, **kwargs) def safe_mean(arr, *args, **kwargs): # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1 length = arr.size if hasattr(arr, 'size') else len(arr) return np.nan if length == 0 else np.mean(arr, *args, **kwargs) def test_imputation_mean_median(): # Test imputation using the mean and median strategies, when # missing_values != 0. 
rng = np.random.RandomState(0) dim = 10 dec = 10 shape = (dim * dim, dim + dec) zeros = np.zeros(shape[0]) values = np.arange(1, shape[0] + 1) values[4::2] = - values[4::2] tests = [("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))), ("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v))))] for strategy, test_missing_values, true_value_fun in tests: X = np.empty(shape) X_true = np.empty(shape) true_statistics = np.empty(shape[1]) # Create a matrix X with columns # - with only zeros, # - with only missing values # - with zeros, missing values and values # And a matrix X_true containing all true values for j in range(shape[1]): nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1) nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0) nb_values = shape[0] - nb_zeros - nb_missing_values z = zeros[:nb_zeros] p = np.repeat(test_missing_values, nb_missing_values) v = values[rng.permutation(len(values))[:nb_values]] true_statistics[j] = true_value_fun(z, v, p) # Create the columns X[:, j] = np.hstack((v, z, p)) if 0 == test_missing_values: X_true[:, j] = np.hstack((v, np.repeat( true_statistics[j], nb_missing_values + nb_zeros))) else: X_true[:, j] = np.hstack((v, z, np.repeat(true_statistics[j], nb_missing_values))) # Shuffle them the same way np.random.RandomState(j).shuffle(X[:, j]) np.random.RandomState(j).shuffle(X_true[:, j]) # Mean doesn't support columns containing NaNs, median does if strategy == "median": cols_to_keep = ~np.isnan(X_true).any(axis=0) else: cols_to_keep = ~np.isnan(X_true).all(axis=0) X_true = X_true[:, cols_to_keep] _check_statistics(X, X_true, strategy, true_statistics, test_missing_values) def test_imputation_median_special_cases(): # Test median imputation with sparse boundary cases X = np.array([ [0, np.nan, np.nan], # odd: implicit zero [5, np.nan, np.nan], # odd: explicit nonzero [0, 0, np.nan], # even: average two zeros [-5, 0, np.nan], # even: avg zero and neg [0, 5, np.nan], # even: avg zero and pos [4, 5, np.nan], # even: avg nonzeros [-4, -5, np.nan], # even: avg negatives [-1, 2, np.nan], # even: crossing neg and pos ]).transpose() X_imputed_median = np.array([ [0, 0, 0], [5, 5, 5], [0, 0, 0], [-5, 0, -2.5], [0, 5, 2.5], [4, 5, 4.5], [-4, -5, -4.5], [-1, 2, .5], ]).transpose() statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5] _check_statistics(X, X_imputed_median, "median", statistics_median, np.nan) @pytest.mark.parametrize("strategy", ["mean", "median"]) @pytest.mark.parametrize("dtype", [None, object, str]) def test_imputation_mean_median_error_invalid_type(strategy, dtype): X = np.array([["a", "b", 3], [4, "e", 6], ["g", "h", 9]], dtype=dtype) with pytest.raises(ValueError, match="non-numeric data"): imputer = SimpleImputer(strategy=strategy) imputer.fit_transform(X) @pytest.mark.parametrize("strategy", ["constant", "most_frequent"]) @pytest.mark.parametrize("dtype", [str, np.dtype('U'), np.dtype('S')]) def test_imputation_const_mostf_error_invalid_types(strategy, dtype): # Test imputation on non-numeric data using "most_frequent" and "constant" # strategy X = np.array([ [np.nan, np.nan, "a", "f"], [np.nan, "c", np.nan, "d"], [np.nan, "b", "d", np.nan], [np.nan, "c", "d", "h"], ], dtype=dtype) err_msg = "SimpleImputer does not support data" with pytest.raises(ValueError, match=err_msg): imputer = SimpleImputer(strategy=strategy) imputer.fit(X).transform(X) def test_imputation_most_frequent(): # Test imputation using the most-frequent strategy. 
X = np.array([ [-1, -1, 0, 5], [-1, 2, -1, 3], [-1, 1, 3, -1], [-1, 2, 3, 7], ]) X_true = np.array([ [2, 0, 5], [2, 3, 3], [1, 3, 3], [2, 3, 7], ]) # scipy.stats.mode, used in SimpleImputer, doesn't return the first most # frequent as promised in the doc but the lowest most frequent. When this # test will fail after an update of scipy, SimpleImputer will need to be # updated to be consistent with the new (correct) behaviour _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1) @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) def test_imputation_most_frequent_objects(marker): # Test imputation using the most-frequent strategy. X = np.array([ [marker, marker, "a", "f"], [marker, "c", marker, "d"], [marker, "b", "d", marker], [marker, "c", "d", "h"], ], dtype=object) X_true = np.array([ ["c", "a", "f"], ["c", "d", "d"], ["b", "d", "d"], ["c", "d", "h"], ], dtype=object) imputer = SimpleImputer(missing_values=marker, strategy="most_frequent") X_trans = imputer.fit(X).transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("dtype", [object, "category"]) def test_imputation_most_frequent_pandas(dtype): # Test imputation using the most frequent strategy on pandas df pd = pytest.importorskip("pandas") f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n" ",i,x,\n" "a,,y,\n" "a,j,,\n" "b,j,x,") df = pd.read_csv(f, dtype=dtype) X_true = np.array([ ["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"] ], dtype=object) imputer = SimpleImputer(strategy="most_frequent") X_trans = imputer.fit_transform(df) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1., np.nan)]) def test_imputation_constant_error_invalid_type(X_data, missing_value): # Verify that exceptions are raised on invalid fill_value type X = np.full((3, 5), X_data, dtype=float) X[0, 0] = missing_value with pytest.raises(ValueError, match="imputing numerical"): imputer = SimpleImputer(missing_values=missing_value, strategy="constant", fill_value="x") imputer.fit_transform(X) def test_imputation_constant_integer(): # Test imputation using the constant strategy on integers X = np.array([ [-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1] ]) X_true = np.array([ [0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0] ]) imputer = SimpleImputer(missing_values=-1, strategy="constant", fill_value=0) X_trans = imputer.fit_transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("array_constructor", [sparse.csr_matrix, np.asarray]) def test_imputation_constant_float(array_constructor): # Test imputation using the constant strategy on floats X = np.array([ [np.nan, 1.1, 0, np.nan], [1.2, np.nan, 1.3, np.nan], [0, 0, np.nan, np.nan], [1.4, 1.5, 0, np.nan] ]) X_true = np.array([ [-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1] ]) X = array_constructor(X) X_true = array_constructor(X_true) imputer = SimpleImputer(strategy="constant", fill_value=-1) X_trans = imputer.fit_transform(X) assert_allclose_dense_sparse(X_trans, X_true) @pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0]) def test_imputation_constant_object(marker): # Test imputation using the constant strategy on objects X = np.array([ [marker, "a", "b", marker], ["c", marker, "d", marker], ["e", "f", marker, marker], ["g", "h", "i", marker] ], dtype=object) X_true = np.array([ ["missing", "a", "b", "missing"], ["c", "missing", "d", "missing"], ["e", "f", "missing", "missing"], ["g", "h", "i", "missing"] ], dtype=object) imputer = 
SimpleImputer(missing_values=marker, strategy="constant", fill_value="missing") X_trans = imputer.fit_transform(X) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize("dtype", [object, "category"]) def test_imputation_constant_pandas(dtype): # Test imputation using the constant strategy on pandas df pd = pytest.importorskip("pandas") f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n" ",i,x,\n" "a,,y,\n" "a,j,,\n" "b,j,x,") df = pd.read_csv(f, dtype=dtype) X_true = np.array([ ["missing_value", "i", "x", "missing_value"], ["a", "missing_value", "y", "missing_value"], ["a", "j", "missing_value", "missing_value"], ["b", "j", "x", "missing_value"] ], dtype=object) imputer = SimpleImputer(strategy="constant") X_trans = imputer.fit_transform(df) assert_array_equal(X_trans, X_true) @pytest.mark.parametrize('Imputer', (SimpleImputer, IterativeImputer)) def test_imputation_missing_value_in_test_array(Imputer): # [Non Regression Test for issue #13968] Missing value in test set should # not throw an error and return a finite dataset train = [[1], [2]] test = [[3], [np.nan]] imputer = Imputer(add_indicator=True) imputer.fit(train).transform(test) def test_imputation_pipeline_grid_search(): # Test imputation within a pipeline + gridsearch. X = sparse_random_matrix(100, 100, density=0.10) missing_values = X.data[0] pipeline = Pipeline([('imputer', SimpleImputer(missing_values=missing_values)), ('tree', tree.DecisionTreeRegressor(random_state=0))]) parameters = { 'imputer__strategy': ["mean", "median", "most_frequent"] } Y = sparse_random_matrix(100, 1, density=0.10).toarray() gs = GridSearchCV(pipeline, parameters) gs.fit(X, Y) def test_imputation_copy(): # Test imputation with copy X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0) # copy=True, dense => copy X = X_orig.copy().toarray() imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert not np.all(X == Xt) # copy=True, sparse csr => copy X = X_orig.copy() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert not np.all(X.data == Xt.data) # copy=False, dense => no copy X = X_orig.copy().toarray() imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_array_almost_equal(X, Xt) # copy=False, sparse csc => no copy X = X_orig.copy().tocsc() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_array_almost_equal(X.data, Xt.data) # copy=False, sparse csr => copy X = X_orig.copy() imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert not np.all(X.data == Xt.data) # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is # made, even if copy=False. 
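
# An illustrative sketch of the copy semantics exercised above (an editor's
# addition, not an original test; ``_demo_simple_imputer_copy_semantics`` is
# a hypothetical helper).  With copy=False and a dense float array, transform
# is expected to write the imputed values into the input array itself; as
# noted above, sparse input with missing_values=0 is copied regardless.
def _demo_simple_imputer_copy_semantics():
    X = np.array([[np.nan, 2.0], [4.0, np.nan]])
    imputer = SimpleImputer(missing_values=np.nan, strategy="mean", copy=False)
    imputer.fit(X).transform(X)
    # the original array now holds the column means in place of the NaNs
    assert not np.any(np.isnan(X))
    return X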
def test_iterative_imputer_zero_iters(): rng = np.random.RandomState(0) n = 100 d = 10 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() missing_flag = X == 0 X[missing_flag] = np.nan imputer = IterativeImputer(max_iter=0) X_imputed = imputer.fit_transform(X) # with max_iter=0, only initial imputation is performed assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) # repeat but force n_iter_ to 0 imputer = IterativeImputer(max_iter=5).fit(X) # transformed should not be equal to initial imputation assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X)) imputer.n_iter_ = 0 # now they should be equal as only initial imputation is done assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X)) def test_iterative_imputer_verbose(): rng = np.random.RandomState(0) n = 100 d = 3 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1) imputer.fit(X) imputer.transform(X) imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2) imputer.fit(X) imputer.transform(X) def test_iterative_imputer_all_missing(): n = 100 d = 3 X = np.zeros((n, d)) imputer = IterativeImputer(missing_values=0, max_iter=1) X_imputed = imputer.fit_transform(X) assert_allclose(X_imputed, imputer.initial_imputer_.transform(X)) @pytest.mark.parametrize( "imputation_order", ['random', 'roman', 'ascending', 'descending', 'arabic'] ) def test_iterative_imputer_imputation_order(imputation_order): rng = np.random.RandomState(0) n = 100 d = 10 max_iter = 2 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() X[:, 0] = 1 # this column should not be discarded by IterativeImputer imputer = IterativeImputer(missing_values=0, max_iter=max_iter, n_nearest_features=5, sample_posterior=False, min_value=0, max_value=1, verbose=1, imputation_order=imputation_order, random_state=rng) imputer.fit_transform(X) ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_] assert (len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_) if imputation_order == 'roman': assert np.all(ordered_idx[:d-1] == np.arange(1, d)) elif imputation_order == 'arabic': assert np.all(ordered_idx[:d-1] == np.arange(d-1, 0, -1)) elif imputation_order == 'random': ordered_idx_round_1 = ordered_idx[:d-1] ordered_idx_round_2 = ordered_idx[d-1:] assert ordered_idx_round_1 != ordered_idx_round_2 elif 'ending' in imputation_order: assert len(ordered_idx) == max_iter * (d - 1) @pytest.mark.parametrize( "estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()] ) def test_iterative_imputer_estimators(estimator): rng = np.random.RandomState(0) n = 100 d = 10 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer(missing_values=0, max_iter=1, estimator=estimator, random_state=rng) imputer.fit_transform(X) # check that types are correct for estimators hashes = [] for triplet in imputer.imputation_sequence_: expected_type = (type(estimator) if estimator is not None else type(BayesianRidge())) assert isinstance(triplet.estimator, expected_type) hashes.append(id(triplet.estimator)) # check that each estimator is unique assert len(set(hashes)) == len(hashes) def test_iterative_imputer_clip(): rng = np.random.RandomState(0) n = 100 d = 10 X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray() imputer = IterativeImputer(missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, 
                               random_state=rng)
    Xt = imputer.fit_transform(X)
    assert_allclose(np.min(Xt[X == 0]), 0.1)
    assert_allclose(np.max(Xt[X == 0]), 0.2)
    assert_allclose(Xt[X != 0], X[X != 0])


def test_iterative_imputer_clip_truncnorm():
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
    X[:, 0] = 1

    imputer = IterativeImputer(missing_values=0,
                               max_iter=2,
                               n_nearest_features=5,
                               sample_posterior=True,
                               min_value=0.1,
                               max_value=0.2,
                               verbose=1,
                               imputation_order='random',
                               random_state=rng)
    Xt = imputer.fit_transform(X)
    assert_allclose(np.min(Xt[X == 0]), 0.1)
    assert_allclose(np.max(Xt[X == 0]), 0.2)
    assert_allclose(Xt[X != 0], X[X != 0])


def test_iterative_imputer_truncated_normal_posterior():
    # test that the values that are imputed using `sample_posterior=True`
    # with boundaries (`min_value` and `max_value` are not None) are drawn
    # from a distribution that looks gaussian via the Kolmogorov Smirnov test.
    # note that starting from the wrong random seed will make this test fail
    # because random sampling doesn't occur at all when the imputation
    # is outside of the (min_value, max_value) range
    pytest.importorskip("scipy", minversion="0.17.0")

    rng = np.random.RandomState(42)

    X = rng.normal(size=(5, 5))
    X[0][0] = np.nan

    imputer = IterativeImputer(min_value=0,
                               max_value=0.5,
                               sample_posterior=True,
                               random_state=rng)
    imputer.fit_transform(X)
    # generate multiple imputations for the single missing value
    imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])

    assert all(imputations >= 0)
    assert all(imputations <= 0.5)

    mu, sigma = imputations.mean(), imputations.std()
    # guard against division by zero before standardizing
    if sigma == 0:
        sigma += 1e-12
    ks_statistic, p_value = kstest((imputations - mu) / sigma, 'norm')
    # we want to fail to reject null hypothesis
    # null hypothesis: distributions are the same
    assert ks_statistic < 0.2 or p_value > 0.1, \
        "The posterior does not appear to be normal"


@pytest.mark.parametrize(
    "strategy",
    ["mean", "median", "most_frequent"]
)
def test_iterative_imputer_missing_at_transform(strategy):
    rng = np.random.RandomState(0)
    n = 100
    d = 10
    X_train = rng.randint(low=0, high=3, size=(n, d))
    X_test = rng.randint(low=0, high=3, size=(n, d))

    X_train[:, 0] = 1  # definitely no missing values in 0th column
    X_test[0, 0] = 0  # definitely missing value in 0th column

    imputer = IterativeImputer(missing_values=0,
                               max_iter=1,
                               initial_strategy=strategy,
                               random_state=rng).fit(X_train)
    initial_imputer = SimpleImputer(missing_values=0,
                                    strategy=strategy).fit(X_train)

    # if there were no missing values at time of fit, then imputer will
    # only use the initial imputer for that feature at transform
    assert_allclose(imputer.transform(X_test)[:, 0],
                    initial_imputer.transform(X_test)[:, 0])


def test_iterative_imputer_transform_stochasticity():
    pytest.importorskip("scipy", minversion="0.17.0")
    rng1 = np.random.RandomState(0)
    rng2 = np.random.RandomState(1)
    n = 100
    d = 10
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray()

    # when sample_posterior=True, two transforms shouldn't be equal
    imputer = IterativeImputer(missing_values=0,
                               max_iter=1,
                               sample_posterior=True,
                               random_state=rng1)
    imputer.fit(X)

    X_fitted_1 = imputer.transform(X)
    X_fitted_2 = imputer.transform(X)

    # sufficient to assert that the means are not the same
    assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2))

    # when sample_posterior=False, and n_nearest_features=None
    # and imputation_order is not random
    # the two transforms should be
identical even if rng are different imputer1 = IterativeImputer(missing_values=0, max_iter=1, sample_posterior=False, n_nearest_features=None, imputation_order='ascending', random_state=rng1) imputer2 = IterativeImputer(missing_values=0, max_iter=1, sample_posterior=False, n_nearest_features=None, imputation_order='ascending', random_state=rng2) imputer1.fit(X) imputer2.fit(X) X_fitted_1a = imputer1.transform(X) X_fitted_1b = imputer1.transform(X) X_fitted_2 = imputer2.transform(X) assert_allclose(X_fitted_1a, X_fitted_1b) assert_allclose(X_fitted_1a, X_fitted_2) def test_iterative_imputer_no_missing(): rng = np.random.RandomState(0) X = rng.rand(100, 100) X[:, 0] = np.nan m1 = IterativeImputer(max_iter=10, random_state=rng) m2 = IterativeImputer(max_iter=10, random_state=rng) pred1 = m1.fit(X).transform(X) pred2 = m2.fit_transform(X) # should exclude the first column entirely assert_allclose(X[:, 1:], pred1) # fit and fit_transform should both be identical assert_allclose(pred1, pred2) def test_iterative_imputer_rank_one(): rng = np.random.RandomState(0) d = 50 A = rng.rand(d, 1) B = rng.rand(1, d) X = np.dot(A, B) nan_mask = rng.rand(d, d) < 0.5 X_missing = X.copy() X_missing[nan_mask] = np.nan imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng) X_filled = imputer.fit_transform(X_missing) assert_allclose(X_filled, X, atol=0.02) @pytest.mark.parametrize( "rank", [3, 5] ) def test_iterative_imputer_transform_recovery(rank): rng = np.random.RandomState(0) n = 70 d = 70 A = rng.rand(n, rank) B = rng.rand(rank, d) X_filled = np.dot(A, B) nan_mask = rng.rand(n, d) < 0.5 X_missing = X_filled.copy() X_missing[nan_mask] = np.nan # split up data in half n = n // 2 X_train = X_missing[:n] X_test_filled = X_filled[n:] X_test = X_missing[n:] imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng).fit(X_train) X_test_est = imputer.transform(X_test) assert_allclose(X_test_filled, X_test_est, atol=0.1) def test_iterative_imputer_additive_matrix(): rng = np.random.RandomState(0) n = 100 d = 10 A = rng.randn(n, d) B = rng.randn(n, d) X_filled = np.zeros(A.shape) for i in range(d): for j in range(d): X_filled[:, (i+j) % d] += (A[:, i] + B[:, j]) / 2 # a quarter is randomly missing nan_mask = rng.rand(n, d) < 0.25 X_missing = X_filled.copy() X_missing[nan_mask] = np.nan # split up data n = n // 2 X_train = X_missing[:n] X_test_filled = X_filled[n:] X_test = X_missing[n:] imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train) X_test_est = imputer.transform(X_test) assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01) @pytest.mark.parametrize("max_iter, tol, error_type, warning", [ (-1, 1e-3, ValueError, 'should be a positive integer'), (1, -1e-3, ValueError, 'should be a non-negative float') ]) def test_iterative_imputer_error_param(max_iter, tol, error_type, warning): X = np.zeros((100, 2)) imputer = IterativeImputer(max_iter=max_iter, tol=tol) with pytest.raises(error_type, match=warning): imputer.fit_transform(X) def test_iterative_imputer_early_stopping(): rng = np.random.RandomState(0) n = 50 d = 5 A = rng.rand(n, 1) B = rng.rand(1, d) X = np.dot(A, B) nan_mask = rng.rand(n, d) < 0.5 X_missing = X.copy() X_missing[nan_mask] = np.nan imputer = IterativeImputer(max_iter=100, tol=1e-2, sample_posterior=False, verbose=1, random_state=rng) X_filled_100 = imputer.fit_transform(X_missing) assert len(imputer.imputation_sequence_) == d * imputer.n_iter_ imputer = IterativeImputer(max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, 
random_state=rng) X_filled_early = imputer.fit_transform(X_missing) assert_allclose(X_filled_100, X_filled_early, atol=1e-7) imputer = IterativeImputer(max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng) imputer.fit(X_missing) assert imputer.n_iter_ == imputer.max_iter def test_iterative_imputer_catch_warning(): # check that we catch a RuntimeWarning due to a division by zero when a # feature is constant in the dataset X, y = load_boston(return_X_y=True) n_samples, n_features = X.shape # simulate that a feature only contain one category during fit X[:, 3] = 1 # add some missing values rng = np.random.RandomState(0) missing_rate = 0.15 for feat in range(n_features): sample_idx = rng.choice( np.arange(n_samples), size=int(n_samples * missing_rate), replace=False ) X[sample_idx, feat] = np.nan imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True) with pytest.warns(None) as record: X_fill = imputer.fit_transform(X, y) assert not record.list assert not np.any(np.isnan(X_fill)) @pytest.mark.parametrize( "X_fit, X_trans, params, msg_err", [(np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, -1]]), {'features': 'missing-only', 'sparse': 'auto'}, 'have missing values in transform but have no missing values in fit'), (np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, 2]]), {'features': 'random', 'sparse': 'auto'}, "'features' has to be either 'missing-only' or 'all'"), (np.array([[-1, 1], [1, 2]]), np.array([[-1, 1], [1, 2]]), {'features': 'all', 'sparse': 'random'}, "'sparse' has to be a boolean or 'auto'"), (np.array([['a', 'b'], ['c', 'a']], dtype=str), np.array([['a', 'b'], ['c', 'a']], dtype=str), {}, "MissingIndicator does not support data with dtype")] ) def test_missing_indicator_error(X_fit, X_trans, params, msg_err): indicator = MissingIndicator(missing_values=-1) indicator.set_params(**params) with pytest.raises(ValueError, match=msg_err): indicator.fit(X_fit).transform(X_trans) @pytest.mark.parametrize( "missing_values, dtype, arr_type", [(np.nan, np.float64, np.array), (0, np.int32, np.array), (-1, np.int32, np.array), (np.nan, np.float64, sparse.csc_matrix), (-1, np.int32, sparse.csc_matrix), (np.nan, np.float64, sparse.csr_matrix), (-1, np.int32, sparse.csr_matrix), (np.nan, np.float64, sparse.coo_matrix), (-1, np.int32, sparse.coo_matrix), (np.nan, np.float64, sparse.lil_matrix), (-1, np.int32, sparse.lil_matrix), (np.nan, np.float64, sparse.bsr_matrix), (-1, np.int32, sparse.bsr_matrix) ]) @pytest.mark.parametrize( "param_features, n_features, features_indices", [('missing-only', 3, np.array([0, 1, 2])), ('all', 3, np.array([0, 1, 2]))]) def test_missing_indicator_new(missing_values, arr_type, dtype, param_features, n_features, features_indices): X_fit = np.array([[missing_values, missing_values, 1], [4, 2, missing_values]]) X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]]) X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]]) # convert the input to the right array format and right dtype X_fit = arr_type(X_fit).astype(dtype) X_trans = arr_type(X_trans).astype(dtype) X_fit_expected = X_fit_expected.astype(dtype) X_trans_expected = X_trans_expected.astype(dtype) indicator = MissingIndicator(missing_values=missing_values, features=param_features, sparse=False) X_fit_mask = indicator.fit_transform(X_fit) X_trans_mask = indicator.transform(X_trans) assert X_fit_mask.shape[1] == n_features assert X_trans_mask.shape[1] == n_features assert_array_equal(indicator.features_, 
features_indices) assert_allclose(X_fit_mask, X_fit_expected[:, features_indices]) assert_allclose(X_trans_mask, X_trans_expected[:, features_indices]) assert X_fit_mask.dtype == bool assert X_trans_mask.dtype == bool assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) indicator.set_params(sparse=True) X_fit_mask_sparse = indicator.fit_transform(X_fit) X_trans_mask_sparse = indicator.transform(X_trans) assert X_fit_mask_sparse.dtype == bool assert X_trans_mask_sparse.dtype == bool assert X_fit_mask_sparse.format == 'csc' assert X_trans_mask_sparse.format == 'csc' assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask) assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask) @pytest.mark.parametrize( "arr_type", [sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix, sparse.lil_matrix, sparse.bsr_matrix]) def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type): # test for sparse input and missing_value == 0 missing_values = 0 X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) # convert the input to the right array format X_fit_sparse = arr_type(X_fit) X_trans_sparse = arr_type(X_trans) indicator = MissingIndicator(missing_values=missing_values) with pytest.raises(ValueError, match="Sparse input with missing_values=0"): indicator.fit_transform(X_fit_sparse) indicator.fit_transform(X_fit) with pytest.raises(ValueError, match="Sparse input with missing_values=0"): indicator.transform(X_trans_sparse) @pytest.mark.parametrize("param_sparse", [True, False, 'auto']) @pytest.mark.parametrize("missing_values, arr_type", [(np.nan, np.array), (0, np.array), (np.nan, sparse.csc_matrix), (np.nan, sparse.csr_matrix), (np.nan, sparse.coo_matrix), (np.nan, sparse.lil_matrix) ]) def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse): # check the format of the output with different sparse parameter X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]]) X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]]) X_fit = arr_type(X_fit).astype(np.float64) X_trans = arr_type(X_trans).astype(np.float64) indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse) X_fit_mask = indicator.fit_transform(X_fit) X_trans_mask = indicator.transform(X_trans) if param_sparse is True: assert X_fit_mask.format == 'csc' assert X_trans_mask.format == 'csc' elif param_sparse == 'auto' and missing_values == 0: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) elif param_sparse is False: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) else: if sparse.issparse(X_fit): assert X_fit_mask.format == 'csc' assert X_trans_mask.format == 'csc' else: assert isinstance(X_fit_mask, np.ndarray) assert isinstance(X_trans_mask, np.ndarray) def test_missing_indicator_string(): X = np.array([['a', 'b', 'c'], ['b', 'c', 'a']], dtype=object) indicator = MissingIndicator(missing_values='a', features='all') X_trans = indicator.fit_transform(X) assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]])) @pytest.mark.parametrize( "X, missing_values, X_trans_exp", [(np.array([['a', 'b'], ['b', 'a']], dtype=object), 'a', np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object)), (np.array([[np.nan, 1.], [1., np.nan]]), np.nan, np.array([[1., 1., True, False], [1., 1., False, True]])), (np.array([[np.nan, 
'b'], ['b', np.nan]], dtype=object), np.nan, np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object)), (np.array([[None, 'b'], ['b', None]], dtype=object), None, np.array([['b', 'b', True, False], ['b', 'b', False, True]], dtype=object))] ) def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp): trans = make_union( SimpleImputer(missing_values=missing_values, strategy='most_frequent'), MissingIndicator(missing_values=missing_values) ) X_trans = trans.fit_transform(X) assert_array_equal(X_trans, X_trans_exp) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) @pytest.mark.parametrize( "imputer_missing_values, missing_value, err_msg", [("NaN", np.nan, "Input contains NaN"), ("-1", -1, "types are expected to be both numerical.")]) def test_inconsistent_dtype_X_missing_values(imputer_constructor, imputer_missing_values, missing_value, err_msg): # regression test for issue #11390. Comparison between incoherent dtype # for X and missing_values was not raising a proper error. rng = np.random.RandomState(42) X = rng.randn(10, 10) X[0, 0] = missing_value imputer = imputer_constructor(missing_values=imputer_missing_values) with pytest.raises(ValueError, match=err_msg): imputer.fit_transform(X) def test_missing_indicator_no_missing(): # check that all features are dropped if there are no missing values when # features='missing-only' (#13491) X = np.array([[1, 1], [1, 1]]) mi = MissingIndicator(features='missing-only', missing_values=-1) Xt = mi.fit_transform(X) assert Xt.shape[1] == 0 def test_missing_indicator_sparse_no_explicit_zeros(): # Check that non missing values don't become explicit zeros in the mask # generated by missing indicator when X is sparse. (#13491) X = sparse.csr_matrix([[0, 1, 2], [1, 2, 0], [2, 0, 1]]) mi = MissingIndicator(features='all', missing_values=1) Xt = mi.fit_transform(X) assert Xt.getnnz() == Xt.sum() @pytest.mark.parametrize("marker", [np.nan, -1, 0]) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) def test_imputers_add_indicator(marker, imputer_constructor): X = np.array([ [marker, 1, 5, marker, 1], [2, marker, 1, marker, 2], [6, 3, marker, marker, 3], [1, 2, 9, marker, 4] ]) X_true_indicator = np.array([ [1., 0., 0., 1.], [0., 1., 0., 1.], [0., 0., 1., 1.], [0., 0., 0., 1.] ]) imputer = imputer_constructor(missing_values=marker, add_indicator=True) X_trans = imputer.fit(X).transform(X) # The test is for testing the indicator, # that's why we're looking at the last 4 columns only. 
assert_allclose(X_trans[:, -4:], X_true_indicator) assert_array_equal(imputer.indicator_.features_, np.array([0, 1, 2, 3])) @pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer]) def test_imputer_without_indicator(imputer_constructor): X = np.array([[1, 1], [1, 1]]) imputer = imputer_constructor() imputer.fit(X) assert imputer.indicator_ is None @pytest.mark.parametrize( "arr_type", [ sparse.csc_matrix, sparse.csr_matrix, sparse.coo_matrix, sparse.lil_matrix, sparse.bsr_matrix ] ) def test_simple_imputation_add_indicator_sparse_matrix(arr_type): X_sparse = arr_type([ [np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9] ]) X_true = np.array([ [3., 1., 5., 1., 0., 0.], [2., 2., 1., 0., 1., 0.], [6., 3., 5., 0., 0., 1.], [1., 2., 9., 0., 0., 0.], ]) imputer = SimpleImputer(missing_values=np.nan, add_indicator=True) X_trans = imputer.fit_transform(X_sparse) assert sparse.issparse(X_trans) assert X_trans.shape == X_true.shape assert_allclose(X_trans.toarray(), X_true)
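
# A small end-to-end sketch of the ``add_indicator`` behaviour covered above
# (an editor's addition, not an original test; the helper name is
# hypothetical): imputed columns come first, followed by one indicator
# column per feature that had missing values during fit.
def _demo_add_indicator_layout():
    X = np.array([[np.nan, 1.0],
                  [2.0, np.nan]])
    imputer = SimpleImputer(missing_values=np.nan, add_indicator=True)
    Xt = imputer.fit_transform(X)
    # 2 imputed columns + 2 indicator columns
    assert Xt.shape == (2, 4)
    assert_array_equal(Xt[:, 2:], np.array([[1.0, 0.0], [0.0, 1.0]]))
    return Xt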
repo_name: chrsrds/scikit-learn
test_path: sklearn/impute/tests/test_impute.py
code_path: sklearn/utils/optimize.py
""" Expression splitting for chunked computation To evaluate an expression on a large dataset we may need to chunk that dataset into pieces and evaluate on each of the pieces individually. This module contains logic to break up an expression-to-be-evaluated-on-the-entire-array into 1. An expression to be evaluated on each chunk of the data 2. An expression to be evaluated on the concatenated intermediate results As an example, consider counting the number of non-null elements in a large list. We have the following recipe 1. Break the large collection into chunks, each of which fits comfortably into memory 2. For each chunk, call compute chunk.count() 3. Gather all of these results into a single list (which hopefully fits in memory) 4. for this aggregate, compute aggregate.sum() And so, given this expression expr -> expr.count() We needed the following expressions chunk -> chunk.count() agg -> agg.sum() This module performs this transformation for a wide array of chunkable expressions. It supports elementwise operations, reductions, split-apply-combine, and selections. It notably does not support sorting, joining, or slicing. If explicit chunksizes are given it can also reason about the size and shape of the intermediate aggregate. It can also do this in N-Dimensions. """ from __future__ import absolute_import, division, print_function from toolz import concat as tconcat import datashape from datashape.predicates import isscalar, isrecord from math import floor from .core import * from .expressions import * from .strings import Like from .expressions import ndim, shape from .math import sqrt from .reductions import * from .split_apply_combine import * from .collections import * from .table import * from ..dispatch import dispatch from ..compatibility import builtins good_to_split = (Reduction, Summary, By, Distinct) can_split = good_to_split + (Like, Selection, ElemWise, Apply) __all__ = ['path_split', 'split'] def path_split(leaf, expr): """ Find the right place in the expression tree/line to parallelize >>> t = symbol('t', 'var * {name: string, amount: int, id: int}') >>> path_split(t, t.amount.sum() + 1) sum(t.amount) >>> path_split(t, t.amount.distinct().sort()) distinct(t.amount) """ last = None for node in list(path(expr, leaf))[:-1][::-1]: if isinstance(node, good_to_split): return node elif not isinstance(node, can_split): return last last = node return node def split(leaf, expr, chunk=None, agg=None, **kwargs): """ Split expression for chunked computation Break up a computation ``leaf -> expr`` so that it can be run in chunks. This returns two computations, one to perform on each chunk and then one to perform on the union of these intermediate results chunk -> chunk-expr aggregate -> aggregate-expr The chunk_expr will have the same dimesions as the input (reductions set keepdims=True) so that this should work cleanly with concatenation functions like ``np.concatenate``. 
Returns ------- Pair of (Symbol, Expr) pairs (chunk, chunk_expr), (aggregate, aggregate_expr) >>> t = symbol('t', 'var * {name: string, amount: int, id: int}') >>> expr = t.id.count() >>> split(t, expr) ((chunk, count(chunk.id, keepdims=True)), (aggregate, sum(aggregate))) """ center = path_split(leaf, expr) if chunk is None: if leaf.ndim > 1: raise ValueError("Please provide a chunk symbol") else: chunk = symbol('chunk', datashape.var * leaf.dshape.measure) chunk_expr = _split_chunk(center, leaf=leaf, chunk=chunk, **kwargs) chunk_expr_with_keepdims = _split_chunk(center, leaf=leaf, chunk=chunk, keepdims=True) if agg is None: agg_shape = aggregate_shape(leaf, expr, chunk, chunk_expr_with_keepdims) agg_dshape = DataShape(*(agg_shape + (chunk_expr.dshape.measure,))) agg = symbol('aggregate', agg_dshape) agg_expr = _split_agg(center, leaf=leaf, agg=agg) return ((chunk, chunk_expr), (agg, expr._subs({center: agg})._subs({agg: agg_expr}))) reductions = {sum: (sum, sum), count: (count, sum), min: (min, min), max: (max, max), any: (any, any), all: (all, all), nelements: (nelements, sum)} @dispatch(Expr) def _split(expr, leaf=None, chunk=None, agg=None, keepdims=True): return ((chunk, _split_chunk(expr, leaf=leaf, chunk=chunk, keepdims=keepdims)), (agg, _split_agg(expr, leaf=leaf, agg=agg))) @dispatch(tuple(reductions)) def _split_chunk(expr, leaf=None, chunk=None, keepdims=True): a, b = reductions[type(expr)] return a(expr._subs({leaf: chunk})._child, keepdims=keepdims, axis=expr.axis) @dispatch(tuple(reductions)) def _split_agg(expr, leaf=None, agg=None): a, b = reductions[type(expr)] return b(agg, axis=expr.axis, keepdims=expr.keepdims) @dispatch(mean) def _split_chunk(expr, leaf=None, chunk=None, keepdims=True): child = expr._subs({leaf: chunk})._child return summary(total=child.sum(), count=child.count(), keepdims=keepdims, axis=expr.axis) @dispatch(mean) def _split_agg(expr, leaf=None, agg=None): total = agg.total.sum(axis=expr.axis, keepdims=expr.keepdims) count = agg.count.sum(axis=expr.axis, keepdims=expr.keepdims) return total / count @dispatch((std, var)) def _split_chunk(expr, leaf=None, chunk=None, keepdims=True): child = expr._subs({leaf: chunk})._child return summary(x=child.sum(), x2=(child**2).sum(), n=child.count(), keepdims=keepdims, axis=expr.axis) @dispatch(var) def _split_agg(expr, leaf=None, agg=None): x = agg.x.sum(axis=expr.axis, keepdims=expr.keepdims) x2 = agg.x2.sum(axis=expr.axis, keepdims=expr.keepdims) n = agg.n.sum(axis=expr.axis, keepdims=expr.keepdims) result = (x2 / n) - (x / n)**2 if expr.unbiased: result = result / (n - 1) * n return result @dispatch(std) def _split_agg(expr, leaf=None, agg=None): x = agg.x.sum(axis=expr.axis, keepdims=expr.keepdims) x2 = agg.x2.sum(axis=expr.axis, keepdims=expr.keepdims) n = agg.n.sum(axis=expr.axis, keepdims=expr.keepdims) result = (x2 / n) - (x / n)**2 if expr.unbiased: result = result / (n - 1) * n return sqrt(result) @dispatch(Distinct) def _split_chunk(expr, leaf=None, chunk=None, **kwargs): return expr._subs({leaf: chunk}) @dispatch(Distinct) def _split_agg(expr, leaf=None, agg=None): return agg.distinct() @dispatch(nunique) def _split_chunk(expr, leaf=None, chunk=None, **kwargs): return (expr._child ._subs({leaf: chunk}) .distinct()) @dispatch(nunique) def _split_agg(expr, leaf=None, agg=None): return agg.distinct().count(keepdims=expr.keepdims) @dispatch(Summary) def _split_chunk(expr, leaf=None, chunk=None, keepdims=True): exprs = [(name, split(leaf, val, chunk=chunk, keepdims=False)[0][1]) for name, val in 
zip(expr.fields, expr.values)] d = dict() for name, e in exprs: if isinstance(e, Reduction): d[name] = e elif isinstance(e, Summary): for n, v in zip(e.names, e.values): d[name + '_' + n] = v else: raise NotImplementedError() return summary(keepdims=keepdims, **d) @dispatch(Summary) def _split_agg(expr, leaf=None, chunk=None, agg=None, keepdims=True): exprs = [(name, split(leaf, val, keepdims=False)[1]) for name, val in zip(expr.fields, expr.values)] d = dict() for name, (a, ae) in exprs: if isscalar(a.dshape.measure): # For simple reductions d[name] = ae._subs({a: agg[name]}) elif isrecord(a.dshape.measure): # For reductions like mean/var names = ['%s_%s' % (name, field) for field in a.fields] namedict = dict(zip(a.fields, names)) d[name] = ae._subs(toolz.merge({a: agg}, namedict)) return summary(**d) @dispatch(By) def _split_chunk(expr, leaf=None, chunk=None, **kwargs): chunk_apply = _split_chunk(expr.apply, leaf=leaf, chunk=chunk, keepdims=False) chunk_grouper = expr.grouper._subs({leaf: chunk}) return by(chunk_grouper, chunk_apply) @dispatch(By) def _split_agg(expr, leaf=None, agg=None): agg_apply = _split_agg(expr.apply, leaf=leaf, agg=agg) agg_grouper = expr.grouper._subs({leaf: agg}) ngroup = len(expr.grouper.fields) if isscalar(expr.grouper.dshape.measure): agg_grouper = agg[agg.fields[0]] else: agg_grouper = agg[list(agg.fields[:ngroup])] return (by(agg_grouper, agg_apply) .relabel(dict(zip(agg.fields[:ngroup], expr.fields[:ngroup])))) @dispatch((ElemWise, Like, Selection)) def _split_chunk(expr, leaf=None, chunk=None, **kwargs): return expr._subs({leaf: chunk}) @dispatch((ElemWise, Like, Selection)) def _split_agg(expr, leaf=None, agg=None): return agg @dispatch(Apply) def _split_chunk(expr, leaf=None, chunk=None, **kwargs): if expr._splittable: return expr._subs({leaf: chunk}) else: raise NotImplementedError() @dispatch(Apply) def _split_agg(expr, leaf=None, agg=None): return agg from datashape import Fixed from math import ceil def dimension_div(a, b): """ How many times does b fit into a? >>> dimension_div(10, 5) 2 We round up >>> dimension_div(20, 9) 3 In the case of datashape.var, we resort to var >>> from datashape import var >>> dimension_div(var, 5) Var() >>> dimension_div(50, var) Var() """ if a == datashape.var or b == datashape.var: return datashape.var if isinstance(a, Fixed): a = int(a) if isinstance(b, Fixed): b = int(b) return int(ceil(a / b)) def dimension_mul(a, b): """ Given b number of a's how big is our dimension? 
>>> dimension_mul(2, 5) 10 We round up >>> dimension_mul(9, 3) 27 In the case of datashape.var, we resort to var >>> from datashape import var >>> dimension_mul(datashape.var, 5) Var() >>> dimension_mul(10, datashape.var) Var() """ if a == datashape.var or b == datashape.var: return datashape.var if isinstance(a, Fixed): a = int(a) if isinstance(b, Fixed): b = int(b) return int(a * b) def aggregate_shape(leaf, expr, chunk, chunk_expr): """ The shape of the intermediate aggregate >>> leaf = symbol('leaf', '10 * 10 * int') >>> expr = leaf.sum(axis=0) >>> chunk = symbol('chunk', '3 * 3 * int') # 3 does not divide 10 >>> chunk_expr = chunk.sum(axis=0, keepdims=True) >>> aggregate_shape(leaf, expr, chunk, chunk_expr) (4, 10) """ if datashape.var in tconcat(map(shape, [leaf, expr, chunk, chunk_expr])): return (datashape.var, ) * leaf.ndim numblocks = [int(floor(l / c)) for l, c in zip(leaf.shape, chunk.shape)] last_chunk_shape = [l % c for l, c in zip(leaf.shape, chunk.shape)] if builtins.sum(last_chunk_shape) != 0: old = last_chunk_shape last_chunk = symbol(chunk._name, DataShape(*(last_chunk_shape + [chunk.dshape.measure]))) last_chunk_expr = chunk_expr._subs({chunk: last_chunk}) last_chunk_shape = shape(last_chunk_expr) # Keep zeros if they were there before last_chunk_shape = tuple(a if b != 0 else 0 for a, b in zip(last_chunk_shape, old)) return tuple(int(floor(l / c)) * ce + lce for l, c, ce, lce in zip(shape(leaf), shape(chunk), shape(chunk_expr), last_chunk_shape))
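A minimal usage sketch of ``split``, mirroring the module's own doctest; the mean case below is derived from the dispatch rules above and shows the summary-based chunk expression:

# Hedged sketch, assuming blaze and its expression machinery import as above.
from blaze.expr import symbol
from blaze.expr.split import split

t = symbol('t', 'var * {name: string, amount: int, id: int}')
(chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount.mean())
# chunk_expr is a summary of per-chunk totals and counts (keepdims=True);
# agg_expr sums those intermediates and divides total by count.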
from itertools import product import pytest from blaze.expr import symbol, summary from datashape import dshape def test_reduction_dshape(): x = symbol('x', '5 * 3 * float32') assert x.sum().dshape == dshape('float64') assert x.sum(axis=0).dshape == dshape('3 * float64') assert x.sum(axis=1).dshape == dshape('5 * float64') assert x.sum(axis=(0, 1)).dshape == dshape('float64') def test_keepdims(): x = symbol('x', '5 * 3 * float32') assert x.sum(axis=0, keepdims=True).dshape == dshape('1 * 3 * float64') assert x.sum(axis=1, keepdims=True).dshape == dshape('5 * 1 * float64') assert x.sum(axis=(0, 1), keepdims=True).dshape == dshape( '1 * 1 * float64') assert x.std(axis=0, keepdims=True).shape == (1, 3) def test_summary_keepdims(): x = symbol('x', '5 * 3 * float32') assert summary(a=x.min(), b=x.max()).dshape == \ dshape('{a: float32, b: float32}') assert summary(a=x.min(), b=x.max(), keepdims=True).dshape == \ dshape('1 * 1 * {a: float32, b: float32}') def test_summary_axis(): x = symbol('x', '5 * 3 * float32') assert summary(a=x.min(), b=x.max(), axis=0).dshape == \ dshape('3 * {a: float32, b: float32}') assert summary(a=x.min(), b=x.max(), axis=1).dshape == \ dshape('5 * {a: float32, b: float32}') assert summary(a=x.min(), b=x.max(), axis=1, keepdims=True).dshape == \ dshape('5 * 1 * {a: float32, b: float32}') def test_summary_str(): x = symbol('x', '5 * 3 * float32') assert 'keepdims' not in str(summary(a=x.min(), b=x.max())) def test_axis_kwarg_is_normalized_to_tuple(): x = symbol('x', '5 * 3 * float32') exprs = [x.sum(), x.sum(axis=1), x.sum(axis=[1]), x.std(), x.mean(axis=1)] for expr in exprs: assert isinstance(expr.axis, tuple) def test_summary_with_multiple_children(): t = symbol('t', 'var * {x: int, y: int, z: int}') assert summary(a=t.x.sum() + t.y.sum())._child.isidentical(t) def test_dir(): t = symbol('t', '10 * int') assert 'mean' in dir(t) t = symbol('t', 'int') assert 'mean' not in dir(t) def test_norms(): x = symbol('x', '5 * 3 * float32') assert x.vnorm().isidentical(x.vnorm('fro')) assert x.vnorm().isidentical(x.vnorm(2)) assert x.vnorm(axis=0).shape == (3,) assert x.vnorm(axis=0, keepdims=True).shape == (1, 3) @pytest.mark.parametrize('reduc', ['max', 'min', 'sum', 'mean', 'std', 'var']) def test_reductions_on_record_dshape(reduc): t = symbol('t', '10 * {a: int64, b: string}') with pytest.raises(AttributeError): getattr(t, reduc) @pytest.mark.parametrize('reduc', ['max', 'min', 'sum', 'mean', 'std', 'var']) def test_boolean_has_reductions(reduc): assert hasattr(symbol('t', 'var * bool'), reduc) @pytest.mark.parametrize(['reduc', 'measure'], product(['max', 'min'], ['date', 'datetime', 'timedelta'])) def test_max_min_on_datetime_and_timedelta(reduc, measure): assert hasattr(symbol('t', 'var * %s' % measure), reduc) def test_reduction_naming_with_generated_leaves(): assert symbol('_', 'var * float64').sum()._name == 'sum'
maxalbert/blaze
blaze/expr/tests/test_reductions.py
blaze/expr/split.py
from __future__ import absolute_import import datetime import json import logging import os.path import sys from pip._vendor import lockfile from pip._vendor.packaging import version as packaging_version from pip.compat import total_seconds, WINDOWS from pip.index import PyPI from pip.locations import USER_CACHE_DIR, running_under_virtualenv from pip.utils import ensure_dir, get_installed_version from pip.utils.filesystem import check_path_owner SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" logger = logging.getLogger(__name__) class VirtualenvSelfCheckState(object): def __init__(self): self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json") # Load the existing state try: with open(self.statefile_path) as statefile: self.state = json.load(statefile) except (IOError, ValueError): self.state = {} def save(self, pypi_version, current_time): # Attempt to write out our version check file with open(self.statefile_path, "w") as statefile: json.dump( { "last_check": current_time.strftime(SELFCHECK_DATE_FMT), "pypi_version": pypi_version, }, statefile, sort_keys=True, separators=(",", ":") ) class GlobalSelfCheckState(object): def __init__(self): self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json") # Load the existing state try: with open(self.statefile_path) as statefile: self.state = json.load(statefile)[sys.prefix] except (IOError, ValueError, KeyError): self.state = {} def save(self, pypi_version, current_time): # Check to make sure that we own the directory if not check_path_owner(os.path.dirname(self.statefile_path)): return # Now that we've ensured the directory is owned by this user, we'll go # ahead and make sure that all our directories are created. ensure_dir(os.path.dirname(self.statefile_path)) # Attempt to write out our version check file with lockfile.LockFile(self.statefile_path): if os.path.exists(self.statefile_path): with open(self.statefile_path) as statefile: state = json.load(statefile) else: state = {} state[sys.prefix] = { "last_check": current_time.strftime(SELFCHECK_DATE_FMT), "pypi_version": pypi_version, } with open(self.statefile_path, "w") as statefile: json.dump(state, statefile, sort_keys=True, separators=(",", ":")) def load_selfcheck_statefile(): if running_under_virtualenv(): return VirtualenvSelfCheckState() else: return GlobalSelfCheckState() def pip_version_check(session): """Check for an update for pip. Limit the frequency of checks to once per week. State is stored either in the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix of the pip script path. 
""" pip_version = packaging_version.parse(get_installed_version('pip')) pypi_version = None try: state = load_selfcheck_statefile() current_time = datetime.datetime.utcnow() # Determine if we need to refresh the state if "last_check" in state.state and "pypi_version" in state.state: last_check = datetime.datetime.strptime( state.state["last_check"], SELFCHECK_DATE_FMT ) if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60: pypi_version = state.state["pypi_version"] # Refresh the version if we need to or just see if we need to warn if pypi_version is None: resp = session.get( PyPI.pip_json_url, headers={"Accept": "application/json"}, ) resp.raise_for_status() pypi_version = [ v for v in sorted( list(resp.json()["releases"]), key=packaging_version.parse, ) if not packaging_version.parse(v).is_prerelease ][-1] # save that we've performed a check state.save(pypi_version, current_time) remote_version = packaging_version.parse(pypi_version) # Determine if our pypi_version is older if (pip_version < remote_version and pip_version.base_version != remote_version.base_version): # Advise "python -m pip" on Windows to avoid issues # with overwriting pip.exe. if WINDOWS: pip_cmd = "python -m pip" else: pip_cmd = "pip" logger.warning( "You are using pip version %s, however version %s is " "available.\nYou should consider upgrading via the " "'%s install --upgrade pip' command." % (pip_version, pypi_version, pip_cmd) ) except Exception: logger.debug( "There was an error checking the latest version of pip", exc_info=True, )
import sys import datetime import os from contextlib import contextmanager import freezegun import pytest import pretend from pip._vendor import lockfile from pip.utils import outdated @pytest.mark.parametrize( ['stored_time', 'newver', 'check', 'warn'], [ ('1970-01-01T10:00:00Z', '2.0', True, True), ('1970-01-01T10:00:00Z', '1.0', True, False), ('1970-01-06T10:00:00Z', '1.0', False, False), ('1970-01-06T10:00:00Z', '2.0', False, True), ] ) def test_pip_version_check(monkeypatch, stored_time, newver, check, warn): monkeypatch.setattr(outdated, 'get_installed_version', lambda name: '1.0') resp = pretend.stub( raise_for_status=pretend.call_recorder(lambda: None), json=pretend.call_recorder(lambda: {"releases": {newver: {}}}), ) session = pretend.stub( get=pretend.call_recorder(lambda u, headers=None: resp), ) fake_state = pretend.stub( state={"last_check": stored_time, 'pypi_version': '1.0'}, save=pretend.call_recorder(lambda v, t: None), ) monkeypatch.setattr( outdated, 'load_selfcheck_statefile', lambda: fake_state ) monkeypatch.setattr(outdated.logger, 'warning', pretend.call_recorder(lambda s: None)) monkeypatch.setattr(outdated.logger, 'debug', pretend.call_recorder(lambda s, exc_info=None: None)) with freezegun.freeze_time( "1970-01-09 10:00:00", ignore=[ "six.moves", "pip._vendor.six.moves", "pip._vendor.requests.packages.urllib3.packages.six.moves", ]): outdated.pip_version_check(session) assert not outdated.logger.debug.calls if check: assert session.get.calls == [pretend.call( "https://pypi.python.org/pypi/pip/json", headers={"Accept": "application/json"} )] assert fake_state.save.calls == [ pretend.call(newver, datetime.datetime(1970, 1, 9, 10, 00, 00)), ] if warn: assert len(outdated.logger.warning.calls) == 1 else: assert len(outdated.logger.warning.calls) == 0 else: assert session.get.calls == [] assert fake_state.save.calls == [] def test_virtualenv_state(monkeypatch): CONTENT = '{"last_check": "1970-01-02T11:00:00Z", "pypi_version": "1.0"}' fake_file = pretend.stub( read=pretend.call_recorder(lambda: CONTENT), write=pretend.call_recorder(lambda s: None), ) @pretend.call_recorder @contextmanager def fake_open(filename, mode='r'): yield fake_file monkeypatch.setattr(outdated, 'open', fake_open, raising=False) monkeypatch.setattr(outdated, 'running_under_virtualenv', pretend.call_recorder(lambda: True)) monkeypatch.setattr(sys, 'prefix', 'virtually_env') state = outdated.load_selfcheck_statefile() state.save('2.0', datetime.datetime.utcnow()) assert len(outdated.running_under_virtualenv.calls) == 1 expected_path = os.path.join('virtually_env', 'pip-selfcheck.json') assert fake_open.calls == [ pretend.call(expected_path), pretend.call(expected_path, 'w'), ] # json.dumps will call this a number of times assert len(fake_file.write.calls) def test_global_state(monkeypatch): CONTENT = '''{"pip_prefix": {"last_check": "1970-01-02T11:00:00Z", "pypi_version": "1.0"}}''' fake_file = pretend.stub( read=pretend.call_recorder(lambda: CONTENT), write=pretend.call_recorder(lambda s: None), ) @pretend.call_recorder @contextmanager def fake_open(filename, mode='r'): yield fake_file monkeypatch.setattr(outdated, 'open', fake_open, raising=False) @pretend.call_recorder @contextmanager def fake_lock(filename): yield monkeypatch.setattr(outdated, "check_path_owner", lambda p: True) monkeypatch.setattr(lockfile, 'LockFile', fake_lock) monkeypatch.setattr(os.path, "exists", lambda p: True) monkeypatch.setattr(outdated, 'running_under_virtualenv', pretend.call_recorder(lambda: False)) 
monkeypatch.setattr(outdated, 'USER_CACHE_DIR', 'cache_dir') monkeypatch.setattr(sys, 'prefix', 'pip_prefix') state = outdated.load_selfcheck_statefile() state.save('2.0', datetime.datetime.utcnow()) assert len(outdated.running_under_virtualenv.calls) == 1 expected_path = os.path.join('cache_dir', 'selfcheck.json') assert fake_lock.calls == [pretend.call(expected_path)] assert fake_open.calls == [ pretend.call(expected_path), pretend.call(expected_path), pretend.call(expected_path, 'w'), ] # json.dumps will call this a number of times assert len(fake_file.write.calls)
squidsoup/pip
tests/unit/test_unit_outdated.py
pip/utils/outdated.py
""" Monkey-patch the standard library to backport certain features to older Python versions """ from . import patcher def patch_time(): time = patcher.original('time') if not hasattr(time, 'monotonic'): time.monotonic = time.time import time as new_time if not hasattr(new_time, 'monotonic'): time.monotonic = time.time def patch(): patch_time()
import errno import gc import socket import sys import pytest from guv import spawn from guv.event import Event from guv.greenio import socket as green_socket from guv.green import socket as socket_patched from guv.support import get_errno pyversion = sys.version_info[:2] TIMEOUT_SMALL = 0.01 BACKLOG = 10 def resize_buffer(sock, size): sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size) sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, size) class TestGreenSocket: def test_socket_init(self): sock = socket_patched.socket() assert isinstance(sock, green_socket) def test_socket_close(self, gsock): gsock.close() def test_connect(self, gsock, pub_addr): gsock.connect(pub_addr) print(gsock.getpeername()) assert gsock.getpeername() def test_connect_timeout(self, gsock, fail_addr): gsock.settimeout(TIMEOUT_SMALL) with pytest.raises(socket.timeout): gsock.connect(fail_addr) def test_connect_ex_timeout(self, gsock, fail_addr): gsock.settimeout(TIMEOUT_SMALL) e = gsock.connect_ex(fail_addr) if e not in {errno.EHOSTUNREACH, errno.ENETUNREACH}: assert e == errno.EAGAIN def test_accept_timeout(self, gsock): gsock.settimeout(TIMEOUT_SMALL) gsock.bind(('', 0)) gsock.listen(BACKLOG) with pytest.raises(socket.timeout): gsock.accept() def test_recv_timeout(self, gsock, pub_addr): gsock.connect(pub_addr) gsock.settimeout(TIMEOUT_SMALL) with pytest.raises(socket.timeout) as exc_info: gsock.recv(8192) assert exc_info.value.args[0] == 'timed out' def test_send_timeout(self, gsock, server_sock): resize_buffer(server_sock, 1) evt = Event() def server(): client_sock, addr = server_sock.accept() resize_buffer(client_sock, 1) evt.wait() g = spawn(server) server_addr = server_sock.getsockname() resize_buffer(gsock, 1) gsock.connect(server_addr) gsock.settimeout(TIMEOUT_SMALL) with pytest.raises(socket.timeout): # large enough data to overwhelm most buffers msg_len = 10 ** 6 sent = 0 while sent < msg_len: sent += gsock.send(bytes(msg_len)) evt.send() g.wait() def test_send_to_closed_sock_raises(self, gsock): try: gsock.send(b'hello') except socket.error as e: assert get_errno(e) == errno.EPIPE if pyversion >= (3, 3): # on python 3.3+, the exception can be caught like this as well with pytest.raises(BrokenPipeError): gsock.send(b'hello') def test_del_closes_socket(self, gsock, server_sock): def accept_once(sock): # delete/overwrite the original conn object, only keeping the file object around # closing the file object should close everything try: client_sock, addr = sock.accept() file = client_sock.makefile('wb') del client_sock file.write(b'hello\n') file.close() gc.collect() with pytest.raises(ValueError): file.write(b'a') finally: sock.close() killer = spawn(accept_once, server_sock) gsock.connect(('127.0.0.1', server_sock.getsockname()[1])) f = gsock.makefile('rb') gsock.close() assert f.read() == b'hello\n' assert f.read() == b'' killer.wait() class TestGreenSocketModule: def test_create_connection(self, pub_addr): sock = socket_patched.create_connection(pub_addr) assert sock def test_create_connection_timeout_error(self, fail_addr): # Inspired by eventlet Greenio_test try: socket_patched.create_connection(fail_addr, timeout=0.01) pytest.fail('Timeout not raised') except socket.timeout as e: assert str(e) == 'timed out' except socket.error as e: # unreachable is also a valid outcome if not get_errno(e) in (errno.EHOSTUNREACH, errno.ENETUNREACH): raise
veegee/guv
tests/test_greenio.py
guv/compat.py
""" parquet compat """ from warnings import catch_warnings from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas import DataFrame, get_option from pandas.io.common import get_filepath_or_buffer, is_s3_url def get_engine(engine): """ return our implementation """ if engine == "auto": engine = get_option("io.parquet.engine") if engine == "auto": # try engines in this order try: return PyArrowImpl() except ImportError: pass try: return FastParquetImpl() except ImportError: pass raise ImportError( "Unable to find a usable engine; " "tried using: 'pyarrow', 'fastparquet'.\n" "pyarrow or fastparquet is required for parquet " "support" ) if engine not in ["pyarrow", "fastparquet"]: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") if engine == "pyarrow": return PyArrowImpl() elif engine == "fastparquet": return FastParquetImpl() class BaseImpl: api = None # module @staticmethod def validate_dataframe(df): if not isinstance(df, DataFrame): raise ValueError("to_parquet only supports IO with DataFrames") # must have value column names (strings only) if df.columns.inferred_type not in {"string", "unicode", "empty"}: raise ValueError("parquet must have string column names") # index level names must be strings valid_names = all( isinstance(name, str) for name in df.index.names if name is not None ) if not valid_names: raise ValueError("Index level names must be strings") def write(self, df, path, compression, **kwargs): raise AbstractMethodError(self) def read(self, path, columns=None, **kwargs): raise AbstractMethodError(self) class PyArrowImpl(BaseImpl): def __init__(self): pyarrow = import_optional_dependency( "pyarrow", extra="pyarrow is required for parquet support." ) import pyarrow.parquet self.api = pyarrow def write( self, df, path, compression="snappy", coerce_timestamps="ms", index=None, partition_cols=None, **kwargs ): self.validate_dataframe(df) path, _, _, _ = get_filepath_or_buffer(path, mode="wb") if index is None: from_pandas_kwargs = {} else: from_pandas_kwargs = {"preserve_index": index} table = self.api.Table.from_pandas(df, **from_pandas_kwargs) if partition_cols is not None: self.api.parquet.write_to_dataset( table, path, compression=compression, coerce_timestamps=coerce_timestamps, partition_cols=partition_cols, **kwargs ) else: self.api.parquet.write_table( table, path, compression=compression, coerce_timestamps=coerce_timestamps, **kwargs ) def read(self, path, columns=None, **kwargs): path, _, _, should_close = get_filepath_or_buffer(path) kwargs["use_pandas_metadata"] = True result = self.api.parquet.read_table( path, columns=columns, **kwargs ).to_pandas() if should_close: try: path.close() except: # noqa: flake8 pass return result class FastParquetImpl(BaseImpl): def __init__(self): # since pandas is a dependency of fastparquet # we need to import on first use fastparquet = import_optional_dependency( "fastparquet", extra="fastparquet is required for parquet support." ) self.api = fastparquet def write( self, df, path, compression="snappy", index=None, partition_cols=None, **kwargs ): self.validate_dataframe(df) # thriftpy/protocol/compact.py:339: # DeprecationWarning: tostring() is deprecated. # Use tobytes() instead. if "partition_on" in kwargs and partition_cols is not None: raise ValueError( "Cannot use both partition_on and " "partition_cols. 
Use partition_cols for " "partitioning data" ) elif "partition_on" in kwargs: partition_cols = kwargs.pop("partition_on") if partition_cols is not None: kwargs["file_scheme"] = "hive" if is_s3_url(path): # path is s3:// so we need to open the s3file in 'wb' mode. # TODO: Support 'ab' path, _, _, _ = get_filepath_or_buffer(path, mode="wb") # And pass the opened s3file to the fastparquet internal impl. kwargs["open_with"] = lambda path, _: path else: path, _, _, _ = get_filepath_or_buffer(path) with catch_warnings(record=True): self.api.write( path, df, compression=compression, write_index=index, partition_on=partition_cols, **kwargs ) def read(self, path, columns=None, **kwargs): if is_s3_url(path): # When path is s3:// an S3File is returned. # We need to retain the original path(str) while also # pass the S3File().open function to fsatparquet impl. s3, _, _, should_close = get_filepath_or_buffer(path) try: parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) finally: s3.close() else: path, _, _, _ = get_filepath_or_buffer(path) parquet_file = self.api.ParquetFile(path) return parquet_file.to_pandas(columns=columns, **kwargs) def to_parquet( df, path, engine="auto", compression="snappy", index=None, partition_cols=None, **kwargs ): """ Write a DataFrame to the parquet format. Parameters ---------- path : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the engine's default behavior will be used. .. versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 kwargs Additional keyword arguments passed to the engine """ impl = get_engine(engine) return impl.write( df, path, compression=compression, index=index, partition_cols=partition_cols, **kwargs ) def read_parquet(path, engine="auto", columns=None, **kwargs): """ Load a parquet object from the file path, returning a DataFrame. .. versionadded:: 0.21.0 Parameters ---------- path : str, path object or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.parquet``. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. .. 
versionadded:: 0.21.1 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame """ impl = get_engine(engine) return impl.read(path, columns=columns, **kwargs)
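A hedged round-trip sketch of the two module-level functions above; ``example.parquet`` is a hypothetical local path, and either pyarrow or fastparquet must be installed (these are normally reached via ``df.to_parquet`` / ``pd.read_parquet``):

# Hedged usage sketch of to_parquet/read_parquet as defined above.
import pandas as pd
from pandas.io.parquet import read_parquet, to_parquet

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
to_parquet(df, "example.parquet", engine="auto", compression="snappy")
roundtrip = read_parquet("example.parquet", columns=["a"])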
import re import numpy as np import pytest from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike import pandas as pd from pandas import IntervalIndex, MultiIndex, RangeIndex import pandas.util.testing as tm def test_labels_dtypes(): # GH 8456 i = MultiIndex.from_tuples([("A", 1), ("A", 2)]) assert i.codes[0].dtype == "int8" assert i.codes[1].dtype == "int8" i = MultiIndex.from_product([["a"], range(40)]) assert i.codes[1].dtype == "int8" i = MultiIndex.from_product([["a"], range(400)]) assert i.codes[1].dtype == "int16" i = MultiIndex.from_product([["a"], range(40000)]) assert i.codes[1].dtype == "int32" i = pd.MultiIndex.from_product([["a"], range(1000)]) assert (i.codes[0] >= 0).all() assert (i.codes[1] >= 0).all() def test_values_boxed(): tuples = [ (1, pd.Timestamp("2000-01-01")), (2, pd.NaT), (3, pd.Timestamp("2000-01-03")), (1, pd.Timestamp("2000-01-04")), (2, pd.Timestamp("2000-01-02")), (3, pd.Timestamp("2000-01-03")), ] result = pd.MultiIndex.from_tuples(tuples) expected = construct_1d_object_array_from_listlike(tuples) tm.assert_numpy_array_equal(result.values, expected) # Check that code branches for boxed values produce identical results tm.assert_numpy_array_equal(result.values[:4], result[:4].values) def test_values_multiindex_datetimeindex(): # Test to ensure we hit the boxing / nobox part of MI.values ints = np.arange(10 ** 18, 10 ** 18 + 5) naive = pd.DatetimeIndex(ints) # TODO(GH-24559): Remove the FutureWarning with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): aware = pd.DatetimeIndex(ints, tz="US/Central") idx = pd.MultiIndex.from_arrays([naive, aware]) result = idx.values outer = pd.DatetimeIndex([x[0] for x in result]) tm.assert_index_equal(outer, naive) inner = pd.DatetimeIndex([x[1] for x in result]) tm.assert_index_equal(inner, aware) # n_lev > n_lab result = idx[:2].values outer = pd.DatetimeIndex([x[0] for x in result]) tm.assert_index_equal(outer, naive[:2]) inner = pd.DatetimeIndex([x[1] for x in result]) tm.assert_index_equal(inner, aware[:2]) def test_values_multiindex_periodindex(): # Test to ensure we hit the boxing / nobox part of MI.values ints = np.arange(2007, 2012) pidx = pd.PeriodIndex(ints, freq="D") idx = pd.MultiIndex.from_arrays([ints, pidx]) result = idx.values outer = pd.Int64Index([x[0] for x in result]) tm.assert_index_equal(outer, pd.Int64Index(ints)) inner = pd.PeriodIndex([x[1] for x in result]) tm.assert_index_equal(inner, pidx) # n_lev > n_lab result = idx[:2].values outer = pd.Int64Index([x[0] for x in result]) tm.assert_index_equal(outer, pd.Int64Index(ints[:2])) inner = pd.PeriodIndex([x[1] for x in result]) tm.assert_index_equal(inner, pidx[:2]) def test_consistency(): # need to construct an overflow major_axis = list(range(70000)) minor_axis = list(range(10)) major_codes = np.arange(70000) minor_codes = np.repeat(range(10), 7000) # the fact that is works means it's consistent index = MultiIndex( levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] ) # inconsistent major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3]) minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1]) index = MultiIndex( levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] ) assert index.is_unique is False def test_hash_collisions(): # non-smoke test that we don't get hash collisions index = MultiIndex.from_product( [np.arange(1000), np.arange(1000)], names=["one", "two"] ) result = index.get_indexer(index.values) tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp")) for i in [0, 
1, len(index) - 2, len(index) - 1]: result = index.get_loc(index[i]) assert result == i def test_dims(): pass def take_invalid_kwargs(): vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]] idx = pd.MultiIndex.from_product(vals, names=["str", "dt"]) indices = [1, 2] msg = r"take\(\) got an unexpected keyword argument 'foo'" with pytest.raises(TypeError, match=msg): idx.take(indices, foo=2) msg = "the 'out' parameter is not supported" with pytest.raises(ValueError, match=msg): idx.take(indices, out=indices) msg = "the 'mode' parameter is not supported" with pytest.raises(ValueError, match=msg): idx.take(indices, mode="clip") def test_isna_behavior(idx): # should not segfault GH5123 # NOTE: if MI representation changes, may make sense to allow # isna(MI) msg = "isna is not defined for MultiIndex" with pytest.raises(NotImplementedError, match=msg): pd.isna(idx) def test_large_multiindex_error(): # GH12527 df_below_1000000 = pd.DataFrame( 1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"] ) with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): df_below_1000000.loc[(-1, 0), "dest"] with pytest.raises(KeyError, match=r"^\(3, 0\)$"): df_below_1000000.loc[(3, 0), "dest"] df_above_1000000 = pd.DataFrame( 1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]), columns=["dest"] ) with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): df_above_1000000.loc[(-1, 0), "dest"] with pytest.raises(KeyError, match=r"^\(3, 0\)$"): df_above_1000000.loc[(3, 0), "dest"] def test_million_record_attribute_error(): # GH 18165 r = list(range(1000000)) df = pd.DataFrame( {"a": r, "b": r}, index=pd.MultiIndex.from_tuples([(x, x) for x in r]) ) msg = "'Series' object has no attribute 'foo'" with pytest.raises(AttributeError, match=msg): df["a"].foo() def test_can_hold_identifiers(idx): key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is True def test_metadata_immutable(idx): levels, codes = idx.levels, idx.codes # shouldn't be able to set at either the top level or base level mutable_regex = re.compile("does not support mutable operations") with pytest.raises(TypeError, match=mutable_regex): levels[0] = levels[0] with pytest.raises(TypeError, match=mutable_regex): levels[0][0] = levels[0][0] # ditto for labels with pytest.raises(TypeError, match=mutable_regex): codes[0] = codes[0] with pytest.raises(TypeError, match=mutable_regex): codes[0][0] = codes[0][0] # and for names names = idx.names with pytest.raises(TypeError, match=mutable_regex): names[0] = names[0] def test_level_setting_resets_attributes(): ind = pd.MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) assert ind.is_monotonic ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True) # if this fails, probably didn't reset the cache correctly. 
assert not ind.is_monotonic def test_rangeindex_fallback_coercion_bug(): # GH 12893 foo = pd.DataFrame(np.arange(100).reshape((10, 10))) bar = pd.DataFrame(np.arange(100).reshape((10, 10))) df = pd.concat({"foo": foo.stack(), "bar": bar.stack()}, axis=1) df.index.names = ["fizz", "buzz"] str(df) expected = pd.DataFrame( {"bar": np.arange(100), "foo": np.arange(100)}, index=pd.MultiIndex.from_product( [range(10), range(10)], names=["fizz", "buzz"] ), ) tm.assert_frame_equal(df, expected, check_like=True) result = df.index.get_level_values("fizz") expected = pd.Int64Index(np.arange(10), name="fizz").repeat(10) tm.assert_index_equal(result, expected) result = df.index.get_level_values("buzz") expected = pd.Int64Index(np.tile(np.arange(10), 10), name="buzz") tm.assert_index_equal(result, expected) def test_hash_error(indices): index = indices with pytest.raises( TypeError, match=("unhashable type: {0.__name__!r}".format(type(index))) ): hash(indices) def test_mutability(indices): if not len(indices): return msg = "Index does not support mutable operations" with pytest.raises(TypeError, match=msg): indices[0] = indices[0] def test_wrong_number_names(indices): with pytest.raises(ValueError, match="^Length"): indices.names = ["apple", "banana", "carrot"] def test_memory_usage(idx): result = idx.memory_usage() if len(idx): idx.get_loc(idx[0]) result2 = idx.memory_usage() result3 = idx.memory_usage(deep=True) # RangeIndex, IntervalIndex # don't have engines if not isinstance(idx, (RangeIndex, IntervalIndex)): assert result2 > result if idx.inferred_type == "object": assert result3 > result2 else: # we report 0 for no-length assert result == 0 def test_nlevels(idx): assert idx.nlevels == 2
toobaz/pandas
pandas/tests/indexes/multi/test_integrity.py
pandas/io/parquet.py
import numpy as np
import pandas as pd

try:
    from vispy.io import write_mesh
except ImportError:
    print("You need vispy to use the .OBJ export")

import logging

logger = logging.getLogger(name=__name__)


def save_triangulated(filename, eptm):

    vertices, faces = eptm.triangular_mesh(eptm.coords, False)
    write_mesh(
        filename,
        vertices=vertices,
        faces=faces,
        normals=None,
        texcoords=None,
        overwrite=True,
    )
    logger.info("Saved %s as a triangulated .OBJ file", eptm.identifier)


def save_junction_mesh(filename, eptm):

    vertices, faces, normals = eptm.vertex_mesh(eptm.coords, vertex_normals=True)
    write_mesh(
        filename,
        vertices=vertices,
        faces=faces,
        normals=normals,
        texcoords=None,
        overwrite=True,
        reshape_faces=False,
    )  # GH 1155
    logger.info("Saved %s as a junction mesh .OBJ file", eptm.identifier)


def write_splitted_cells(*args, **kwargs):
    logger.warning("Deprecated, use `save_splitted_cells` instead")
    save_splitted_cells(*args, **kwargs)


def save_splitted_cells(fname, sheet, epsilon=0.1):

    coords = sheet.coords
    up_srce = sheet.upcast_srce(sheet.vert_df[coords])
    up_trgt = sheet.upcast_trgt(sheet.vert_df[coords])
    up_face = sheet.upcast_face(sheet.face_df[coords])

    up_srce = (up_srce - up_face) * (1 - epsilon) + up_face
    up_trgt = (up_trgt - up_face) * (1 - epsilon) + up_face

    cell_faces = pd.concat([sheet.face_df[coords], up_srce, up_trgt], ignore_index=True)

    Ne, Nf = sheet.Ne, sheet.Nf
    triangles = np.vstack(
        [sheet.edge_df["face"], np.arange(Ne) + Nf, np.arange(Ne) + Ne + Nf]
    ).T

    write_mesh(
        fname,
        cell_faces.values,
        triangles,
        normals=None,
        texcoords=None,
        overwrite=True,
    )
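A hedged sketch of exporting shrunken cells with the helper above; ``three_faces_sheet`` as tyssue's small demo epithelium is an assumption about the installed version, ``cells.obj`` is a hypothetical path, and vispy must be available:

# Hedged sketch, assuming tyssue's demo generator and an installed vispy.
from tyssue import Sheet
from tyssue.generation import three_faces_sheet
from tyssue.io.obj import save_splitted_cells

datasets, specs = three_faces_sheet()
sheet = Sheet("demo", datasets, specs)
save_splitted_cells("cells.obj", sheet, epsilon=0.1)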
import pandas as pd import pytest from pathlib import Path from tyssue import Sheet, SheetGeometry from tyssue.io import hdf5 from tyssue import collisions from tyssue.collisions import solvers from tyssue.stores import stores_dir def test_detection(): sheet = Sheet("crossed", hdf5.load_datasets(Path(stores_dir) / "sheet6x5.hf5")) sheet.vert_df.z = 5 * sheet.vert_df.x ** 2 # sheet.vert_df[sheet.coords] += np.random.normal(scale=0.001, size=(sheet.Nv, 3)) SheetGeometry.update_all(sheet) sheet.vert_df.x -= 35 * (sheet.vert_df.x / 2) ** 3 SheetGeometry.update_all(sheet) colliding_edges = set(collisions.self_intersections(sheet).flatten()) expected = {32, 1, 34, 9, 35} assert colliding_edges == expected def test_solving(): sheet = Sheet("crossed", hdf5.load_datasets(Path(stores_dir) / "sheet6x5.hf5")) sheet.vert_df.z = 5 * sheet.vert_df.x ** 2 SheetGeometry.update_all(sheet) positions_buffer = sheet.vert_df[sheet.coords].copy() sheet.vert_df.x -= 35 * (sheet.vert_df.x / 2) ** 3 SheetGeometry.update_all(sheet) colliding_edges = collisions.self_intersections(sheet) boxes = solvers.CollidingBoxes(sheet, positions_buffer, colliding_edges) boxes.solve_collisions(shyness=0.01) assert collisions.self_intersections(sheet).size == 0 assert sheet.vert_df.loc[[22, 12], "x"].diff().loc[12] == 0.01 def test_already(): # GH111 sheet = Sheet("crossed", hdf5.load_datasets(Path(stores_dir) / "sheet6x5.hf5")) sheet.vert_df.z = 5 * sheet.vert_df.x ** 2 SheetGeometry.update_all(sheet) sheet.vert_df.x -= 35 * (sheet.vert_df.x / 2) ** 3 SheetGeometry.update_all(sheet) positions_buffer = sheet.vert_df[sheet.coords].copy() sheet.vert_df.x -= 0.1 * (sheet.vert_df.x / 2) ** 3 SheetGeometry.update_all(sheet) colliding_edges = collisions.self_intersections(sheet) boxes = solvers.CollidingBoxes(sheet, positions_buffer, colliding_edges) res = boxes.solve_collisions(shyness=0.01) colliding_edges = collisions.self_intersections(sheet) assert len(colliding_edges) == 0
CellModels/tyssue
tests/collisions/test_sheet_collision.py
tyssue/io/obj.py
# -*- encoding: utf-8 -*- from abjad import * def test_indicatortools_Clef_middle_c_position_01(): assert Clef('treble').middle_c_position == pitchtools.StaffPosition(-6) assert Clef('alto').middle_c_position == pitchtools.StaffPosition(0) assert Clef('tenor').middle_c_position == pitchtools.StaffPosition(2) assert Clef('bass').middle_c_position == pitchtools.StaffPosition(6) assert Clef('treble^8').middle_c_position == pitchtools.StaffPosition(-13) assert Clef('alto^15').middle_c_position == pitchtools.StaffPosition(-13) assert Clef('tenor_8').middle_c_position == pitchtools.StaffPosition(9) assert Clef('bass_15').middle_c_position == pitchtools.StaffPosition(19)
# -*- encoding: utf-8 -*- from abjad import * import pytest def test_pitchtools_PitchClassSet___slots___01(): r'''Named pitch-class set can not be changed after initialization. ''' named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs'] named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes) assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/indicatortools/test/test_indicatortools_Clef_middle_c_position.py
# -*- encoding: utf-8 -*- from abjad.tools import indicatortools from abjad.tools import pitchtools from abjad.tools import scoretools from abjad.tools.topleveltools import iterate def iterate_out_of_range_notes_and_chords(expr): '''Iterates notes and chords in `expr` outside traditional instrument ranges: :: >>> staff = Staff("c'8 r8 <d fs>8 r8") >>> violin = instrumenttools.Violin() >>> attach(violin, staff) :: >>> list( ... instrumenttools.iterate_out_of_range_notes_and_chords( ... staff)) [Chord('<d fs>8')] Returns generator. ''' from abjad.tools import instrumenttools prototype = (scoretools.Note, scoretools.Chord) for note_or_chord in iterate(expr).by_class(prototype): instrument = note_or_chord._get_effective( instrumenttools.Instrument) if instrument is None: message = 'no instrument found.' raise ValueError(message) if note_or_chord not in instrument.pitch_range: yield note_or_chord
# -*- encoding: utf-8 -*- from abjad import * import pytest def test_pitchtools_PitchClassSet___slots___01(): r'''Named pitch-class set can not be changed after initialization. ''' named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs'] named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes) assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/instrumenttools/iterate_out_of_range_notes_and_chords.py
# -*- encoding: utf-8 -*- from abjad.tools.datastructuretools.TreeNode import TreeNode class ReSTHorizontalRule(TreeNode): r'''A ReST horizontal rule. :: >>> rule = documentationtools.ReSTHorizontalRule() >>> rule ReSTHorizontalRule() :: >>> print(rule.rest_format) -------- ''' ### CLASS VARIABLES ### __documentation_section__ = 'reStructuredText' ### PRIVATE PROPERTIES ### @property def _rest_format_contributions(self): return ['--------'] ### PUBLIC PROPERTIES ### @property def rest_format(self): r'''ReST format of ReSt horizontal rule. Returns text. ''' return '\n'.join(self._rest_format_contributions)
# -*- encoding: utf-8 -*- from abjad import * import pytest def test_pitchtools_PitchClassSet___slots___01(): r'''Named pitch-class set can not be changed after initialization. ''' named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs'] named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes) assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/documentationtools/ReSTHorizontalRule.py
# -*- encoding: utf-8 -*- from abjad import * def test_pitchtools_NumberedPitch_pitch_number_01(): assert pitchtools.NumberedPitch("cff''").pitch_number == 10 assert pitchtools.NumberedPitch("ctqf''").pitch_number == 10.5 assert pitchtools.NumberedPitch("cf''").pitch_number == 11 assert pitchtools.NumberedPitch("cqf''").pitch_number == 11.5 assert pitchtools.NumberedPitch("c''").pitch_number == 12 assert pitchtools.NumberedPitch("cqs''").pitch_number == 12.5 assert pitchtools.NumberedPitch("cs''").pitch_number == 13 assert pitchtools.NumberedPitch("ctqs''").pitch_number == 13.5 assert pitchtools.NumberedPitch("css''").pitch_number == 14 assert pitchtools.NumberedPitch("d''").pitch_number == 14
# -*- encoding: utf-8 -*- from abjad import * import pytest def test_pitchtools_PitchClassSet___slots___01(): r'''Named pitch-class set can not be changed after initialization. ''' named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs'] named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes) assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/pitchtools/test/test_pitchtools_NumberedPitch_pitch_number.py
# -*- encoding: utf-8 -*-
import functools
from abjad.tools import durationtools
from abjad.tools.schemetools.Scheme import Scheme


@functools.total_ordering
class SchemeMoment(Scheme):
    r'''A LilyPond scheme moment.

    Initializes with two integers:

    ::

        >>> moment = schemetools.SchemeMoment(1, 68)
        >>> moment
        SchemeMoment(1, 68)

    Scheme moments are immutable.
    '''

    ### CLASS VARIABLES ###

    __slots__ = (
        )

    ### INITIALIZER ###

    def __init__(self, *args, **kwargs):
        if len(args) == 1 and durationtools.Duration.is_token(args[0]):
            args = durationtools.Duration(args[0])
        elif len(args) == 1 and isinstance(args[0], type(self)):
            args = args[0].duration
        elif len(args) == 2 and \
            isinstance(args[0], int) and isinstance(args[1], int):
            args = durationtools.Duration(args)
        elif len(args) == 0:
            args = durationtools.Duration((1, 4))
        else:
            message = 'cannot initialize {}: {!r}.'
            message = message.format(type(self).__name__, args)
            raise TypeError(message)
        Scheme.__init__(self, args, **kwargs)

    ### SPECIAL METHODS ###

    def __eq__(self, arg):
        r'''Is true when `arg` is a scheme moment with the same value as that
        of this scheme moment.

        ::

            >>> moment == schemetools.SchemeMoment(1, 68)
            True

        Otherwise false.

            >>> moment == schemetools.SchemeMoment(1, 54)
            False

        Returns boolean.
        '''
        if isinstance(arg, type(self)):
            if self._value == arg._value:
                return True
        return False

    def __getnewargs__(self):
        r'''Gets new arguments.

        Returns tuple.
        '''
        return (self._value,)

    def __hash__(self):
        r'''Hashes scheme moment.

        Required to be explicitly re-defined on Python 3 if __eq__ changes.

        Returns integer.
        '''
        return super(SchemeMoment, self).__hash__()

    def __lt__(self, arg):
        r'''Is true when `arg` is a scheme moment with value greater than that
        of this scheme moment.

        ::

            >>> moment < schemetools.SchemeMoment(1, 32)
            True

        Otherwise false:

        ::

            >>> moment < schemetools.SchemeMoment(1, 78)
            False

        Returns boolean.
        '''
        if isinstance(arg, type(self)):
            if self._value < arg._value:
                return True
        return False

    ### PRIVATE PROPERTIES ###

    @property
    def _formatted_value(self):
        numerator, denominator = self._value.numerator, self._value.denominator
        return '(ly:make-moment {} {})'.format(numerator, denominator)

    @property
    def _storage_format_specification(self):
        from abjad.tools import systemtools
        return systemtools.StorageFormatSpecification(
            self,
            positional_argument_values=(
                self._value.numerator,
                self._value.denominator,
                ),
            )

    ### PUBLIC PROPERTIES ###

    @property
    def duration(self):
        r'''Duration of scheme moment.

        ::

            >>> scheme_moment = schemetools.SchemeMoment(1, 68)
            >>> scheme_moment.duration
            Duration(1, 68)

        Returns duration.
        '''
        return self._value
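A short hedged sketch mirroring the doctests above, with the explicit import path spelled out:

# Hedged sketch of SchemeMoment comparison and duration behavior.
from abjad.tools import schemetools

moment = schemetools.SchemeMoment(1, 68)
assert moment.duration == schemetools.SchemeMoment(1, 68).duration
assert moment < schemetools.SchemeMoment(1, 32)       # 1/68 < 1/32
assert not (moment < schemetools.SchemeMoment(1, 78)) # 1/68 > 1/78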
# -*- encoding: utf-8 -*- from abjad import * import pytest def test_pitchtools_PitchClassSet___slots___01(): r'''Named pitch-class set can not be changed after initialization. ''' named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs'] named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes) assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/schemetools/SchemeMoment.py
# -*- encoding: utf-8 -*- from abjad import * def configure_lilypond_file(lilypond_file): r'''Configures LilyPond file. ''' lilypond_file.global_staff_size = 8 context_block = lilypondfiletools.ContextBlock( source_context_name=r'Staff \RemoveEmptyStaves', ) override(context_block).vertical_axis_group.remove_first = True lilypond_file.layout_block.items.append(context_block) slash_separator = indicatortools.LilyPondCommand('slashSeparator') lilypond_file.paper_block.system_separator_markup = slash_separator bottom_margin = lilypondfiletools.LilyPondDimension(0.5, 'in') lilypond_file.paper_block.bottom_margin = bottom_margin top_margin = lilypondfiletools.LilyPondDimension(0.5, 'in') lilypond_file.paper_block.top_margin = top_margin left_margin = lilypondfiletools.LilyPondDimension(0.75, 'in') lilypond_file.paper_block.left_margin = left_margin right_margin = lilypondfiletools.LilyPondDimension(0.5, 'in') lilypond_file.paper_block.right_margin = right_margin paper_width = lilypondfiletools.LilyPondDimension(5.25, 'in') lilypond_file.paper_block.paper_width = paper_width paper_height = lilypondfiletools.LilyPondDimension(7.25, 'in') lilypond_file.paper_block.paper_height = paper_height lilypond_file.header_block.composer = markuptools.Markup('Arvo Pärt') title = 'Cantus in Memory of Benjamin Britten (1980)' lilypond_file.header_block.title = markuptools.Markup(title)
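A hedged sketch of applying the configuration; ``make_basic_lilypond_file`` is assumed from the same era's ``lilypondfiletools``:

# Hedged sketch: configuring a freshly made LilyPond file.
from abjad import lilypondfiletools
from abjad.demos.part.configure_lilypond_file import configure_lilypond_file

lilypond_file = lilypondfiletools.make_basic_lilypond_file()
configure_lilypond_file(lilypond_file)
# lilypond_file.paper_block and header_block now carry the settings above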
# -*- encoding: utf-8 -*- from abjad import * import pytest def test_pitchtools_PitchClassSet___slots___01(): r'''Named pitch-class set can not be changed after initialization. ''' named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs'] named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes) assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/demos/part/configure_lilypond_file.py
# -*- encoding: utf-8 -*- from abjad import * def test_selectiontools_Selection__get_component_01(): staff = Staff("abj: | 2/8 c'8 d'8 || 2/8 e'8 f'8 || 2/8 g'8 a'8 |") assert select(staff)._get_component(Measure, 0) is staff[0] assert select(staff)._get_component(Measure, 1) is staff[1] assert select(staff)._get_component(Measure, 2) is staff[2] def test_selectiontools_Selection__get_component_02(): staff = Staff("abj: | 2/8 c'8 d'8 || 2/8 e'8 f'8 || 2/8 g'8 a'8 |") assert select(staff)._get_component(Measure, -1) is staff[2] assert select(staff)._get_component(Measure, -2) is staff[1] assert select(staff)._get_component(Measure, -3) is staff[0] def test_selectiontools_Selection__get_component_03(): r'''Read forwards for positive n. ''' staff = Staff("abj: | 2/8 c'8 d'8 || 2/8 e'8 f'8 || 2/8 g'8 a'8 |") r''' \new Staff { { \time 2/8 c'8 d'8 } { \time 2/8 e'8 f'8 } { \time 2/8 g'8 a'8 } } ''' assert select(staff)._get_component(scoretools.Leaf, 0) is staff[0][0] assert select(staff)._get_component(scoretools.Leaf, 1) is staff[0][1] assert select(staff)._get_component(scoretools.Leaf, 2) is staff[1][0] assert select(staff)._get_component(scoretools.Leaf, 3) is staff[1][1] assert select(staff)._get_component(scoretools.Leaf, 4) is staff[2][0] assert select(staff)._get_component(scoretools.Leaf, 5) is staff[2][1] def test_selectiontools_Selection__get_component_04(): r'''Read backwards for negative n. ''' staff = Staff("abj: | 2/8 c'8 d'8 || 2/8 e'8 f'8 || 2/8 g'8 a'8 |") r''' \new Staff { { \time 2/8 c'8 d'8 } { \time 2/8 e'8 f'8 } { \time 2/8 g'8 a'8 } } ''' assert select(staff)._get_component(scoretools.Leaf, -1) is staff[2][1] assert select(staff)._get_component(scoretools.Leaf, -2) is staff[2][0] assert select(staff)._get_component(scoretools.Leaf, -3) is staff[1][1] assert select(staff)._get_component(scoretools.Leaf, -4) is staff[1][0] assert select(staff)._get_component(scoretools.Leaf, -5) is staff[0][1] assert select(staff)._get_component(scoretools.Leaf, -6) is staff[0][0] def test_selectiontools_Selection__get_component_05(): staff = Staff(r''' c'16 r16 d'8 r8 e'8. r8. f'4 r4 ''') notes = [staff[0], staff[2], staff[4], staff[6]] rests = [staff[1], staff[3], staff[5], staff[7]] assert select(staff)._get_component(Note, 0) is notes[0] assert select(staff)._get_component(Note, 1) is notes[1] assert select(staff)._get_component(Note, 2) is notes[2] assert select(staff)._get_component(Note, 3) is notes[3] assert select(staff)._get_component(Rest, 0) is rests[0] assert select(staff)._get_component(Rest, 1) is rests[1] assert select(staff)._get_component(Rest, 2) is rests[2] assert select(staff)._get_component(Rest, 3) is rests[3] assert select(staff)._get_component(Staff, 0) is staff def test_selectiontools_Selection__get_component_06(): r'''Iterates backwards with negative values of n. ''' staff = Staff(r''' c'16 r16 d'8 r8 e'8. r8. f'4 r4 ''') notes = [staff[0], staff[2], staff[4], staff[6]] rests = [staff[1], staff[3], staff[5], staff[7]] assert select(staff)._get_component(Note, -1) is notes[3] assert select(staff)._get_component(Note, -2) is notes[2] assert select(staff)._get_component(Note, -3) is notes[1] assert select(staff)._get_component(Note, -4) is notes[0] assert select(staff)._get_component(Rest, -1) is rests[3] assert select(staff)._get_component(Rest, -2) is rests[2] assert select(staff)._get_component(Rest, -3) is rests[1] assert select(staff)._get_component(Rest, -4) is rests[0]
# -*- encoding: utf-8 -*- from abjad import * import pytest def test_pitchtools_PitchClassSet___slots___01(): r'''Named pitch-class set can not be changed after initialization. ''' named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs'] named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes) assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/selectiontools/test/test_selectiontools_Selection__get_component.py
# -*- encoding: utf-8 -*- import sys from abjad import * def test_stringtools_strip_diacritics_01(): if sys.version_info[0] == 2: binary_string = 'Dvo\xc5\x99\xc3\xa1k' else: binary_string = 'Dvořák' ascii_string = stringtools.strip_diacritics(binary_string) assert ascii_string == 'Dvorak'
# -*- encoding: utf-8 -*- from abjad import * import pytest def test_pitchtools_PitchClassSet___slots___01(): r'''Named pitch-class set can not be changed after initialization. ''' named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs'] named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes) assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/stringtools/test/test_stringtools_strip_diacritics.py
# -*- encoding: utf-8 -*-
from abjad.tools import durationtools
from abjad.tools import mathtools
from abjad.tools import sequencetools
from abjad.tools.pitchtools.Segment import Segment
from abjad.tools.topleveltools import new


class IntervalSegment(Segment):
    r'''An interval segment.

    ::

        >>> intervals = 'm2 M10 -aug4 P5'
        >>> pitchtools.IntervalSegment(intervals)
        IntervalSegment(['+m2', '+M10', '-aug4', '+P5'])

    ::

        >>> pitch_segment = pitchtools.PitchSegment("c d e f g a b c'")
        >>> pitchtools.IntervalSegment(pitch_segment)
        IntervalSegment(['+M2', '+M2', '+m2', '+M2', '+M2', '+M2', '+m2'])

    '''

    ### CLASS VARIABLES ###

    __slots__ = ()

    ### INITIALIZER ###

    def __init__(
        self,
        items=None,
        item_class=None,
        ):
        from abjad.tools import pitchtools
        if isinstance(items, pitchtools.PitchSegment):
            intervals = []
            for one, two in sequencetools.iterate_sequence_nwise(items):
                intervals.append(one - two)
            items = intervals
        Segment.__init__(
            self,
            items=items,
            item_class=item_class,
            )

    ### PRIVATE PROPERTIES ###

    @property
    def _named_item_class(self):
        from abjad.tools import pitchtools
        return pitchtools.NamedInterval

    @property
    def _numbered_item_class(self):
        from abjad.tools import pitchtools
        return pitchtools.NumberedInterval

    @property
    def _parent_item_class(self):
        from abjad.tools import pitchtools
        return pitchtools.Interval

    @property
    def _repr_specification(self):
        items = []
        if self.item_class.__name__.startswith('Named'):
            items = [str(x) for x in self]
        else:
            items = [x.number for x in self]
        return new(
            self._storage_format_specification,
            is_indented=False,
            keyword_argument_names=(),
            positional_argument_values=(
                items,
                ),
            )

    ### PUBLIC METHODS ###

    @classmethod
    def from_selection(
        cls,
        selection,
        item_class=None,
        ):
        r'''Makes interval segment from component `selection`.

        ::

            >>> staff = Staff("c'8 d'8 e'8 f'8 g'8 a'8 b'8 c''8")
            >>> pitchtools.IntervalSegment.from_selection(
            ...     staff, item_class=pitchtools.NumberedInterval)
            IntervalSegment([2, 2, 1, 2, 2, 2, 1])

        Returns interval segment.
        '''
        from abjad.tools import pitchtools
        pitch_segment = pitchtools.PitchSegment.from_selection(selection)
        intervals = (-x for x in mathtools.difference_series(pitch_segment))
        return cls(
            items=intervals,
            item_class=item_class,
            )

    def rotate(self, n):
        r'''Rotates interval segment by `n`.

        Returns new interval segment.
        '''
        return new(self, self[-n:] + self[:-n])

    ### PUBLIC PROPERTIES ###

    @property
    def has_duplicates(self):
        r'''True if segment has duplicate items. Otherwise false.

        ::

            >>> intervals = 'm2 M3 -aug4 m2 P5'
            >>> segment = pitchtools.IntervalSegment(intervals)
            >>> segment.has_duplicates
            True

        ::

            >>> intervals = 'M3 -aug4 m2 P5'
            >>> segment = pitchtools.IntervalSegment(intervals)
            >>> segment.has_duplicates
            False

        Returns boolean.
        '''
        from abjad.tools import pitchtools
        return len(pitchtools.IntervalSet(self)) < len(self)

    @property
    def slope(self):
        r'''Slope of interval segment.

        The slope of an interval segment is the sum of its intervals
        divided by its length:

        ::

            >>> pitchtools.IntervalSegment([1, 2]).slope
            Multiplier(3, 2)

        Returns multiplier.
        '''
        return durationtools.Multiplier.from_float(
            sum([x.number for x in self])) / len(self)

    @property
    def spread(self):
        r'''Spread of interval segment.

        The maximum interval spanned by any combination of the intervals
        within a numbered interval segment.

        ::

            >>> pitchtools.IntervalSegment([1, 2, -3, 1, -2, 1]).spread
            NumberedInterval(4.0)

        ::

            >>> pitchtools.IntervalSegment([1, 1, 1, 2, -3, -2]).spread
            NumberedInterval(5.0)

        Returns numbered interval.
        '''
        from abjad.tools import pitchtools
        current = maximum = minimum = 0
        for x in self:
            current += float(x)
            if maximum < current:
                maximum = current
            if current < minimum:
                minimum = current
        return pitchtools.NumberedInterval(maximum - minimum)
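A short usage sketch tying together `rotate`, `slope`, and `spread` from the class above, assuming a working abjad installation where `pitchtools` is importable; the expected values follow directly from the docstrings above:

from abjad.tools import pitchtools

segment = pitchtools.IntervalSegment([1, 2, -3, 1, -2, 1])

# rotate(1) moves the last interval to the front and returns a new segment.
rotated = segment.rotate(1)

# slope: sum of the interval numbers divided by the segment length.
# These intervals sum to 0, so the slope here is 0.
print(segment.slope)

# spread: widest span reached while accumulating the intervals in order;
# per the docstring above, this segment spans a numbered interval of 4.0.
print(segment.spread)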
# -*- encoding: utf-8 -*-
from abjad import *
import pytest


def test_pitchtools_PitchClassSet___slots___01():
    r'''Named pitch-class set can not be changed after initialization.
    '''

    named_pitch_classes = ['gs', 'a', 'as', 'c', 'cs']
    named_pitch_class_set = pitchtools.PitchClassSet(named_pitch_classes)

    assert pytest.raises(AttributeError, "named_pitch_class_set.foo = 'bar'")
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/pitchtools/IntervalSegment.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_yield_all_pitch_class_sets_01():

    U_star = pitchtools.yield_all_pitch_class_sets()

    assert len(U_star) == 4096
    assert pitchtools.PitchClassSet([0, 1, 2]) in U_star
    assert pitchtools.PitchClassSet([1, 2, 3]) in U_star
    assert pitchtools.PitchClassSet([3, 4, 8, 9, 11]) in U_star
    assert pitchtools.PitchClassSet(range(12)) in U_star
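The 4096 in the test is not arbitrary: the universe of pitch-class sets is the power set of the twelve pitch-classes {0, ..., 11}, so its cardinality is 2 ** 12. A standalone check of that count, using only the standard library:

from itertools import combinations

# Count every subset of {0, ..., 11}, from the empty set (r=0) up to
# the full aggregate (r=12); the total is the power-set size 2 ** 12.
subset_count = sum(
    1 for r in range(13) for _ in combinations(range(12), r)
)
assert subset_count == 2 ** 12 == 4096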
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/pitchtools/test/test_pitchtools_yield_all_pitch_class_sets.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_NumberedPitchClass___add___01():
    r'''Ascending numbered interval added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(1) == pitchtools.NumberedPitchClass(1)
    assert pc + MCI(2) == pitchtools.NumberedPitchClass(2)
    assert pc + MCI(3) == pitchtools.NumberedPitchClass(3)
    assert pc + MCI(4) == pitchtools.NumberedPitchClass(4)
    assert pc + MCI(5) == pitchtools.NumberedPitchClass(5)
    assert pc + MCI(6) == pitchtools.NumberedPitchClass(6)
    assert pc + MCI(7) == pitchtools.NumberedPitchClass(7)
    assert pc + MCI(8) == pitchtools.NumberedPitchClass(8)
    assert pc + MCI(9) == pitchtools.NumberedPitchClass(9)
    assert pc + MCI(10) == pitchtools.NumberedPitchClass(10)
    assert pc + MCI(11) == pitchtools.NumberedPitchClass(11)


def test_pitchtools_NumberedPitchClass___add___02():
    r'''Ascending numbered interval added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(12) == pitchtools.NumberedPitchClass(0)
    assert pc + MCI(13) == pitchtools.NumberedPitchClass(1)
    assert pc + MCI(14) == pitchtools.NumberedPitchClass(2)
    assert pc + MCI(15) == pitchtools.NumberedPitchClass(3)
    assert pc + MCI(16) == pitchtools.NumberedPitchClass(4)
    assert pc + MCI(17) == pitchtools.NumberedPitchClass(5)
    assert pc + MCI(18) == pitchtools.NumberedPitchClass(6)
    assert pc + MCI(19) == pitchtools.NumberedPitchClass(7)
    assert pc + MCI(20) == pitchtools.NumberedPitchClass(8)
    assert pc + MCI(21) == pitchtools.NumberedPitchClass(9)
    assert pc + MCI(22) == pitchtools.NumberedPitchClass(10)
    assert pc + MCI(23) == pitchtools.NumberedPitchClass(11)


def test_pitchtools_NumberedPitchClass___add___03():
    r'''Descending numbered interval added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(-1) == pitchtools.NumberedPitchClass(11)
    assert pc + MCI(-2) == pitchtools.NumberedPitchClass(10)
    assert pc + MCI(-3) == pitchtools.NumberedPitchClass(9)
    assert pc + MCI(-4) == pitchtools.NumberedPitchClass(8)
    assert pc + MCI(-5) == pitchtools.NumberedPitchClass(7)
    assert pc + MCI(-6) == pitchtools.NumberedPitchClass(6)
    assert pc + MCI(-7) == pitchtools.NumberedPitchClass(5)
    assert pc + MCI(-8) == pitchtools.NumberedPitchClass(4)
    assert pc + MCI(-9) == pitchtools.NumberedPitchClass(3)
    assert pc + MCI(-10) == pitchtools.NumberedPitchClass(2)
    assert pc + MCI(-11) == pitchtools.NumberedPitchClass(1)


def test_pitchtools_NumberedPitchClass___add___04():
    r'''Descending numbered interval added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(-12) == pitchtools.NumberedPitchClass(0)
    assert pc + MCI(-13) == pitchtools.NumberedPitchClass(11)
    assert pc + MCI(-14) == pitchtools.NumberedPitchClass(10)
    assert pc + MCI(-15) == pitchtools.NumberedPitchClass(9)
    assert pc + MCI(-16) == pitchtools.NumberedPitchClass(8)
    assert pc + MCI(-17) == pitchtools.NumberedPitchClass(7)
    assert pc + MCI(-18) == pitchtools.NumberedPitchClass(6)
    assert pc + MCI(-19) == pitchtools.NumberedPitchClass(5)
    assert pc + MCI(-20) == pitchtools.NumberedPitchClass(4)
    assert pc + MCI(-21) == pitchtools.NumberedPitchClass(3)
    assert pc + MCI(-22) == pitchtools.NumberedPitchClass(2)
    assert pc + MCI(-23) == pitchtools.NumberedPitchClass(1)


def test_pitchtools_NumberedPitchClass___add___05():
    r'''Numbered unison added to pitch-class.
    '''

    pc = pitchtools.NumberedPitchClass(0)
    MCI = pitchtools.NumberedInterval

    assert pc + MCI(0) == pitchtools.NumberedPitchClass(0)
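Every expected value in the five tests above is plain modular arithmetic. A minimal standalone sketch (not abjad's implementation) that reproduces the mapping:

def add_numbered_interval(pitch_class, interval):
    """Return the pitch-class reached by adding `interval`, mod 12."""
    return (pitch_class + interval) % 12

assert add_numbered_interval(0, 13) == 1    # matches test 02
assert add_numbered_interval(0, -1) == 11   # matches test 03
assert add_numbered_interval(0, 0) == 0     # matches test 05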
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/pitchtools/test/test_pitchtools_NumberedPitchClass___add__.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_schemetools_Scheme_format_scheme_value_01():

    assert schemetools.Scheme.format_scheme_value(1) == '1'
    assert schemetools.Scheme.format_scheme_value(True) == '#t'
    assert schemetools.Scheme.format_scheme_value(False) == '#f'
    assert schemetools.Scheme.format_scheme_value('foo bar') == '"foo bar"'
    assert schemetools.Scheme.format_scheme_value('baz') == 'baz'
    assert schemetools.Scheme.format_scheme_value([1, 2, 3]) == '(1 2 3)'
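The test exercises a small set of Python-to-Scheme conversion rules. A rough standalone sketch of those rules (not abjad's actual implementation, which handles more cases) that passes the same assertions:

def format_scheme_value(value):
    """Sketch of the conversion rules exercised by the test above."""
    if value is True:
        return '#t'
    if value is False:
        return '#f'
    if isinstance(value, str):
        # Strings with spaces are quoted; bare words pass through.
        return '"{}"'.format(value) if ' ' in value else value
    if isinstance(value, (list, tuple)):
        return '({})'.format(' '.join(format_scheme_value(x) for x in value))
    return str(value)

assert format_scheme_value([1, 2, 3]) == '(1 2 3)'
assert format_scheme_value('foo bar') == '"foo bar"'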
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/schemetools/test/test_schemetools_Scheme_format_scheme_value.py
# -*- encoding: utf-8 -*-
import abc
from abjad.tools.abctools.AbjadObject import AbjadObject


class TypedCollection(AbjadObject):
    r'''Abstract base class for typed collections.
    '''

    ### CLASS VARIABLES ###

    __slots__ = (
        '_collection',
        '_item_class',
        )

    ### INITIALIZER ###

    @abc.abstractmethod
    def __init__(self, items=None, item_class=None):
        assert isinstance(item_class, (type(None), type))
        self._item_class = item_class

    ### SPECIAL METHODS ###

    def __contains__(self, item):
        r'''Is true when typed collection contains `item`.
        Otherwise false.

        Returns boolean.
        '''
        try:
            item = self._item_coercer(item)
        except ValueError:
            return False
        return self._collection.__contains__(item)

    def __eq__(self, expr):
        r'''Is true when `expr` is a typed collection with items that
        compare equal to those of this typed collection. Otherwise false.

        Returns boolean.
        '''
        if isinstance(expr, type(self)):
            return self._collection == expr._collection
        elif isinstance(expr, type(self._collection)):
            return self._collection == expr
        return False

    def __format__(self, format_specification=''):
        r'''Formats typed collection.

        Set `format_specification` to `''` or `'storage'`.
        Interprets `''` equal to `'storage'`.

        Returns string.
        '''
        from abjad.tools import systemtools
        if format_specification in ('', 'storage'):
            return systemtools.StorageFormatManager.get_storage_format(self)
        return str(self)

    def __getnewargs__(self):
        r'''Gets new arguments.

        Returns tuple.
        '''
        return (self._collection, self.item_class)

    def __hash__(self):
        r'''Hashes typed collection.

        Required to be explicitly re-defined on Python 3 if __eq__ changes.

        Returns integer.
        '''
        return super(TypedCollection, self).__hash__()

    def __iter__(self):
        r'''Iterates typed collection.

        Returns generator.
        '''
        return self._collection.__iter__()

    def __len__(self):
        r'''Length of typed collection.

        Returns nonnegative integer.
        '''
        return len(self._collection)

    def __ne__(self, expr):
        r'''Is true when `expr` is not a typed collection with items equal
        to this typed collection. Otherwise false.

        Returns boolean.
        '''
        return not self.__eq__(expr)

    ### PRIVATE METHODS ###

    def _on_insertion(self, item):
        r'''Override to operate on item after insertion into collection.
        '''
        pass

    def _on_removal(self, item):
        r'''Override to operate on item after removal from collection.
        '''
        pass

    ### PRIVATE PROPERTIES ###

    @property
    def _item_coercer(self):
        def coerce_(x):
            if isinstance(x, self._item_class):
                return x
            return self._item_class(x)
        if self._item_class is None:
            return lambda x: x
        return coerce_

    @property
    def _repr_specification(self):
        from abjad.tools import systemtools
        manager = systemtools.StorageFormatManager
        names = manager.get_signature_keyword_argument_names(self)
        keyword_argument_names = list(names)
        if 'items' in keyword_argument_names:
            keyword_argument_names.remove('items')
        keyword_argument_names = tuple(keyword_argument_names)
        positional_argument_values = (
            self._collection,
            )
        return systemtools.StorageFormatSpecification(
            self,
            is_indented=False,
            keyword_argument_names=keyword_argument_names,
            positional_argument_values=positional_argument_values,
            )

    @property
    def _storage_format_specification(self):
        from abjad.tools import systemtools
        manager = systemtools.StorageFormatManager
        names = manager.get_signature_keyword_argument_names(self)
        keyword_argument_names = list(names)
        if 'items' in keyword_argument_names:
            keyword_argument_names.remove('items')
        keyword_argument_names = tuple(keyword_argument_names)
        positional_argument_values = (
            self._collection,
            )
        return systemtools.StorageFormatSpecification(
            self,
            keyword_argument_names=keyword_argument_names,
            positional_argument_values=positional_argument_values,
            )

    ### PUBLIC PROPERTIES ###

    @property
    def item_class(self):
        r'''Item class to coerce items into.
        '''
        return self._item_class

    @property
    def items(self):
        r'''Gets collection items.
        '''
        return [x for x in self]
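A minimal sketch of the coercion pattern the base class enables. `TypedList` here is a hypothetical concrete subclass written for illustration, not one of abjad's own subclasses; it assumes the `TypedCollection` definition above is in scope:

class TypedList(TypedCollection):
    """Hypothetical concrete subclass illustrating item coercion."""

    def __init__(self, items=None, item_class=None):
        TypedCollection.__init__(self, item_class=item_class)
        # _item_coercer runs each incoming item through item_class(...)
        # unless it is already an instance of item_class.
        self._collection = [self._item_coercer(x) for x in items or []]

numbers = TypedList(['1', '2', 3], item_class=int)
assert numbers.items == [1, 2, 3]   # strings coerced via int(...)
assert 2 in numbers                 # __contains__ coerces before testing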
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/datastructuretools/TypedCollection.py
# -*- encoding: utf-8 -*-


def timespan_2_overlaps_all_of_timespan_1(
    timespan_1=None,
    timespan_2=None,
    hold=False,
    ):
    r'''Makes time relation indicating that `timespan_2` overlaps
    all of `timespan_1`.

    ::

        >>> relation = timespantools.timespan_2_overlaps_all_of_timespan_1()
        >>> print(format(relation))
        timespantools.TimespanTimespanTimeRelation(
            inequality=timespantools.CompoundInequality(
                [
                    timespantools.SimpleInequality('timespan_2.start_offset < timespan_1.start_offset'),
                    timespantools.SimpleInequality('timespan_1.stop_offset < timespan_2.stop_offset'),
                    ],
                logical_operator='and',
                ),
            )

    Returns time relation or boolean.
    '''
    from abjad.tools import timespantools

    inequality = timespantools.CompoundInequality([
        'timespan_2.start_offset < timespan_1.start_offset',
        'timespan_1.stop_offset < timespan_2.stop_offset',
        ])

    time_relation = timespantools.TimespanTimespanTimeRelation(
        inequality,
        timespan_1=timespan_1,
        timespan_2=timespan_2,
        )

    if time_relation.is_fully_loaded and not hold:
        return time_relation()
    else:
        return time_relation
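A usage sketch of the fully-loaded path, assuming abjad's `timespantools.Timespan(start_offset, stop_offset)` is available: when both timespans are supplied and `hold` is False, the relation evaluates immediately to a boolean rather than returning the relation object.

from abjad.tools import timespantools

t1 = timespantools.Timespan(2, 5)
t2 = timespantools.Timespan(1, 6)  # starts before t1 and stops after it

# Both inequalities hold (1 < 2 and 5 < 6), so this evaluates to True.
assert timespantools.timespan_2_overlaps_all_of_timespan_1(
    timespan_1=t1, timespan_2=t2)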
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/timespantools/timespan_2_overlaps_all_of_timespan_1.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_PitchClassSet_multiply_01():

    assert pitchtools.PitchClassSet([0, 1, 5]).multiply(5) == \
        pitchtools.PitchClassSet([0, 1, 5])
    assert pitchtools.PitchClassSet([1, 2, 6]).multiply(5) == \
        pitchtools.PitchClassSet([5, 6, 10])
    assert pitchtools.PitchClassSet([2, 3, 7]).multiply(5) == \
        pitchtools.PitchClassSet([3, 10, 11])
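Pitch-class multiplication is multiplication of each member mod 12. A standalone sketch that reproduces the three expected sets above:

def multiply_pitch_class_set(pcs, n):
    """Multiply each pitch-class by n, mod 12 (standalone sketch)."""
    return {(pc * n) % 12 for pc in pcs}

assert multiply_pitch_class_set({0, 1, 5}, 5) == {0, 5, 1}    # {0, 1, 5}
assert multiply_pitch_class_set({1, 2, 6}, 5) == {5, 10, 6}   # {5, 6, 10}
assert multiply_pitch_class_set({2, 3, 7}, 5) == {10, 3, 11}  # {3, 10, 11}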
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet_multiply.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_scoretools_Container_index_01():
    r'''Elements that compare equal return different indices in container.
    '''

    container = Container(4 * Note("c'4"))

    assert container.index(container[0]) == 0
    assert container.index(container[1]) == 1
    assert container.index(container[2]) == 2
    assert container.index(container[3]) == 3
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/scoretools/test/test_scoretools_Container_index.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_list_pitch_numbers_in_expr_01():

    tuplet = scoretools.FixedDurationTuplet(Duration(2, 8), "c'8 d'8 e'8")

    assert pitchtools.list_pitch_numbers_in_expr(tuplet) == (0, 2, 4)


def test_pitchtools_list_pitch_numbers_in_expr_02():

    staff = Staff("c'8 d'8 e'8 f'8")

    assert pitchtools.list_pitch_numbers_in_expr(staff) == (0, 2, 4, 5)
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/pitchtools/test/test_pitchtools_list_pitch_numbers_in_expr.py
# -*- encoding: utf-8 -*-
from abjad import *


def test_rhythmtreetools_RhythmTreeContainer_insert_01():

    leaf_a = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=3)
    leaf_b = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=3)
    leaf_c = rhythmtreetools.RhythmTreeLeaf(preprolated_duration=2)

    container = rhythmtreetools.RhythmTreeContainer()
    assert container.children == ()

    container.insert(0, leaf_a)
    assert container.children == (leaf_a,)

    container.insert(0, leaf_b)
    assert container.children == (leaf_b, leaf_a)

    container.insert(1, leaf_c)
    assert container.children == (leaf_b, leaf_c, leaf_a)
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/rhythmtreetools/test/test_rhythmtreetools_RhythmTreeContainer_insert.py
# -*- encoding: utf-8 -*-
import six
from abjad.tools.stringtools.strip_diacritics import strip_diacritics


def to_accent_free_snake_case(string):
    '''Changes `string` to accent-free snake case.

    ..  container:: example

        ::

            >>> stringtools.to_accent_free_snake_case('Déja vu')
            'deja_vu'

    Strips accents from accented characters.

    Changes all punctuation (including spaces) to underscore.

    Sets to lowercase.

    Returns string.
    '''
    assert isinstance(string, six.string_types)
    result = strip_diacritics(string)
    result = result.replace(' ', '_')
    result = result.replace("'", '_')
    result = result.lower()
    return result
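A self-contained sketch of the same transformation without abjad: `_strip_diacritics` below is a stand-in for abjad's `strip_diacritics` helper, built on Unicode NFKD decomposition.

import unicodedata

def _strip_diacritics(string):
    # Decompose accented characters, then drop the combining marks.
    decomposed = unicodedata.normalize('NFKD', string)
    return ''.join(c for c in decomposed if not unicodedata.combining(c))

def _to_accent_free_snake_case(string):
    result = _strip_diacritics(string)
    result = result.replace(' ', '_').replace("'", '_')
    return result.lower()

assert _to_accent_free_snake_case("Déja vu") == 'deja_vu'
assert _to_accent_free_snake_case("L'Histoire du Soldat") == 'l_histoire_du_soldat'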
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/stringtools/to_accent_free_snake_case.py
# -*- encoding: utf-8 -*-
import copy
from abjad import *


def test_pitchtools_NamedPitch___copy___01():

    pitch = NamedPitch(13)
    new = copy.copy(pitch)

    assert new is not pitch
    assert new.accidental is not pitch.accidental
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_PitchClassSet___slots__.py
abjad/tools/pitchtools/test/test_pitchtools_NamedPitch___copy__.py
"""Support for monitoring juicenet/juicepoint/juicebox based EVSE switches.""" from homeassistant.components.switch import SwitchEntity from .const import DOMAIN, JUICENET_API, JUICENET_COORDINATOR from .entity import JuiceNetDevice async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the JuiceNet switches.""" entities = [] juicenet_data = hass.data[DOMAIN][config_entry.entry_id] api = juicenet_data[JUICENET_API] coordinator = juicenet_data[JUICENET_COORDINATOR] for device in api.devices: entities.append(JuiceNetChargeNowSwitch(device, coordinator)) async_add_entities(entities) class JuiceNetChargeNowSwitch(JuiceNetDevice, SwitchEntity): """Implementation of a JuiceNet switch.""" def __init__(self, device, coordinator): """Initialise the switch.""" super().__init__(device, "charge_now", coordinator) @property def name(self): """Return the name of the device.""" return f"{self.device.name} Charge Now" @property def is_on(self): """Return true if switch is on.""" return self.device.override_time != 0 async def async_turn_on(self, **kwargs): """Charge now.""" await self.device.set_override(True) async def async_turn_off(self, **kwargs): """Don't charge now.""" await self.device.set_override(False)
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/juicenet/switch.py
"""Support for Melissa Climate A/C.""" import logging from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( FAN_AUTO, FAN_HIGH, FAN_LOW, FAN_MEDIUM, HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_FAN_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS from . import DATA_MELISSA _LOGGER = logging.getLogger(__name__) SUPPORT_FLAGS = SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE OP_MODES = [ HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_OFF, ] FAN_MODES = [FAN_AUTO, FAN_HIGH, FAN_MEDIUM, FAN_LOW] async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Iterate through and add all Melissa devices.""" api = hass.data[DATA_MELISSA] devices = (await api.async_fetch_devices()).values() all_devices = [] for device in devices: if device["type"] == "melissa": all_devices.append(MelissaClimate(api, device["serial_number"], device)) async_add_entities(all_devices) class MelissaClimate(ClimateEntity): """Representation of a Melissa Climate device.""" def __init__(self, api, serial_number, init_data): """Initialize the climate device.""" self._name = init_data["name"] self._api = api self._serial_number = serial_number self._data = init_data["controller_log"] self._state = None self._cur_settings = None @property def name(self): """Return the name of the thermostat, if any.""" return self._name @property def fan_mode(self): """Return the current fan mode.""" if self._cur_settings is not None: return self.melissa_fan_to_hass(self._cur_settings[self._api.FAN]) @property def current_temperature(self): """Return the current temperature.""" if self._data: return self._data[self._api.TEMP] @property def current_humidity(self): """Return the current humidity value.""" if self._data: return self._data[self._api.HUMIDITY] @property def target_temperature_step(self): """Return the supported step of target temperature.""" return PRECISION_WHOLE @property def hvac_mode(self): """Return the current operation mode.""" if self._cur_settings is None: return None is_on = self._cur_settings[self._api.STATE] in ( self._api.STATE_ON, self._api.STATE_IDLE, ) if not is_on: return HVAC_MODE_OFF return self.melissa_op_to_hass(self._cur_settings[self._api.MODE]) @property def hvac_modes(self): """Return the list of available operation modes.""" return OP_MODES @property def fan_modes(self): """List of available fan modes.""" return FAN_MODES @property def target_temperature(self): """Return the temperature we try to reach.""" if self._cur_settings is None: return None return self._cur_settings[self._api.TEMP] @property def temperature_unit(self): """Return the unit of measurement which this thermostat uses.""" return TEMP_CELSIUS @property def min_temp(self): """Return the minimum supported temperature for the thermostat.""" return 16 @property def max_temp(self): """Return the maximum supported temperature for the thermostat.""" return 30 @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_FLAGS async def async_set_temperature(self, **kwargs): """Set new target temperature.""" temp = kwargs.get(ATTR_TEMPERATURE) await self.async_send({self._api.TEMP: temp}) async def async_set_fan_mode(self, fan_mode): """Set fan mode.""" melissa_fan_mode = self.hass_fan_to_melissa(fan_mode) await self.async_send({self._api.FAN: melissa_fan_mode}) async def 
async_set_hvac_mode(self, hvac_mode): """Set operation mode.""" if hvac_mode == HVAC_MODE_OFF: await self.async_send({self._api.STATE: self._api.STATE_OFF}) return mode = self.hass_mode_to_melissa(hvac_mode) await self.async_send( {self._api.MODE: mode, self._api.STATE: self._api.STATE_ON} ) async def async_send(self, value): """Send action to service.""" try: old_value = self._cur_settings.copy() self._cur_settings.update(value) except AttributeError: old_value = None if not await self._api.async_send( self._serial_number, "melissa", self._cur_settings ): self._cur_settings = old_value async def async_update(self): """Get latest data from Melissa.""" try: self._data = (await self._api.async_status(cached=True))[ self._serial_number ] self._cur_settings = ( await self._api.async_cur_settings(self._serial_number) )["controller"]["_relation"]["command_log"] except KeyError: _LOGGER.warning("Unable to update entity %s", self.entity_id) def melissa_op_to_hass(self, mode): """Translate Melissa modes to hass states.""" if mode == self._api.MODE_HEAT: return HVAC_MODE_HEAT if mode == self._api.MODE_COOL: return HVAC_MODE_COOL if mode == self._api.MODE_DRY: return HVAC_MODE_DRY if mode == self._api.MODE_FAN: return HVAC_MODE_FAN_ONLY _LOGGER.warning("Operation mode %s could not be mapped to hass", mode) return None def melissa_fan_to_hass(self, fan): """Translate Melissa fan modes to hass modes.""" if fan == self._api.FAN_AUTO: return HVAC_MODE_AUTO if fan == self._api.FAN_LOW: return FAN_LOW if fan == self._api.FAN_MEDIUM: return FAN_MEDIUM if fan == self._api.FAN_HIGH: return FAN_HIGH _LOGGER.warning("Fan mode %s could not be mapped to hass", fan) return None def hass_mode_to_melissa(self, mode): """Translate hass states to melissa modes.""" if mode == HVAC_MODE_HEAT: return self._api.MODE_HEAT if mode == HVAC_MODE_COOL: return self._api.MODE_COOL if mode == HVAC_MODE_DRY: return self._api.MODE_DRY if mode == HVAC_MODE_FAN_ONLY: return self._api.MODE_FAN _LOGGER.warning("Melissa have no setting for %s mode", mode) def hass_fan_to_melissa(self, fan): """Translate hass fan modes to melissa modes.""" if fan == HVAC_MODE_AUTO: return self._api.FAN_AUTO if fan == FAN_LOW: return self._api.FAN_LOW if fan == FAN_MEDIUM: return self._api.FAN_MEDIUM if fan == FAN_HIGH: return self._api.FAN_HIGH _LOGGER.warning("Melissa have no setting for %s fan mode", fan)
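The four translator methods above are pure lookup tables written as if-chains. A standalone dict-based equivalent makes the symmetry explicit; this is a sketch of an alternative shape, not the integration's code, and the `MODE_*_CODE` values are hypothetical stand-ins for the Melissa API constants (`self._api.MODE_HEAT` and friends):

MODE_HEAT_CODE, MODE_COOL_CODE, MODE_DRY_CODE, MODE_FAN_CODE = 1, 2, 3, 4

MELISSA_TO_HASS = {
    MODE_HEAT_CODE: "heat",
    MODE_COOL_CODE: "cool",
    MODE_DRY_CODE: "dry",
    MODE_FAN_CODE: "fan_only",
}
# The reverse direction is the inverted mapping.
HASS_TO_MELISSA = {v: k for k, v in MELISSA_TO_HASS.items()}

def melissa_op_to_hass(mode):
    # .get returns None for unknown codes, like the if-chain's fall-through.
    return MELISSA_TO_HASS.get(mode)

assert melissa_op_to_hass(MODE_COOL_CODE) == "cool"
assert HASS_TO_MELISSA["dry"] == MODE_DRY_CODE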
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/melissa/climate.py
"""Support for VELUX KLF 200 devices.""" import logging from pyvlx import PyVLX, PyVLXException import voluptuous as vol from homeassistant.const import CONF_HOST, CONF_PASSWORD, EVENT_HOMEASSISTANT_STOP from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv DOMAIN = "velux" DATA_VELUX = "data_velux" SUPPORTED_DOMAINS = ["cover", "scene"] _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( {vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PASSWORD): cv.string} ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the velux component.""" try: hass.data[DATA_VELUX] = VeluxModule(hass, config[DOMAIN]) hass.data[DATA_VELUX].setup() await hass.data[DATA_VELUX].async_start() except PyVLXException as ex: _LOGGER.exception("Can't connect to velux interface: %s", ex) return False for component in SUPPORTED_DOMAINS: hass.async_create_task( discovery.async_load_platform(hass, component, DOMAIN, {}, config) ) return True class VeluxModule: """Abstraction for velux component.""" def __init__(self, hass, domain_config): """Initialize for velux component.""" self.pyvlx = None self._hass = hass self._domain_config = domain_config def setup(self): """Velux component setup.""" async def on_hass_stop(event): """Close connection when hass stops.""" _LOGGER.debug("Velux interface terminated") await self.pyvlx.disconnect() async def async_reboot_gateway(service_call): await self.pyvlx.reboot_gateway() self._hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_hass_stop) host = self._domain_config.get(CONF_HOST) password = self._domain_config.get(CONF_PASSWORD) self.pyvlx = PyVLX(host=host, password=password) self._hass.services.async_register( DOMAIN, "reboot_gateway", async_reboot_gateway ) async def async_start(self): """Start velux component.""" _LOGGER.debug("Velux interface started") await self.pyvlx.load_scenes() await self.pyvlx.load_nodes()
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/velux/__init__.py
"""Insteon base entity.""" import functools import logging from pyinsteon import devices from homeassistant.core import callback from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import Entity from .const import ( DOMAIN, SIGNAL_ADD_DEFAULT_LINKS, SIGNAL_LOAD_ALDB, SIGNAL_PRINT_ALDB, SIGNAL_REMOVE_ENTITY, SIGNAL_SAVE_DEVICES, STATE_NAME_LABEL_MAP, ) from .utils import print_aldb_to_log _LOGGER = logging.getLogger(__name__) class InsteonEntity(Entity): """INSTEON abstract base entity.""" def __init__(self, device, group): """Initialize the INSTEON binary sensor.""" self._insteon_device_group = device.groups[group] self._insteon_device = device def __hash__(self): """Return the hash of the Insteon Entity.""" return hash(self._insteon_device) @property def should_poll(self): """No polling needed.""" return False @property def address(self): """Return the address of the node.""" return str(self._insteon_device.address) @property def group(self): """Return the INSTEON group that the entity responds to.""" return self._insteon_device_group.group @property def unique_id(self) -> str: """Return a unique ID.""" if self._insteon_device_group.group == 0x01: uid = self._insteon_device.id else: uid = f"{self._insteon_device.id}_{self._insteon_device_group.group}" return uid @property def name(self): """Return the name of the node (used for Entity_ID).""" # Set a base description description = self._insteon_device.description if description is None: description = "Unknown Device" # Get an extension label if there is one extension = self._get_label() if extension: extension = f" {extension}" return f"{description} {self._insteon_device.address}{extension}" @property def device_state_attributes(self): """Provide attributes for display on device card.""" return {"insteon_address": self.address, "insteon_group": self.group} @property def device_info(self): """Return device information.""" return { "identifiers": {(DOMAIN, str(self._insteon_device.address))}, "name": f"{self._insteon_device.description} {self._insteon_device.address}", "model": f"{self._insteon_device.model} ({self._insteon_device.cat!r}, 0x{self._insteon_device.subcat:02x})", "sw_version": f"{self._insteon_device.firmware:02x} Engine Version: {self._insteon_device.engine_version}", "manufacturer": "Smart Home", "via_device": (DOMAIN, str(devices.modem.address)), } @callback def async_entity_update(self, name, address, value, group): """Receive notification from transport that new data exists.""" _LOGGER.debug( "Received update for device %s group %d value %s", address, group, value, ) self.async_write_ha_state() async def async_added_to_hass(self): """Register INSTEON update events.""" _LOGGER.debug( "Tracking updates for device %s group %d name %s", self.address, self.group, self._insteon_device_group.name, ) self._insteon_device_group.subscribe(self.async_entity_update) load_signal = f"{self.entity_id}_{SIGNAL_LOAD_ALDB}" self.async_on_remove( async_dispatcher_connect(self.hass, load_signal, self._async_read_aldb) ) print_signal = f"{self.entity_id}_{SIGNAL_PRINT_ALDB}" async_dispatcher_connect(self.hass, print_signal, self._print_aldb) default_links_signal = f"{self.entity_id}_{SIGNAL_ADD_DEFAULT_LINKS}" async_dispatcher_connect( self.hass, default_links_signal, self._async_add_default_links ) remove_signal = f"{self._insteon_device.address.id}_{SIGNAL_REMOVE_ENTITY}" self.async_on_remove( async_dispatcher_connect( self.hass, remove_signal, 
functools.partial(self.async_remove, force_remove=True), ) ) async def async_will_remove_from_hass(self): """Unsubscribe to INSTEON update events.""" _LOGGER.debug( "Remove tracking updates for device %s group %d name %s", self.address, self.group, self._insteon_device_group.name, ) self._insteon_device_group.unsubscribe(self.async_entity_update) async def _async_read_aldb(self, reload): """Call device load process and print to log.""" await self._insteon_device.aldb.async_load(refresh=reload) self._print_aldb() async_dispatcher_send(self.hass, SIGNAL_SAVE_DEVICES) def _print_aldb(self): """Print the device ALDB to the log file.""" print_aldb_to_log(self._insteon_device.aldb) def _get_label(self): """Get the device label for grouped devices.""" label = "" if len(self._insteon_device.groups) > 1: if self._insteon_device_group.name in STATE_NAME_LABEL_MAP: label = STATE_NAME_LABEL_MAP[self._insteon_device_group.name] else: label = f"Group {self.group:d}" return label async def _async_add_default_links(self): """Add default links between the device and the modem.""" await self._insteon_device.async_add_default_links()
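The `unique_id` property above encodes a small convention: group 1 (the device's primary group) uses the bare device ID, every other group appends the group number. A standalone sketch of that rule, with a hypothetical device ID:

def insteon_unique_id(device_id, group):
    """Standalone sketch of the unique-ID convention used above."""
    return device_id if group == 0x01 else f"{device_id}_{group}"

assert insteon_unique_id("1A2B3C", 0x01) == "1A2B3C"
assert insteon_unique_id("1A2B3C", 0x02) == "1A2B3C_2"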
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/insteon/insteon_entity.py
"""Config flow to configure the Toon component.""" import logging from typing import Any, Dict, List, Optional from toonapi import Agreement, Toon, ToonError import voluptuous as vol from homeassistant import config_entries from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.config_entry_oauth2_flow import AbstractOAuth2FlowHandler from .const import CONF_AGREEMENT, CONF_AGREEMENT_ID, CONF_MIGRATE, DOMAIN class ToonFlowHandler(AbstractOAuth2FlowHandler, domain=DOMAIN): """Handle a Toon config flow.""" CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH DOMAIN = DOMAIN VERSION = 2 agreements: Optional[List[Agreement]] = None data: Optional[Dict[str, Any]] = None @property def logger(self) -> logging.Logger: """Return logger.""" return logging.getLogger(__name__) async def async_oauth_create_entry(self, data: Dict[str, Any]) -> Dict[str, Any]: """Test connection and load up agreements.""" self.data = data toon = Toon( token=self.data["token"]["access_token"], session=async_get_clientsession(self.hass), ) try: self.agreements = await toon.agreements() except ToonError: return self.async_abort(reason="connection_error") if not self.agreements: return self.async_abort(reason="no_agreements") return await self.async_step_agreement() async def async_step_import( self, config: Optional[Dict[str, Any]] = None ) -> Dict[str, Any]: """Start a configuration flow based on imported data. This step is merely here to trigger "discovery" when the `toon` integration is listed in the user configuration, or when migrating from the version 1 schema. """ if config is not None and CONF_MIGRATE in config: self.context.update({CONF_MIGRATE: config[CONF_MIGRATE]}) else: await self._async_handle_discovery_without_unique_id() return await self.async_step_user() async def async_step_agreement( self, user_input: Dict[str, Any] = None ) -> Dict[str, Any]: """Select Toon agreement to add.""" if len(self.agreements) == 1: return await self._create_entry(self.agreements[0]) agreements_list = [ f"{agreement.street} {agreement.house_number}, {agreement.city}" for agreement in self.agreements ] if user_input is None: return self.async_show_form( step_id="agreement", data_schema=vol.Schema( {vol.Required(CONF_AGREEMENT): vol.In(agreements_list)} ), ) agreement_index = agreements_list.index(user_input[CONF_AGREEMENT]) return await self._create_entry(self.agreements[agreement_index]) async def _create_entry(self, agreement: Agreement) -> Dict[str, Any]: if CONF_MIGRATE in self.context: await self.hass.config_entries.async_remove(self.context[CONF_MIGRATE]) await self.async_set_unique_id(agreement.agreement_id) self._abort_if_unique_id_configured() self.data[CONF_AGREEMENT_ID] = agreement.agreement_id return self.async_create_entry( title=f"{agreement.street} {agreement.house_number}, {agreement.city}", data=self.data, )
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/toon/config_flow.py
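A minimal, self-contained sketch of the agreement-selection logic in the Toon flow above: the flow renders each agreement as a "street number, city" label and maps the chosen label back to the agreement by list index. FakeAgreement is a hypothetical stand-in for toonapi.Agreement so the snippet runs without the toonapi package.

from collections import namedtuple

# Stand-in for toonapi.Agreement; fields mirror what the flow reads.
FakeAgreement = namedtuple("FakeAgreement", ["street", "house_number", "city", "agreement_id"])

agreements = [
    FakeAgreement("Main Street", 1, "Amsterdam", "abc-1"),
    FakeAgreement("Canal Road", 42, "Utrecht", "abc-2"),
]

# Same label format the flow offers in its vol.In() selector.
labels = [f"{a.street} {a.house_number}, {a.city}" for a in agreements]

# A user picking the second label resolves back to the second agreement.
chosen = labels[1]
selected = agreements[labels.index(chosen)]
assert selected.agreement_id == "abc-2"
print(labels, "->", selected.agreement_id)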
"""Support for Bond fans.""" import logging import math from typing import Any, Callable, List, Optional, Tuple from bond_api import Action, BPUPSubscriptions, DeviceType, Direction from homeassistant.components.fan import ( DIRECTION_FORWARD, DIRECTION_REVERSE, SUPPORT_DIRECTION, SUPPORT_SET_SPEED, FanEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity import Entity from homeassistant.util.percentage import ( percentage_to_ranged_value, ranged_value_to_percentage, ) from .const import BPUP_SUBS, DOMAIN, HUB from .entity import BondEntity from .utils import BondDevice, BondHub _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable[[List[Entity], bool], None], ) -> None: """Set up Bond fan devices.""" data = hass.data[DOMAIN][entry.entry_id] hub: BondHub = data[HUB] bpup_subs: BPUPSubscriptions = data[BPUP_SUBS] fans = [ BondFan(hub, device, bpup_subs) for device in hub.devices if DeviceType.is_fan(device.type) ] async_add_entities(fans, True) class BondFan(BondEntity, FanEntity): """Representation of a Bond fan.""" def __init__(self, hub: BondHub, device: BondDevice, bpup_subs: BPUPSubscriptions): """Create HA entity representing Bond fan.""" super().__init__(hub, device, bpup_subs) self._power: Optional[bool] = None self._speed: Optional[int] = None self._direction: Optional[int] = None def _apply_state(self, state: dict): self._power = state.get("power") self._speed = state.get("speed") self._direction = state.get("direction") @property def supported_features(self) -> int: """Flag supported features.""" features = 0 if self._device.supports_speed(): features |= SUPPORT_SET_SPEED if self._device.supports_direction(): features |= SUPPORT_DIRECTION return features @property def _speed_range(self) -> Tuple[int, int]: """Return the range of speeds.""" return (1, self._device.props.get("max_speed", 3)) @property def percentage(self) -> Optional[str]: """Return the current speed percentage for the fan.""" if not self._speed or not self._power: return 0 return ranged_value_to_percentage(self._speed_range, self._speed) @property def current_direction(self) -> Optional[str]: """Return fan rotation direction.""" direction = None if self._direction == Direction.FORWARD: direction = DIRECTION_FORWARD elif self._direction == Direction.REVERSE: direction = DIRECTION_REVERSE return direction async def async_set_percentage(self, percentage: int) -> None: """Set the desired speed for the fan.""" _LOGGER.debug("async_set_percentage called with percentage %s", percentage) if percentage == 0: await self.async_turn_off() return bond_speed = math.ceil( percentage_to_ranged_value(self._speed_range, percentage) ) _LOGGER.debug( "async_set_percentage converted percentage %s to bond speed %s", percentage, bond_speed, ) await self._hub.bond.action( self._device.device_id, Action.set_speed(bond_speed) ) async def async_turn_on( self, speed: Optional[str] = None, percentage: Optional[int] = None, preset_mode: Optional[str] = None, **kwargs, ) -> None: """Turn on the fan.""" _LOGGER.debug("Fan async_turn_on called with percentage %s", percentage) if percentage is not None: await self.async_set_percentage(percentage) else: await self._hub.bond.action(self._device.device_id, Action.turn_on()) async def async_turn_off(self, **kwargs: Any) -> None: """Turn the fan off.""" await self._hub.bond.action(self._device.device_id, Action.turn_off()) async def 
async_set_direction(self, direction: str): """Set fan rotation direction.""" bond_direction = ( Direction.REVERSE if direction == DIRECTION_REVERSE else Direction.FORWARD ) await self._hub.bond.action( self._device.device_id, Action.set_direction(bond_direction) )
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/bond/fan.py
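The fan code above converts between Home Assistant speed percentages and Bond's integer speed range via homeassistant.util.percentage. Below is a rough, self-contained sketch of that round trip for a 3-speed fan; the two conversion functions are simplified stand-ins written for this snippet, not the real helpers.

import math

def to_percentage(speed_range, value):
    # Stand-in approximating ranged_value_to_percentage for a (low, high) range.
    low, high = speed_range
    states = high - low + 1
    return int(((value - (low - 1)) * 100) // states)

def from_percentage(speed_range, percentage):
    # Stand-in approximating percentage_to_ranged_value.
    low, high = speed_range
    states = high - low + 1
    return states * percentage / 100 + (low - 1)

speed_range = (1, 3)  # a typical 3-speed Bond fan
for speed in (1, 2, 3):
    pct = to_percentage(speed_range, speed)
    # async_set_percentage applies math.ceil before sending the Bond action,
    # so every speed survives the round trip.
    assert math.ceil(from_percentage(speed_range, pct)) == speed
    print(f"speed {speed} <-> {pct}%")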
"""Support for Netgear LTE binary sensors.""" from homeassistant.components.binary_sensor import DOMAIN, BinarySensorEntity from homeassistant.exceptions import PlatformNotReady from . import CONF_MONITORED_CONDITIONS, DATA_KEY, LTEEntity from .sensor_types import BINARY_SENSOR_CLASSES async def async_setup_platform(hass, config, async_add_entities, discovery_info): """Set up Netgear LTE binary sensor devices.""" if discovery_info is None: return modem_data = hass.data[DATA_KEY].get_modem_data(discovery_info) if not modem_data or not modem_data.data: raise PlatformNotReady binary_sensor_conf = discovery_info[DOMAIN] monitored_conditions = binary_sensor_conf[CONF_MONITORED_CONDITIONS] binary_sensors = [] for sensor_type in monitored_conditions: binary_sensors.append(LTEBinarySensor(modem_data, sensor_type)) async_add_entities(binary_sensors) class LTEBinarySensor(LTEEntity, BinarySensorEntity): """Netgear LTE binary sensor entity.""" @property def is_on(self): """Return true if the binary sensor is on.""" return getattr(self.modem_data.data, self.sensor_type) @property def device_class(self): """Return the class of binary sensor.""" return BINARY_SENSOR_CLASSES[self.sensor_type]
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/netgear_lte/binary_sensor.py
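A small sketch of the getattr pattern LTEBinarySensor.is_on relies on above: each monitored condition is simply an attribute name on the modem data object. FakeModemData and its fields are made up for illustration.

from dataclasses import dataclass

@dataclass
class FakeModemData:
    # Hypothetical stand-in for the modem data object the integration wraps.
    roaming: bool = False
    wire_connected: bool = True

def is_on(data, sensor_type):
    # Mirrors LTEBinarySensor.is_on: the sensor type doubles as attribute name.
    return getattr(data, sensor_type)

data = FakeModemData()
for condition in ("roaming", "wire_connected"):
    print(condition, "->", is_on(data, condition))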
"""Event parser and human readable log generator.""" from datetime import timedelta from itertools import groupby import json import re import sqlalchemy from sqlalchemy.orm import aliased from sqlalchemy.sql.expression import literal import voluptuous as vol from homeassistant.components.automation import EVENT_AUTOMATION_TRIGGERED from homeassistant.components.history import sqlalchemy_filter_from_include_exclude_conf from homeassistant.components.http import HomeAssistantView from homeassistant.components.recorder.models import ( Events, States, process_timestamp_to_utc_isoformat, ) from homeassistant.components.recorder.util import session_scope from homeassistant.components.script import EVENT_SCRIPT_STARTED from homeassistant.const import ( ATTR_DOMAIN, ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME, ATTR_ICON, ATTR_NAME, ATTR_SERVICE, EVENT_CALL_SERVICE, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_LOGBOOK_ENTRY, EVENT_STATE_CHANGED, HTTP_BAD_REQUEST, ) from homeassistant.core import DOMAIN as HA_DOMAIN, callback, split_entity_id from homeassistant.exceptions import InvalidEntityFormatError import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entityfilter import ( INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA, convert_include_exclude_filter, generate_filter, ) from homeassistant.helpers.integration_platform import ( async_process_integration_platforms, ) from homeassistant.loader import bind_hass import homeassistant.util.dt as dt_util ENTITY_ID_JSON_TEMPLATE = '"entity_id": "{}"' ENTITY_ID_JSON_EXTRACT = re.compile('"entity_id": "([^"]+)"') DOMAIN_JSON_EXTRACT = re.compile('"domain": "([^"]+)"') ICON_JSON_EXTRACT = re.compile('"icon": "([^"]+)"') ATTR_MESSAGE = "message" CONTINUOUS_DOMAINS = ["proximity", "sensor"] DOMAIN = "logbook" GROUP_BY_MINUTES = 15 EMPTY_JSON_OBJECT = "{}" UNIT_OF_MEASUREMENT_JSON = '"unit_of_measurement":' HA_DOMAIN_ENTITY_ID = f"{HA_DOMAIN}." 
CONFIG_SCHEMA = vol.Schema( {DOMAIN: INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA}, extra=vol.ALLOW_EXTRA ) HOMEASSISTANT_EVENTS = [ EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, ] ALL_EVENT_TYPES_EXCEPT_STATE_CHANGED = [ EVENT_LOGBOOK_ENTRY, EVENT_CALL_SERVICE, *HOMEASSISTANT_EVENTS, ] ALL_EVENT_TYPES = [ EVENT_STATE_CHANGED, *ALL_EVENT_TYPES_EXCEPT_STATE_CHANGED, ] EVENT_COLUMNS = [ Events.event_type, Events.event_data, Events.time_fired, Events.context_id, Events.context_user_id, Events.context_parent_id, ] SCRIPT_AUTOMATION_EVENTS = [EVENT_AUTOMATION_TRIGGERED, EVENT_SCRIPT_STARTED] LOG_MESSAGE_SCHEMA = vol.Schema( { vol.Required(ATTR_NAME): cv.string, vol.Required(ATTR_MESSAGE): cv.template, vol.Optional(ATTR_DOMAIN): cv.slug, vol.Optional(ATTR_ENTITY_ID): cv.entity_id, } ) @bind_hass def log_entry(hass, name, message, domain=None, entity_id=None, context=None): """Add an entry to the logbook.""" hass.add_job(async_log_entry, hass, name, message, domain, entity_id, context) @bind_hass def async_log_entry(hass, name, message, domain=None, entity_id=None, context=None): """Add an entry to the logbook.""" data = {ATTR_NAME: name, ATTR_MESSAGE: message} if domain is not None: data[ATTR_DOMAIN] = domain if entity_id is not None: data[ATTR_ENTITY_ID] = entity_id hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data, context=context) async def async_setup(hass, config): """Logbook setup.""" hass.data[DOMAIN] = {} @callback def log_message(service): """Handle sending notification message service calls.""" message = service.data[ATTR_MESSAGE] name = service.data[ATTR_NAME] domain = service.data.get(ATTR_DOMAIN) entity_id = service.data.get(ATTR_ENTITY_ID) if entity_id is None and domain is None: # If there is no entity_id or # domain, the event will get filtered # away so we use the "logbook" domain domain = DOMAIN message.hass = hass message = message.async_render(parse_result=False) async_log_entry(hass, name, message, domain, entity_id) hass.components.frontend.async_register_built_in_panel( "logbook", "logbook", "hass:format-list-bulleted-type" ) conf = config.get(DOMAIN, {}) if conf: filters = sqlalchemy_filter_from_include_exclude_conf(conf) entities_filter = convert_include_exclude_filter(conf) else: filters = None entities_filter = None hass.http.register_view(LogbookView(conf, filters, entities_filter)) hass.services.async_register(DOMAIN, "log", log_message, schema=LOG_MESSAGE_SCHEMA) await async_process_integration_platforms(hass, DOMAIN, _process_logbook_platform) return True async def _process_logbook_platform(hass, domain, platform): """Process a logbook platform.""" @callback def _async_describe_event(domain, event_name, describe_callback): """Teach logbook how to describe a new event.""" hass.data[DOMAIN][event_name] = (domain, describe_callback) platform.async_describe_events(hass, _async_describe_event) class LogbookView(HomeAssistantView): """Handle logbook view requests.""" url = "/api/logbook" name = "api:logbook" extra_urls = ["/api/logbook/{datetime}"] def __init__(self, config, filters, entities_filter): """Initialize the logbook view.""" self.config = config self.filters = filters self.entities_filter = entities_filter async def get(self, request, datetime=None): """Retrieve logbook entries.""" if datetime: datetime = dt_util.parse_datetime(datetime) if datetime is None: return self.json_message("Invalid datetime", HTTP_BAD_REQUEST) else: datetime = dt_util.start_of_local_day() period = request.query.get("period") if period is None: period = 1 else: period = int(period) 
entity_ids = request.query.get("entity") if entity_ids: try: entity_ids = cv.entity_ids(entity_ids) except vol.Invalid: raise InvalidEntityFormatError( f"Invalid entity id(s) encountered: {entity_ids}. " "Format should be <domain>.<object_id>" ) from vol.Invalid end_time = request.query.get("end_time") if end_time is None: start_day = dt_util.as_utc(datetime) - timedelta(days=period - 1) end_day = start_day + timedelta(days=period) else: start_day = datetime end_day = dt_util.parse_datetime(end_time) if end_day is None: return self.json_message("Invalid end_time", HTTP_BAD_REQUEST) hass = request.app["hass"] entity_matches_only = "entity_matches_only" in request.query def json_events(): """Fetch events and generate JSON.""" return self.json( _get_events( hass, start_day, end_day, entity_ids, self.filters, self.entities_filter, entity_matches_only, ) ) return await hass.async_add_executor_job(json_events) def humanify(hass, events, entity_attr_cache, context_lookup): """Generate a converted list of events into Entry objects. Will try to group events if possible: - if 2+ sensor updates in GROUP_BY_MINUTES, show last - if Home Assistant stop and start happen in same minute call it restarted """ external_events = hass.data.get(DOMAIN, {}) # Group events in batches of GROUP_BY_MINUTES for _, g_events in groupby( events, lambda event: event.time_fired_minute // GROUP_BY_MINUTES ): events_batch = list(g_events) # Keep track of last sensor states last_sensor_event = {} # Group HA start/stop events # Maps minute of event to 1: stop, 2: stop + start start_stop_events = {} # Process events for event in events_batch: if event.event_type == EVENT_STATE_CHANGED: if event.domain in CONTINUOUS_DOMAINS: last_sensor_event[event.entity_id] = event elif event.event_type == EVENT_HOMEASSISTANT_STOP: if event.time_fired_minute in start_stop_events: continue start_stop_events[event.time_fired_minute] = 1 elif event.event_type == EVENT_HOMEASSISTANT_START: if event.time_fired_minute not in start_stop_events: continue start_stop_events[event.time_fired_minute] = 2 # Yield entries for event in events_batch: if event.event_type == EVENT_STATE_CHANGED: entity_id = event.entity_id domain = event.domain if ( domain in CONTINUOUS_DOMAINS and event != last_sensor_event[entity_id] ): # Skip all but the last sensor state continue data = { "when": event.time_fired_isoformat, "name": _entity_name_from_event( entity_id, event, entity_attr_cache ), "state": event.state, "entity_id": entity_id, } icon = event.attributes_icon if icon: data["icon"] = icon if event.context_user_id: data["context_user_id"] = event.context_user_id _augment_data_with_context( data, entity_id, event, context_lookup, entity_attr_cache, external_events, ) yield data elif event.event_type in external_events: domain, describe_event = external_events[event.event_type] data = describe_event(event) data["when"] = event.time_fired_isoformat data["domain"] = domain if event.context_user_id: data["context_user_id"] = event.context_user_id _augment_data_with_context( data, data.get(ATTR_ENTITY_ID), event, context_lookup, entity_attr_cache, external_events, ) yield data elif event.event_type == EVENT_HOMEASSISTANT_START: if start_stop_events.get(event.time_fired_minute) == 2: continue yield { "when": event.time_fired_isoformat, "name": "Home Assistant", "message": "started", "domain": HA_DOMAIN, } elif event.event_type == EVENT_HOMEASSISTANT_STOP: if start_stop_events.get(event.time_fired_minute) == 2: action = "restarted" else: action = "stopped" yield { "when": 
event.time_fired_isoformat, "name": "Home Assistant", "message": action, "domain": HA_DOMAIN, } elif event.event_type == EVENT_LOGBOOK_ENTRY: event_data = event.data domain = event_data.get(ATTR_DOMAIN) entity_id = event_data.get(ATTR_ENTITY_ID) if domain is None and entity_id is not None: try: domain = split_entity_id(str(entity_id))[0] except IndexError: pass data = { "when": event.time_fired_isoformat, "name": event_data.get(ATTR_NAME), "message": event_data.get(ATTR_MESSAGE), "domain": domain, "entity_id": entity_id, } if event.context_user_id: data["context_user_id"] = event.context_user_id _augment_data_with_context( data, entity_id, event, context_lookup, entity_attr_cache, external_events, ) yield data def _get_events( hass, start_day, end_day, entity_ids=None, filters=None, entities_filter=None, entity_matches_only=False, ): """Get events for a period of time.""" entity_attr_cache = EntityAttributeCache(hass) context_lookup = {None: None} def yield_events(query): """Yield Events that are not filtered away.""" for row in query.yield_per(1000): event = LazyEventPartialState(row) context_lookup.setdefault(event.context_id, event) if event.event_type == EVENT_CALL_SERVICE: continue if event.event_type == EVENT_STATE_CHANGED or _keep_event( hass, event, entities_filter ): yield event if entity_ids is not None: entities_filter = generate_filter([], entity_ids, [], []) with session_scope(hass=hass) as session: old_state = aliased(States, name="old_state") if entity_ids is not None: query = _generate_events_query_without_states(session) query = _apply_event_time_filter(query, start_day, end_day) query = _apply_event_types_filter( hass, query, ALL_EVENT_TYPES_EXCEPT_STATE_CHANGED ) if entity_matches_only: # When entity_matches_only is provided, contexts and events that do not # contain the entity_ids are not included in the logbook response. 
query = _apply_event_entity_id_matchers(query, entity_ids) query = query.union_all( _generate_states_query( session, start_day, end_day, old_state, entity_ids ) ) else: query = _generate_events_query(session) query = _apply_event_time_filter(query, start_day, end_day) query = _apply_events_types_and_states_filter( hass, query, old_state ).filter( (States.last_updated == States.last_changed) | (Events.event_type != EVENT_STATE_CHANGED) ) if filters: query = query.filter( filters.entity_filter() | (Events.event_type != EVENT_STATE_CHANGED) ) query = query.order_by(Events.time_fired) return list( humanify(hass, yield_events(query), entity_attr_cache, context_lookup) ) def _generate_events_query(session): return session.query( *EVENT_COLUMNS, States.state, States.entity_id, States.domain, States.attributes, ) def _generate_events_query_without_states(session): return session.query( *EVENT_COLUMNS, literal(None).label("state"), literal(None).label("entity_id"), literal(None).label("domain"), literal(None).label("attributes"), ) def _generate_states_query(session, start_day, end_day, old_state, entity_ids): return ( _generate_events_query(session) .outerjoin(Events, (States.event_id == Events.event_id)) .outerjoin(old_state, (States.old_state_id == old_state.state_id)) .filter(_missing_state_matcher(old_state)) .filter(_continuous_entity_matcher()) .filter((States.last_updated > start_day) & (States.last_updated < end_day)) .filter( (States.last_updated == States.last_changed) & States.entity_id.in_(entity_ids) ) ) def _apply_events_types_and_states_filter(hass, query, old_state): events_query = ( query.outerjoin(States, (Events.event_id == States.event_id)) .outerjoin(old_state, (States.old_state_id == old_state.state_id)) .filter( (Events.event_type != EVENT_STATE_CHANGED) | _missing_state_matcher(old_state) ) .filter( (Events.event_type != EVENT_STATE_CHANGED) | _continuous_entity_matcher() ) ) return _apply_event_types_filter(hass, events_query, ALL_EVENT_TYPES) def _missing_state_matcher(old_state): # The below removes state change events that do not have # and old_state or the old_state is missing (newly added entities) # or the new_state is missing (removed entities) return sqlalchemy.and_( old_state.state_id.isnot(None), (States.state != old_state.state), States.state.isnot(None), ) def _continuous_entity_matcher(): # # Prefilter out continuous domains that have # ATTR_UNIT_OF_MEASUREMENT as its much faster in sql. # return sqlalchemy.or_( sqlalchemy.not_(States.domain.in_(CONTINUOUS_DOMAINS)), sqlalchemy.not_(States.attributes.contains(UNIT_OF_MEASUREMENT_JSON)), ) def _apply_event_time_filter(events_query, start_day, end_day): return events_query.filter( (Events.time_fired > start_day) & (Events.time_fired < end_day) ) def _apply_event_types_filter(hass, query, event_types): return query.filter( Events.event_type.in_(event_types + list(hass.data.get(DOMAIN, {}))) ) def _apply_event_entity_id_matchers(events_query, entity_ids): return events_query.filter( sqlalchemy.or_( *[ Events.event_data.contains(ENTITY_ID_JSON_TEMPLATE.format(entity_id)) for entity_id in entity_ids ] ) ) def _keep_event(hass, event, entities_filter): if event.event_type in HOMEASSISTANT_EVENTS: return entities_filter is None or entities_filter(HA_DOMAIN_ENTITY_ID) entity_id = event.data_entity_id if entity_id: return entities_filter is None or entities_filter(entity_id) if event.event_type in hass.data[DOMAIN]: # If the entity_id isn't described, use the domain that describes # the event for filtering. 
domain = hass.data[DOMAIN][event.event_type][0] else: domain = event.data_domain if domain is None: return False return entities_filter is None or entities_filter(f"{domain}.") def _augment_data_with_context( data, entity_id, event, context_lookup, entity_attr_cache, external_events ): context_event = context_lookup.get(event.context_id) if not context_event: return if event == context_event: # This is the first event with the given ID. Was it directly caused by # a parent event? if event.context_parent_id: context_event = context_lookup.get(event.context_parent_id) # Ensure the (parent) context_event exists and is not the root cause of # this log entry. if not context_event or event == context_event: return event_type = context_event.event_type context_entity_id = context_event.entity_id # State change if context_entity_id: data["context_entity_id"] = context_entity_id data["context_entity_id_name"] = _entity_name_from_event( context_entity_id, context_event, entity_attr_cache ) data["context_event_type"] = event_type return event_data = context_event.data # Call service if event_type == EVENT_CALL_SERVICE: event_data = context_event.data data["context_domain"] = event_data.get(ATTR_DOMAIN) data["context_service"] = event_data.get(ATTR_SERVICE) data["context_event_type"] = event_type return if not entity_id: return attr_entity_id = event_data.get(ATTR_ENTITY_ID) if not attr_entity_id or ( event_type in SCRIPT_AUTOMATION_EVENTS and attr_entity_id == entity_id ): return if context_event == event: return data["context_entity_id"] = attr_entity_id data["context_entity_id_name"] = _entity_name_from_event( attr_entity_id, context_event, entity_attr_cache ) data["context_event_type"] = event_type if event_type in external_events: domain, describe_event = external_events[event_type] data["context_domain"] = domain name = describe_event(context_event).get(ATTR_NAME) if name: data["context_name"] = name def _entity_name_from_event(entity_id, event, entity_attr_cache): """Extract the entity name from the event using the cache if possible.""" return entity_attr_cache.get( entity_id, ATTR_FRIENDLY_NAME, event ) or split_entity_id(entity_id)[1].replace("_", " ") class LazyEventPartialState: """A lazy version of core Event with limited State joined in.""" __slots__ = [ "_row", "_event_data", "_time_fired_isoformat", "_attributes", "event_type", "entity_id", "state", "domain", "context_id", "context_user_id", "context_parent_id", "time_fired_minute", ] def __init__(self, row): """Init the lazy event.""" self._row = row self._event_data = None self._time_fired_isoformat = None self._attributes = None self.event_type = self._row.event_type self.entity_id = self._row.entity_id self.state = self._row.state self.domain = self._row.domain self.context_id = self._row.context_id self.context_user_id = self._row.context_user_id self.context_parent_id = self._row.context_parent_id self.time_fired_minute = self._row.time_fired.minute @property def attributes_icon(self): """Extract the icon from the decoded attributes or json.""" if self._attributes: return self._attributes.get(ATTR_ICON) result = ICON_JSON_EXTRACT.search(self._row.attributes) return result and result.group(1) @property def data_entity_id(self): """Extract the entity id from the decoded data or json.""" if self._event_data: return self._event_data.get(ATTR_ENTITY_ID) result = ENTITY_ID_JSON_EXTRACT.search(self._row.event_data) return result and result.group(1) @property def data_domain(self): """Extract the domain from the decoded data or json.""" 
if self._event_data: return self._event_data.get(ATTR_DOMAIN) result = DOMAIN_JSON_EXTRACT.search(self._row.event_data) return result and result.group(1) @property def attributes(self): """State attributes.""" if not self._attributes: if ( self._row.attributes is None or self._row.attributes == EMPTY_JSON_OBJECT ): self._attributes = {} else: self._attributes = json.loads(self._row.attributes) return self._attributes @property def data(self): """Event data.""" if not self._event_data: if self._row.event_data == EMPTY_JSON_OBJECT: self._event_data = {} else: self._event_data = json.loads(self._row.event_data) return self._event_data @property def time_fired_isoformat(self): """Time event was fired in utc isoformat.""" if not self._time_fired_isoformat: self._time_fired_isoformat = process_timestamp_to_utc_isoformat( self._row.time_fired or dt_util.utcnow() ) return self._time_fired_isoformat class EntityAttributeCache: """A cache to lookup static entity_id attribute. This class should not be used to lookup attributes that are expected to change state. """ def __init__(self, hass): """Init the cache.""" self._hass = hass self._cache = {} def get(self, entity_id, attribute, event): """Lookup an attribute for an entity or get it from the cache.""" if entity_id in self._cache: if attribute in self._cache[entity_id]: return self._cache[entity_id][attribute] else: self._cache[entity_id] = {} current_state = self._hass.states.get(entity_id) if current_state: # Try the current state as its faster than decoding the # attributes self._cache[entity_id][attribute] = current_state.attributes.get(attribute) else: # If the entity has been removed, decode the attributes # instead self._cache[entity_id][attribute] = event.attributes.get(attribute) return self._cache[entity_id][attribute]
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/logbook/__init__.py
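A minimal sketch of the batching humanify() performs above: time-ordered events are grouped into GROUP_BY_MINUTES windows with itertools.groupby, keyed on the minute the event fired. groupby only groups consecutive items, so the input must already be sorted by time, which the logbook query guarantees with order_by(Events.time_fired). Plain minute integers stand in for event rows here.

from itertools import groupby

GROUP_BY_MINUTES = 15
fired_minutes = [0, 3, 14, 15, 16, 31, 44, 45]  # hypothetical event minutes

for window, batch in groupby(fired_minutes, lambda minute: minute // GROUP_BY_MINUTES):
    print(f"window {window}: {list(batch)}")
# window 0: [0, 3, 14]
# window 1: [15, 16]
# window 2: [31, 44]
# window 3: [45]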
"""Support for Envisalink-based alarm control panels (Honeywell/DSC).""" import logging import voluptuous as vol from homeassistant.components.alarm_control_panel import ( FORMAT_NUMBER, AlarmControlPanelEntity, ) from homeassistant.components.alarm_control_panel.const import ( SUPPORT_ALARM_ARM_AWAY, SUPPORT_ALARM_ARM_HOME, SUPPORT_ALARM_ARM_NIGHT, SUPPORT_ALARM_TRIGGER, ) from homeassistant.const import ( ATTR_ENTITY_ID, CONF_CODE, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED, STATE_UNKNOWN, ) from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_connect from . import ( CONF_PANIC, CONF_PARTITIONNAME, DATA_EVL, DOMAIN, PARTITION_SCHEMA, SIGNAL_KEYPAD_UPDATE, SIGNAL_PARTITION_UPDATE, EnvisalinkDevice, ) _LOGGER = logging.getLogger(__name__) SERVICE_ALARM_KEYPRESS = "alarm_keypress" ATTR_KEYPRESS = "keypress" ALARM_KEYPRESS_SCHEMA = vol.Schema( { vol.Required(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_KEYPRESS): cv.string, } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Perform the setup for Envisalink alarm panels.""" configured_partitions = discovery_info["partitions"] code = discovery_info[CONF_CODE] panic_type = discovery_info[CONF_PANIC] devices = [] for part_num in configured_partitions: device_config_data = PARTITION_SCHEMA(configured_partitions[part_num]) device = EnvisalinkAlarm( hass, part_num, device_config_data[CONF_PARTITIONNAME], code, panic_type, hass.data[DATA_EVL].alarm_state["partition"][part_num], hass.data[DATA_EVL], ) devices.append(device) async_add_entities(devices) @callback def alarm_keypress_handler(service): """Map services to methods on Alarm.""" entity_ids = service.data.get(ATTR_ENTITY_ID) keypress = service.data.get(ATTR_KEYPRESS) target_devices = [ device for device in devices if device.entity_id in entity_ids ] for device in target_devices: device.async_alarm_keypress(keypress) hass.services.async_register( DOMAIN, SERVICE_ALARM_KEYPRESS, alarm_keypress_handler, schema=ALARM_KEYPRESS_SCHEMA, ) return True class EnvisalinkAlarm(EnvisalinkDevice, AlarmControlPanelEntity): """Representation of an Envisalink-based alarm panel.""" def __init__( self, hass, partition_number, alarm_name, code, panic_type, info, controller ): """Initialize the alarm panel.""" self._partition_number = partition_number self._code = code self._panic_type = panic_type _LOGGER.debug("Setting up alarm: %s", alarm_name) super().__init__(alarm_name, info, controller) async def async_added_to_hass(self): """Register callbacks.""" self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_KEYPAD_UPDATE, self._update_callback ) ) self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_PARTITION_UPDATE, self._update_callback ) ) @callback def _update_callback(self, partition): """Update Home Assistant state, if needed.""" if partition is None or int(partition) == self._partition_number: self.async_write_ha_state() @property def code_format(self): """Regex for code format or None if no code is required.""" if self._code: return None return FORMAT_NUMBER @property def state(self): """Return the state of the device.""" state = STATE_UNKNOWN if self._info["status"]["alarm"]: state = STATE_ALARM_TRIGGERED elif self._info["status"]["armed_zero_entry_delay"]: state = STATE_ALARM_ARMED_NIGHT elif self._info["status"]["armed_away"]: state = 
STATE_ALARM_ARMED_AWAY elif self._info["status"]["armed_stay"]: state = STATE_ALARM_ARMED_HOME elif self._info["status"]["exit_delay"]: state = STATE_ALARM_PENDING elif self._info["status"]["entry_delay"]: state = STATE_ALARM_PENDING elif self._info["status"]["alpha"]: state = STATE_ALARM_DISARMED return state @property def supported_features(self) -> int: """Return the list of supported features.""" return ( SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT | SUPPORT_ALARM_TRIGGER ) async def async_alarm_disarm(self, code=None): """Send disarm command.""" if code: self.hass.data[DATA_EVL].disarm_partition(str(code), self._partition_number) else: self.hass.data[DATA_EVL].disarm_partition( str(self._code), self._partition_number ) async def async_alarm_arm_home(self, code=None): """Send arm home command.""" if code: self.hass.data[DATA_EVL].arm_stay_partition( str(code), self._partition_number ) else: self.hass.data[DATA_EVL].arm_stay_partition( str(self._code), self._partition_number ) async def async_alarm_arm_away(self, code=None): """Send arm away command.""" if code: self.hass.data[DATA_EVL].arm_away_partition( str(code), self._partition_number ) else: self.hass.data[DATA_EVL].arm_away_partition( str(self._code), self._partition_number ) async def async_alarm_trigger(self, code=None): """Alarm trigger command. Will be used to trigger a panic alarm.""" self.hass.data[DATA_EVL].panic_alarm(self._panic_type) async def async_alarm_arm_night(self, code=None): """Send arm night command.""" self.hass.data[DATA_EVL].arm_night_partition( str(code) if code else str(self._code), self._partition_number ) @callback def async_alarm_keypress(self, keypress=None): """Send custom keypress.""" if keypress: self.hass.data[DATA_EVL].keypresses_to_partition( self._partition_number, keypress )
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/envisalink/alarm_control_panel.py
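The state property above is effectively a priority chain over partition status flags: an active alarm outranks any armed state, and "alpha" only means disarmed when nothing else is set. A self-contained sketch of that resolution order, using plain strings in place of the Home Assistant state constants:

# Flag names mirror the Envisalink status dict; the sample data is made up.
STATE_PRIORITY = [
    ("alarm", "triggered"),
    ("armed_zero_entry_delay", "armed_night"),
    ("armed_away", "armed_away"),
    ("armed_stay", "armed_home"),
    ("exit_delay", "pending"),
    ("entry_delay", "pending"),
    ("alpha", "disarmed"),
]

def resolve_state(status):
    # Return the state of the first flag that is set, like the property above.
    for flag, state in STATE_PRIORITY:
        if status.get(flag):
            return state
    return "unknown"

print(resolve_state({"armed_away": True, "alpha": True}))  # armed_away wins
print(resolve_state({}))                                   # unknown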
"""Support for file notification.""" import os import voluptuous as vol from homeassistant.components.notify import ( ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService, ) from homeassistant.const import CONF_FILENAME import homeassistant.helpers.config_validation as cv import homeassistant.util.dt as dt_util CONF_TIMESTAMP = "timestamp" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_FILENAME): cv.string, vol.Optional(CONF_TIMESTAMP, default=False): cv.boolean, } ) def get_service(hass, config, discovery_info=None): """Get the file notification service.""" filename = config[CONF_FILENAME] timestamp = config[CONF_TIMESTAMP] return FileNotificationService(hass, filename, timestamp) class FileNotificationService(BaseNotificationService): """Implement the notification service for the File service.""" def __init__(self, hass, filename, add_timestamp): """Initialize the service.""" self.filepath = os.path.join(hass.config.config_dir, filename) self.add_timestamp = add_timestamp def send_message(self, message="", **kwargs): """Send a message to a file.""" with open(self.filepath, "a") as file: if os.stat(self.filepath).st_size == 0: title = f"{kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)} notifications (Log started: {dt_util.utcnow().isoformat()})\n{'-' * 80}\n" file.write(title) if self.add_timestamp: text = f"{dt_util.utcnow().isoformat()} {message}\n" else: text = f"{message}\n" file.write(text)
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/file/notify.py
"""Config flow for Islamic Prayer Times integration.""" import voluptuous as vol from homeassistant import config_entries from homeassistant.core import callback # pylint: disable=unused-import from .const import CALC_METHODS, CONF_CALC_METHOD, DEFAULT_CALC_METHOD, DOMAIN, NAME class IslamicPrayerFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle the Islamic Prayer config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for this handler.""" return IslamicPrayerOptionsFlowHandler(config_entry) async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") if user_input is None: return self.async_show_form(step_id="user") return self.async_create_entry(title=NAME, data=user_input) async def async_step_import(self, import_config): """Import from config.""" return await self.async_step_user(user_input=import_config) class IslamicPrayerOptionsFlowHandler(config_entries.OptionsFlow): """Handle Islamic Prayer client options.""" def __init__(self, config_entry): """Initialize options flow.""" self.config_entry = config_entry async def async_step_init(self, user_input=None): """Manage options.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) options = { vol.Optional( CONF_CALC_METHOD, default=self.config_entry.options.get( CONF_CALC_METHOD, DEFAULT_CALC_METHOD ), ): vol.In(CALC_METHODS) } return self.async_show_form(step_id="init", data_schema=vol.Schema(options))
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/islamic_prayer_times/config_flow.py
"""ONVIF event abstraction.""" import asyncio import datetime as dt from typing import Callable, Dict, List, Optional, Set from httpx import RemoteProtocolError, TransportError from onvif import ONVIFCamera, ONVIFService from zeep.exceptions import Fault from homeassistant.core import CALLBACK_TYPE, CoreState, HomeAssistant, callback from homeassistant.helpers.event import async_call_later from homeassistant.util import dt as dt_util from .const import LOGGER from .models import Event from .parsers import PARSERS UNHANDLED_TOPICS = set() SUBSCRIPTION_ERRORS = ( Fault, asyncio.TimeoutError, TransportError, ) class EventManager: """ONVIF Event Manager.""" def __init__(self, hass: HomeAssistant, device: ONVIFCamera, unique_id: str): """Initialize event manager.""" self.hass: HomeAssistant = hass self.device: ONVIFCamera = device self.unique_id: str = unique_id self.started: bool = False self._subscription: ONVIFService = None self._events: Dict[str, Event] = {} self._listeners: List[CALLBACK_TYPE] = [] self._unsub_refresh: Optional[CALLBACK_TYPE] = None super().__init__() @property def platforms(self) -> Set[str]: """Return platforms to setup.""" return {event.platform for event in self._events.values()} @callback def async_add_listener(self, update_callback: CALLBACK_TYPE) -> Callable[[], None]: """Listen for data updates.""" # This is the first listener, set up polling. if not self._listeners: self.async_schedule_pull() self._listeners.append(update_callback) @callback def remove_listener() -> None: """Remove update listener.""" self.async_remove_listener(update_callback) return remove_listener @callback def async_remove_listener(self, update_callback: CALLBACK_TYPE) -> None: """Remove data update.""" if update_callback in self._listeners: self._listeners.remove(update_callback) if not self._listeners and self._unsub_refresh: self._unsub_refresh() self._unsub_refresh = None async def async_start(self) -> bool: """Start polling events.""" if await self.device.create_pullpoint_subscription(): # Create subscription manager self._subscription = self.device.create_subscription_service( "PullPointSubscription" ) # Renew immediately await self.async_renew() # Initialize events pullpoint = self.device.create_pullpoint_service() try: await pullpoint.SetSynchronizationPoint() except SUBSCRIPTION_ERRORS: pass response = await pullpoint.PullMessages( {"MessageLimit": 100, "Timeout": dt.timedelta(seconds=5)} ) # Parse event initialization await self.async_parse_messages(response.NotificationMessage) self.started = True return True return False async def async_stop(self) -> None: """Unsubscribe from events.""" self._listeners = [] self.started = False if not self._subscription: return await self._subscription.Unsubscribe() self._subscription = None async def async_restart(self, _now: dt = None) -> None: """Restart the subscription assuming the camera rebooted.""" if not self.started: return if self._subscription: try: await self._subscription.Unsubscribe() except SUBSCRIPTION_ERRORS: pass # Ignored. The subscription may no longer exist. self._subscription = None try: restarted = await self.async_start() except SUBSCRIPTION_ERRORS: restarted = False if not restarted: LOGGER.warning( "Failed to restart ONVIF PullPoint subscription for '%s'. 
Retrying...", self.unique_id, ) # Try again in a minute self._unsub_refresh = async_call_later(self.hass, 60, self.async_restart) elif self._listeners: LOGGER.debug( "Restarted ONVIF PullPoint subscription for '%s'", self.unique_id ) self.async_schedule_pull() async def async_renew(self) -> None: """Renew subscription.""" if not self._subscription: return termination_time = ( (dt_util.utcnow() + dt.timedelta(days=1)) .isoformat(timespec="seconds") .replace("+00:00", "Z") ) await self._subscription.Renew(termination_time) def async_schedule_pull(self) -> None: """Schedule async_pull_messages to run.""" self._unsub_refresh = async_call_later(self.hass, 1, self.async_pull_messages) async def async_pull_messages(self, _now: dt = None) -> None: """Pull messages from device.""" if self.hass.state == CoreState.running: try: pullpoint = self.device.create_pullpoint_service() response = await pullpoint.PullMessages( {"MessageLimit": 100, "Timeout": dt.timedelta(seconds=60)} ) # Renew subscription if less than two hours is left if ( dt_util.as_utc(response.TerminationTime) - dt_util.utcnow() ).total_seconds() < 7200: await self.async_renew() except RemoteProtocolError: # Likley a shutdown event, nothing to see here return except SUBSCRIPTION_ERRORS as err: LOGGER.warning( "Failed to fetch ONVIF PullPoint subscription messages for '%s': %s", self.unique_id, err, ) # Treat errors as if the camera restarted. Assume that the pullpoint # subscription is no longer valid. self._unsub_refresh = None await self.async_restart() return # Parse response await self.async_parse_messages(response.NotificationMessage) # Update entities for update_callback in self._listeners: update_callback() # Reschedule another pull if self._listeners: self.async_schedule_pull() # pylint: disable=protected-access async def async_parse_messages(self, messages) -> None: """Parse notification message.""" for msg in messages: # Guard against empty message if not msg.Topic: continue topic = msg.Topic._value_1 parser = PARSERS.get(topic) if not parser: if topic not in UNHANDLED_TOPICS: LOGGER.info( "No registered handler for event from %s: %s", self.unique_id, msg, ) UNHANDLED_TOPICS.add(topic) continue event = await parser(self.unique_id, msg) if not event: LOGGER.warning("Unable to parse event from %s: %s", self.unique_id, msg) return self._events[event.uid] = event def get_uid(self, uid) -> Event: """Retrieve event for given id.""" return self._events[uid] def get_platform(self, platform) -> List[Event]: """Retrieve events for given platform.""" return [event for event in self._events.values() if event.platform == platform]
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/onvif/event.py
"""Support for USCIS Case Status.""" from datetime import timedelta import logging import uscisstatus import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME from homeassistant.helpers import config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "USCIS" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Required("case_id"): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the platform in Home Assistant and Case Information.""" uscis = UscisSensor(config["case_id"], config[CONF_NAME]) uscis.update() if uscis.valid_case_id: add_entities([uscis]) else: _LOGGER.error("Setup USCIS Sensor Fail check if your Case ID is Valid") class UscisSensor(Entity): """USCIS Sensor will check case status on daily basis.""" MIN_TIME_BETWEEN_UPDATES = timedelta(hours=24) CURRENT_STATUS = "current_status" LAST_CASE_UPDATE = "last_update_date" def __init__(self, case, name): """Initialize the sensor.""" self._state = None self._case_id = case self._attributes = None self.valid_case_id = None self._name = name @property def name(self): """Return the name.""" return self._name @property def state(self): """Return the state.""" return self._state @property def device_state_attributes(self): """Return the state attributes.""" return self._attributes @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Fetch data from the USCIS website and update state attributes.""" try: status = uscisstatus.get_case_status(self._case_id) self._attributes = {self.CURRENT_STATUS: status["status"]} self._state = status["date"] self.valid_case_id = True except ValueError: _LOGGER("Please Check that you have valid USCIS case id") self.valid_case_id = False
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/uscis/sensor.py
"""Config flow for Rollease Acmeda Automate Pulse Hub.""" import asyncio from typing import Dict, Optional import aiopulse import async_timeout import voluptuous as vol from homeassistant import config_entries from .const import DOMAIN # pylint: disable=unused-import class AcmedaFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle a Acmeda config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL def __init__(self): """Initialize the config flow.""" self.discovered_hubs: Optional[Dict[str, aiopulse.Hub]] = None async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" if ( user_input is not None and self.discovered_hubs is not None and user_input["id"] in self.discovered_hubs ): return await self.async_create(self.discovered_hubs[user_input["id"]]) # Already configured hosts already_configured = { entry.unique_id for entry in self._async_current_entries() } hubs = [] try: with async_timeout.timeout(5): async for hub in aiopulse.Hub.discover(): if hub.id not in already_configured: hubs.append(hub) except asyncio.TimeoutError: pass if len(hubs) == 0: return self.async_abort(reason="no_devices_found") if len(hubs) == 1: return await self.async_create(hubs[0]) self.discovered_hubs = {hub.id: hub for hub in hubs} return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required("id"): vol.In( {hub.id: f"{hub.id} {hub.host}" for hub in hubs} ) } ), ) async def async_create(self, hub): """Create the Acmeda Hub entry.""" await self.async_set_unique_id(hub.id, raise_on_progress=False) return self.async_create_entry(title=hub.id, data={"host": hub.host})
"""Test Hue init with multiple bridges.""" from unittest.mock import Mock, patch from aiohue.groups import Groups from aiohue.lights import Lights from aiohue.scenes import Scenes from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base from homeassistant.setup import async_setup_component async def setup_component(hass): """Hue component.""" with patch.object(hue, "async_setup_entry", return_value=True): assert ( await async_setup_component( hass, hue.DOMAIN, {}, ) is True ) async def test_hue_activate_scene_both_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes both bridges successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_one_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes only one bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=None ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) mock_hue_activate_scene1.assert_called_once() mock_hue_activate_scene2.assert_called_once() async def test_hue_activate_scene_zero_responds( hass, mock_bridge1, mock_bridge2, mock_config_entry1, mock_config_entry2 ): """Test that makes no bridge successfully activate a scene.""" await setup_component(hass) await setup_bridge(hass, mock_bridge1, mock_config_entry1) await setup_bridge(hass, mock_bridge2, mock_config_entry2) with patch.object( mock_bridge1, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene1, patch.object( mock_bridge2, "hue_activate_scene", return_value=False ) as mock_hue_activate_scene2: await hass.services.async_call( "hue", "hue_activate_scene", {"group_name": "group_2", "scene_name": "my_scene"}, blocking=True, ) # both were retried assert mock_hue_activate_scene1.call_count == 2 assert mock_hue_activate_scene2.call_count == 2 async def setup_bridge(hass, mock_bridge, config_entry): """Load the Hue light platform with the provided bridge.""" mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN][config_entry.entry_id] = mock_bridge await hass.config_entries.async_forward_entry_setup(config_entry, "light") # To flush out the service call to update the group await hass.async_block_till_done() @pytest.fixture def mock_config_entry1(hass): """Mock a config entry.""" return create_config_entry() @pytest.fixture def mock_config_entry2(hass): """Mock a config entry.""" return create_config_entry() def create_config_entry(): 
"""Mock a config entry.""" return config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": "mock-host"}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) @pytest.fixture def mock_bridge1(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) @pytest.fixture def mock_bridge2(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) return {} async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) bridge.api.sensors = Sensors({}, mock_request) bridge.api.scenes = Scenes({}, mock_request) return bridge
turbokongen/home-assistant
tests/components/hue/test_init_multiple_bridges.py
homeassistant/components/acmeda/config_flow.py