Dataset columns:
input: string, lengths 53 to 297k
output: string, 604 distinct values
repo_name: string, 376 distinct values
test_path: string, 583 distinct values
code_path: string, lengths 7 to 116
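To make the schema concrete, here is a minimal sketch of loading rows with these columns through the Hugging Face datasets library. It is illustrative only: the dataset id "your-org/code-test-pairs" and the "train" split are hypothetical placeholders, not the actual identifier behind this preview.

# Minimal sketch; the dataset id below is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("your-org/code-test-pairs", split="train")

# Each row pairs a source file ("input") with a test file ("output"),
# plus provenance: the repository name and both file paths.
for row in ds.select(range(3)):
    print(row["repo_name"])
    print("code:", row["code_path"])
    print("test:", row["test_path"])
    print(row["input"][:120])   # first characters of the source file
    print(row["output"][:120])  # first characters of the paired test file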
"""Demo platform that offers a fake water heater device.""" from homeassistant.components.water_heater import ( SUPPORT_AWAY_MODE, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, WaterHeaterEntity, ) from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT SUPPORT_FLAGS_HEATER = ( SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_AWAY_MODE ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Demo water_heater devices.""" async_add_entities( [ DemoWaterHeater("Demo Water Heater", 119, TEMP_FAHRENHEIT, False, "eco"), DemoWaterHeater("Demo Water Heater Celsius", 45, TEMP_CELSIUS, True, "eco"), ] ) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Demo config entry.""" await async_setup_platform(hass, {}, async_add_entities) class DemoWaterHeater(WaterHeaterEntity): """Representation of a demo water_heater device.""" def __init__( self, name, target_temperature, unit_of_measurement, away, current_operation ): """Initialize the water_heater device.""" self._name = name self._support_flags = SUPPORT_FLAGS_HEATER if target_temperature is not None: self._support_flags = self._support_flags | SUPPORT_TARGET_TEMPERATURE if away is not None: self._support_flags = self._support_flags | SUPPORT_AWAY_MODE if current_operation is not None: self._support_flags = self._support_flags | SUPPORT_OPERATION_MODE self._target_temperature = target_temperature self._unit_of_measurement = unit_of_measurement self._away = away self._current_operation = current_operation self._operation_list = [ "eco", "electric", "performance", "high_demand", "heat_pump", "gas", "off", ] @property def supported_features(self): """Return the list of supported features.""" return self._support_flags @property def should_poll(self): """Return the polling state.""" return False @property def name(self): """Return the name of the water_heater device.""" return self._name @property def temperature_unit(self): """Return the unit of measurement.""" return self._unit_of_measurement @property def target_temperature(self): """Return the temperature we try to reach.""" return self._target_temperature @property def current_operation(self): """Return current operation ie. heat, cool, idle.""" return self._current_operation @property def operation_list(self): """Return the list of available operation modes.""" return self._operation_list @property def is_away_mode_on(self): """Return if away mode is on.""" return self._away def set_temperature(self, **kwargs): """Set new target temperatures.""" self._target_temperature = kwargs.get(ATTR_TEMPERATURE) self.schedule_update_ha_state() def set_operation_mode(self, operation_mode): """Set new operation mode.""" self._current_operation = operation_mode self.schedule_update_ha_state() def turn_away_mode_on(self): """Turn away mode on.""" self._away = True self.schedule_update_ha_state() def turn_away_mode_off(self): """Turn away mode off.""" self._away = False self.schedule_update_ha_state()
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
repo_name: mKeRix/home-assistant
test_path: tests/components/hue/test_device_trigger.py
code_path: homeassistant/components/demo/water_heater.py
"""Constant values for pvpc_hourly_pricing.""" from aiopvpc import TARIFFS DOMAIN = "pvpc_hourly_pricing" PLATFORM = "sensor" ATTR_TARIFF = "tariff" DEFAULT_NAME = "PVPC" DEFAULT_TARIFF = TARIFFS[1]
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
repo_name: mKeRix/home-assistant
test_path: tests/components/hue/test_device_trigger.py
code_path: homeassistant/components/pvpc_hourly_pricing/const.py
"""Support for locks which integrates with other components.""" import logging import voluptuous as vol from homeassistant.components.lock import PLATFORM_SCHEMA, LockEntity from homeassistant.const import ( CONF_NAME, CONF_OPTIMISTIC, CONF_VALUE_TEMPLATE, EVENT_HOMEASSISTANT_START, MATCH_ALL, STATE_LOCKED, STATE_ON, ) from homeassistant.core import callback from homeassistant.exceptions import TemplateError import homeassistant.helpers.config_validation as cv from homeassistant.helpers.event import async_track_state_change_event from homeassistant.helpers.script import Script from . import extract_entities, initialise_templates from .const import CONF_AVAILABILITY_TEMPLATE _LOGGER = logging.getLogger(__name__) CONF_LOCK = "lock" CONF_UNLOCK = "unlock" DEFAULT_NAME = "Template Lock" DEFAULT_OPTIMISTIC = False PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Required(CONF_LOCK): cv.SCRIPT_SCHEMA, vol.Required(CONF_UNLOCK): cv.SCRIPT_SCHEMA, vol.Required(CONF_VALUE_TEMPLATE): cv.template, vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template, vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean, } ) async def async_setup_platform(hass, config, async_add_devices, discovery_info=None): """Set up the Template lock.""" device = config.get(CONF_NAME) value_template = config.get(CONF_VALUE_TEMPLATE) availability_template = config.get(CONF_AVAILABILITY_TEMPLATE) templates = { CONF_VALUE_TEMPLATE: value_template, CONF_AVAILABILITY_TEMPLATE: availability_template, } initialise_templates(hass, templates) entity_ids = extract_entities(device, "lock", None, templates) async_add_devices( [ TemplateLock( hass, device, value_template, availability_template, entity_ids, config.get(CONF_LOCK), config.get(CONF_UNLOCK), config.get(CONF_OPTIMISTIC), ) ] ) class TemplateLock(LockEntity): """Representation of a template lock.""" def __init__( self, hass, name, value_template, availability_template, entity_ids, command_lock, command_unlock, optimistic, ): """Initialize the lock.""" self._state = None self._hass = hass self._name = name self._state_template = value_template self._availability_template = availability_template self._state_entities = entity_ids self._command_lock = Script(hass, command_lock) self._command_unlock = Script(hass, command_unlock) self._optimistic = optimistic self._available = True async def async_added_to_hass(self): """Register callbacks.""" @callback def template_lock_state_listener(event): """Handle target device state changes.""" self.async_schedule_update_ha_state(True) @callback def template_lock_startup(event): """Update template on startup.""" if self._state_entities != MATCH_ALL: # Track state change only for valid templates async_track_state_change_event( self._hass, self._state_entities, template_lock_state_listener ) self.async_schedule_update_ha_state(True) self._hass.bus.async_listen_once( EVENT_HOMEASSISTANT_START, template_lock_startup ) @property def assumed_state(self): """Return true if we do optimistic updates.""" return self._optimistic @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return the name of the lock.""" return self._name @property def is_locked(self): """Return true if lock is locked.""" return self._state @property def available(self) -> bool: """Return if the device is available.""" return self._available async def async_update(self): """Update the state from the template.""" try: self._state = 
self._state_template.async_render().lower() in ( "true", STATE_ON, STATE_LOCKED, ) except TemplateError as ex: self._state = None _LOGGER.error("Could not render template %s: %s", self._name, ex) if self._availability_template is not None: try: self._available = ( self._availability_template.async_render().lower() == "true" ) except (TemplateError, ValueError) as ex: _LOGGER.error( "Could not render %s template %s: %s", CONF_AVAILABILITY_TEMPLATE, self._name, ex, ) async def async_lock(self, **kwargs): """Lock the device.""" if self._optimistic: self._state = True self.async_write_ha_state() await self._command_lock.async_run(context=self._context) async def async_unlock(self, **kwargs): """Unlock the device.""" if self._optimistic: self._state = False self.async_write_ha_state() await self._command_unlock.async_run(context=self._context)
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
repo_name: mKeRix/home-assistant
test_path: tests/components/hue/test_device_trigger.py
code_path: homeassistant/components/template/lock.py
"""Support for iOS push notifications.""" import logging import requests from homeassistant.components import ios from homeassistant.components.notify import ( ATTR_DATA, ATTR_MESSAGE, ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT, BaseNotificationService, ) import homeassistant.util.dt as dt_util _LOGGER = logging.getLogger(__name__) PUSH_URL = "https://ios-push.home-assistant.io/push" # pylint: disable=invalid-name def log_rate_limits(hass, target, resp, level=20): """Output rate limit log line at given level.""" rate_limits = resp["rateLimits"] resetsAt = dt_util.parse_datetime(rate_limits["resetsAt"]) resetsAtTime = resetsAt - dt_util.utcnow() rate_limit_msg = ( "iOS push notification rate limits for %s: " "%d sent, %d allowed, %d errors, " "resets in %s" ) _LOGGER.log( level, rate_limit_msg, ios.device_name_for_push_id(hass, target), rate_limits["successful"], rate_limits["maximum"], rate_limits["errors"], str(resetsAtTime).split(".")[0], ) def get_service(hass, config, discovery_info=None): """Get the iOS notification service.""" if "notify.ios" not in hass.config.components: # Need this to enable requirements checking in the app. hass.config.components.add("notify.ios") if not ios.devices_with_push(hass): return None return iOSNotificationService() class iOSNotificationService(BaseNotificationService): """Implement the notification service for iOS.""" def __init__(self): """Initialize the service.""" @property def targets(self): """Return a dictionary of registered targets.""" return ios.devices_with_push(self.hass) def send_message(self, message="", **kwargs): """Send a message to the Lambda APNS gateway.""" data = {ATTR_MESSAGE: message} if kwargs.get(ATTR_TITLE) is not None: # Remove default title from notifications. if kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT: data[ATTR_TITLE] = kwargs.get(ATTR_TITLE) targets = kwargs.get(ATTR_TARGET) if not targets: targets = ios.enabled_push_ids(self.hass) if kwargs.get(ATTR_DATA) is not None: data[ATTR_DATA] = kwargs.get(ATTR_DATA) for target in targets: if target not in ios.enabled_push_ids(self.hass): _LOGGER.error("The target (%s) does not exist in .ios.conf", targets) return data[ATTR_TARGET] = target req = requests.post(PUSH_URL, json=data, timeout=10) if req.status_code != 201: fallback_error = req.json().get("errorMessage", "Unknown error") fallback_message = ( f"Internal server error, please try again later: {fallback_error}" ) message = req.json().get("message", fallback_message) if req.status_code == 429: _LOGGER.warning(message) log_rate_limits(self.hass, target, req.json(), 30) else: _LOGGER.error(message) else: log_rate_limits(self.hass, target, req.json())
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
repo_name: mKeRix/home-assistant
test_path: tests/components/hue/test_device_trigger.py
code_path: homeassistant/components/ios/notify.py
"""Support for LG soundbars.""" import logging import temescal from homeassistant.components.media_player import MediaPlayerEntity from homeassistant.components.media_player.const import ( SUPPORT_SELECT_SOUND_MODE, SUPPORT_SELECT_SOURCE, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, ) from homeassistant.const import STATE_ON _LOGGER = logging.getLogger(__name__) SUPPORT_LG = ( SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_SELECT_SOURCE | SUPPORT_SELECT_SOUND_MODE ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the LG platform.""" if discovery_info is not None: add_entities([LGDevice(discovery_info)], True) class LGDevice(MediaPlayerEntity): """Representation of an LG soundbar device.""" def __init__(self, discovery_info): """Initialize the LG speakers.""" host = discovery_info.get("host") port = discovery_info.get("port") self._name = "" self._volume = 0 self._volume_min = 0 self._volume_max = 0 self._function = -1 self._functions = [] self._equaliser = -1 self._equalisers = [] self._mute = 0 self._rear_volume = 0 self._rear_volume_min = 0 self._rear_volume_max = 0 self._woofer_volume = 0 self._woofer_volume_min = 0 self._woofer_volume_max = 0 self._bass = 0 self._treble = 0 self._device = temescal.temescal(host, port=port, callback=self.handle_event) self.update() def handle_event(self, response): """Handle responses from the speakers.""" data = response["data"] if response["msg"] == "EQ_VIEW_INFO": if "i_bass" in data: self._bass = data["i_bass"] if "i_treble" in data: self._treble = data["i_treble"] if "ai_eq_list" in data: self._equalisers = data["ai_eq_list"] if "i_curr_eq" in data: self._equaliser = data["i_curr_eq"] elif response["msg"] == "SPK_LIST_VIEW_INFO": if "i_vol" in data: self._volume = data["i_vol"] if "s_user_name" in data: self._name = data["s_user_name"] if "i_vol_min" in data: self._volume_min = data["i_vol_min"] if "i_vol_max" in data: self._volume_max = data["i_vol_max"] if "b_mute" in data: self._mute = data["b_mute"] if "i_curr_func" in data: self._function = data["i_curr_func"] elif response["msg"] == "FUNC_VIEW_INFO": if "i_curr_func" in data: self._function = data["i_curr_func"] if "ai_func_list" in data: self._functions = data["ai_func_list"] elif response["msg"] == "SETTING_VIEW_INFO": if "i_rear_min" in data: self._rear_volume_min = data["i_rear_min"] if "i_rear_max" in data: self._rear_volume_max = data["i_rear_max"] if "i_rear_level" in data: self._rear_volume = data["i_rear_level"] if "i_woofer_min" in data: self._woofer_volume_min = data["i_woofer_min"] if "i_woofer_max" in data: self._woofer_volume_max = data["i_woofer_max"] if "i_woofer_level" in data: self._woofer_volume = data["i_woofer_level"] if "i_curr_eq" in data: self._equaliser = data["i_curr_eq"] if "s_user_name" in data: self._name = data["s_user_name"] self.schedule_update_ha_state() def update(self): """Trigger updates from the device.""" self._device.get_eq() self._device.get_info() self._device.get_func() self._device.get_settings() self._device.get_product_info() # Temporary fix until handling of unknown equaliser settings is integrated in the temescal library for equaliser in self._equalisers: if equaliser >= len(temescal.equalisers): temescal.equalisers.append("unknown " + str(equaliser)) @property def name(self): """Return the name of the device.""" return self._name @property def volume_level(self): """Volume level of the media player (0..1).""" if self._volume_max != 0: return self._volume / self._volume_max return 0 @property def 
is_volume_muted(self): """Boolean if volume is currently muted.""" return self._mute @property def state(self): """Return the state of the device.""" return STATE_ON @property def sound_mode(self): """Return the current sound mode.""" if self._equaliser == -1 or self._equaliser >= len(temescal.equalisers): return None return temescal.equalisers[self._equaliser] @property def sound_mode_list(self): """Return the available sound modes.""" modes = [] for equaliser in self._equalisers: modes.append(temescal.equalisers[equaliser]) return sorted(modes) @property def source(self): """Return the current input source.""" if self._function == -1: return None return temescal.functions[self._function] @property def source_list(self): """List of available input sources.""" sources = [] for function in self._functions: sources.append(temescal.functions[function]) return sorted(sources) @property def supported_features(self): """Flag media player features that are supported.""" return SUPPORT_LG def set_volume_level(self, volume): """Set volume level, range 0..1.""" volume = volume * self._volume_max self._device.set_volume(int(volume)) def mute_volume(self, mute): """Mute (true) or unmute (false) media player.""" self._device.set_mute(mute) def select_source(self, source): """Select input source.""" self._device.set_func(temescal.functions.index(source)) def select_sound_mode(self, sound_mode): """Set Sound Mode for Receiver..""" self._device.set_eq(temescal.equalisers.index(sound_mode))
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
repo_name: mKeRix/home-assistant
test_path: tests/components/hue/test_device_trigger.py
code_path: homeassistant/components/lg_soundbar/media_player.py
"""Support for eQ-3 Bluetooth Smart thermostats.""" import logging # pylint: disable=import-error from bluepy.btle import BTLEException import eq3bt as eq3 # pylint: disable=import-error import voluptuous as vol from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity from homeassistant.components.climate.const import ( HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF, PRESET_AWAY, PRESET_BOOST, PRESET_NONE, SUPPORT_PRESET_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ( ATTR_TEMPERATURE, CONF_DEVICES, CONF_MAC, PRECISION_HALVES, TEMP_CELSIUS, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) STATE_BOOST = "boost" ATTR_STATE_WINDOW_OPEN = "window_open" ATTR_STATE_VALVE = "valve" ATTR_STATE_LOCKED = "is_locked" ATTR_STATE_LOW_BAT = "low_battery" ATTR_STATE_AWAY_END = "away_end" EQ_TO_HA_HVAC = { eq3.Mode.Open: HVAC_MODE_HEAT, eq3.Mode.Closed: HVAC_MODE_OFF, eq3.Mode.Auto: HVAC_MODE_AUTO, eq3.Mode.Manual: HVAC_MODE_HEAT, eq3.Mode.Boost: HVAC_MODE_AUTO, eq3.Mode.Away: HVAC_MODE_HEAT, } HA_TO_EQ_HVAC = { HVAC_MODE_HEAT: eq3.Mode.Manual, HVAC_MODE_OFF: eq3.Mode.Closed, HVAC_MODE_AUTO: eq3.Mode.Auto, } EQ_TO_HA_PRESET = {eq3.Mode.Boost: PRESET_BOOST, eq3.Mode.Away: PRESET_AWAY} HA_TO_EQ_PRESET = {PRESET_BOOST: eq3.Mode.Boost, PRESET_AWAY: eq3.Mode.Away} DEVICE_SCHEMA = vol.Schema({vol.Required(CONF_MAC): cv.string}) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_DEVICES): vol.Schema({cv.string: DEVICE_SCHEMA})} ) SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the eQ-3 BLE thermostats.""" devices = [] for name, device_cfg in config[CONF_DEVICES].items(): mac = device_cfg[CONF_MAC] devices.append(EQ3BTSmartThermostat(mac, name)) add_entities(devices, True) class EQ3BTSmartThermostat(ClimateEntity): """Representation of an eQ-3 Bluetooth Smart thermostat.""" def __init__(self, _mac, _name): """Initialize the thermostat.""" # We want to avoid name clash with this module. 
self._name = _name self._thermostat = eq3.Thermostat(_mac) @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_FLAGS @property def available(self) -> bool: """Return if thermostat is available.""" return self._thermostat.mode >= 0 @property def name(self): """Return the name of the device.""" return self._name @property def temperature_unit(self): """Return the unit of measurement that is used.""" return TEMP_CELSIUS @property def precision(self): """Return eq3bt's precision 0.5.""" return PRECISION_HALVES @property def current_temperature(self): """Can not report temperature, so return target_temperature.""" return self.target_temperature @property def target_temperature(self): """Return the temperature we try to reach.""" return self._thermostat.target_temperature def set_temperature(self, **kwargs): """Set new target temperature.""" temperature = kwargs.get(ATTR_TEMPERATURE) if temperature is None: return self._thermostat.target_temperature = temperature @property def hvac_mode(self): """Return the current operation mode.""" if self._thermostat.mode < 0: return HVAC_MODE_OFF return EQ_TO_HA_HVAC[self._thermostat.mode] @property def hvac_modes(self): """Return the list of available operation modes.""" return list(HA_TO_EQ_HVAC.keys()) def set_hvac_mode(self, hvac_mode): """Set operation mode.""" if self.preset_mode: return self._thermostat.mode = HA_TO_EQ_HVAC[hvac_mode] @property def min_temp(self): """Return the minimum temperature.""" return self._thermostat.min_temp @property def max_temp(self): """Return the maximum temperature.""" return self._thermostat.max_temp @property def device_state_attributes(self): """Return the device specific state attributes.""" dev_specific = { ATTR_STATE_AWAY_END: self._thermostat.away_end, ATTR_STATE_LOCKED: self._thermostat.locked, ATTR_STATE_LOW_BAT: self._thermostat.low_battery, ATTR_STATE_VALVE: self._thermostat.valve_state, ATTR_STATE_WINDOW_OPEN: self._thermostat.window_open, } return dev_specific @property def preset_mode(self): """Return the current preset mode, e.g., home, away, temp. Requires SUPPORT_PRESET_MODE. """ return EQ_TO_HA_PRESET.get(self._thermostat.mode) @property def preset_modes(self): """Return a list of available preset modes. Requires SUPPORT_PRESET_MODE. """ return list(HA_TO_EQ_PRESET.keys()) def set_preset_mode(self, preset_mode): """Set new preset mode.""" if preset_mode == PRESET_NONE: self.set_hvac_mode(HVAC_MODE_HEAT) self._thermostat.mode = HA_TO_EQ_PRESET[preset_mode] def update(self): """Update the data from the thermostat.""" try: self._thermostat.update() except BTLEException as ex: _LOGGER.warning("Updating the state failed: %s", ex)
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
repo_name: mKeRix/home-assistant
test_path: tests/components/hue/test_device_trigger.py
code_path: homeassistant/components/eq3btsmart/climate.py
"""The songpal component.""" from collections import OrderedDict import logging import voluptuous as vol from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import CONF_NAME from homeassistant.helpers import config_validation as cv from homeassistant.helpers.typing import HomeAssistantType from .const import CONF_ENDPOINT, DOMAIN _LOGGER = logging.getLogger(__name__) SONGPAL_CONFIG_SCHEMA = vol.Schema( {vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_ENDPOINT): cv.string} ) CONFIG_SCHEMA = vol.Schema( {vol.Optional(DOMAIN): vol.All(cv.ensure_list, [SONGPAL_CONFIG_SCHEMA])}, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: HomeAssistantType, config: OrderedDict) -> bool: """Set up songpal environment.""" conf = config.get(DOMAIN) if conf is None: return True for config_entry in conf: hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data=config_entry, ), ) return True async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool: """Set up songpal media player.""" hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, "media_player") ) return True async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool: """Unload songpal media player.""" return await hass.config_entries.async_forward_entry_unload(entry, "media_player")
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
repo_name: mKeRix/home-assistant
test_path: tests/components/hue/test_device_trigger.py
code_path: homeassistant/components/songpal/__init__.py
"""Support for Luftdaten stations.""" import logging from luftdaten import Luftdaten from luftdaten.exceptions import LuftdatenError import voluptuous as vol from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import ( CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, CONF_MONITORED_CONDITIONS, CONF_SCAN_INTERVAL, CONF_SENSORS, CONF_SHOW_ON_MAP, TEMP_CELSIUS, UNIT_PERCENTAGE, ) from homeassistant.core import callback from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.event import async_track_time_interval from .config_flow import configured_sensors, duplicate_stations from .const import CONF_SENSOR_ID, DEFAULT_SCAN_INTERVAL, DOMAIN _LOGGER = logging.getLogger(__name__) DATA_LUFTDATEN = "luftdaten" DATA_LUFTDATEN_CLIENT = "data_luftdaten_client" DATA_LUFTDATEN_LISTENER = "data_luftdaten_listener" DEFAULT_ATTRIBUTION = "Data provided by luftdaten.info" SENSOR_HUMIDITY = "humidity" SENSOR_PM10 = "P1" SENSOR_PM2_5 = "P2" SENSOR_PRESSURE = "pressure" SENSOR_PRESSURE_AT_SEALEVEL = "pressure_at_sealevel" SENSOR_TEMPERATURE = "temperature" TOPIC_UPDATE = f"{DOMAIN}_data_update" SENSORS = { SENSOR_TEMPERATURE: ["Temperature", "mdi:thermometer", TEMP_CELSIUS], SENSOR_HUMIDITY: ["Humidity", "mdi:water-percent", UNIT_PERCENTAGE], SENSOR_PRESSURE: ["Pressure", "mdi:arrow-down-bold", "Pa"], SENSOR_PRESSURE_AT_SEALEVEL: ["Pressure at sealevel", "mdi:download", "Pa"], SENSOR_PM10: [ "PM10", "mdi:thought-bubble", CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, ], SENSOR_PM2_5: [ "PM2.5", "mdi:thought-bubble-outline", CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, ], } SENSOR_SCHEMA = vol.Schema( { vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)): vol.All( cv.ensure_list, [vol.In(SENSORS)] ) } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_SENSOR_ID): cv.positive_int, vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA, vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean, vol.Optional( CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL ): cv.time_period, } ) }, extra=vol.ALLOW_EXTRA, ) @callback def _async_fixup_sensor_id(hass, config_entry, sensor_id): hass.config_entries.async_update_entry( config_entry, data={**config_entry.data, CONF_SENSOR_ID: int(sensor_id)} ) async def async_setup(hass, config): """Set up the Luftdaten component.""" hass.data[DOMAIN] = {} hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT] = {} hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER] = {} if DOMAIN not in config: return True conf = config[DOMAIN] station_id = conf[CONF_SENSOR_ID] if station_id not in configured_sensors(hass): hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={ CONF_SENSORS: conf[CONF_SENSORS], CONF_SENSOR_ID: conf[CONF_SENSOR_ID], CONF_SHOW_ON_MAP: conf[CONF_SHOW_ON_MAP], }, ) ) hass.data[DOMAIN][CONF_SCAN_INTERVAL] = conf[CONF_SCAN_INTERVAL] return True async def async_setup_entry(hass, config_entry): """Set up Luftdaten as config entry.""" if not isinstance(config_entry.data[CONF_SENSOR_ID], int): _async_fixup_sensor_id(hass, config_entry, config_entry.data[CONF_SENSOR_ID]) if ( config_entry.data[CONF_SENSOR_ID] in duplicate_stations(hass) and config_entry.source == SOURCE_IMPORT ): _LOGGER.warning( "Removing duplicate sensors for station %s", config_entry.data[CONF_SENSOR_ID], ) 
hass.async_create_task(hass.config_entries.async_remove(config_entry.entry_id)) return False session = async_get_clientsession(hass) try: luftdaten = LuftDatenData( Luftdaten(config_entry.data[CONF_SENSOR_ID], hass.loop, session), config_entry.data.get(CONF_SENSORS, {}).get( CONF_MONITORED_CONDITIONS, list(SENSORS) ), ) await luftdaten.async_update() hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT][config_entry.entry_id] = luftdaten except LuftdatenError: raise ConfigEntryNotReady hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, "sensor") ) async def refresh_sensors(event_time): """Refresh Luftdaten data.""" await luftdaten.async_update() async_dispatcher_send(hass, TOPIC_UPDATE) hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER][ config_entry.entry_id ] = async_track_time_interval( hass, refresh_sensors, hass.data[DOMAIN].get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL), ) return True async def async_unload_entry(hass, config_entry): """Unload an Luftdaten config entry.""" remove_listener = hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER].pop( config_entry.entry_id ) remove_listener() hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT].pop(config_entry.entry_id) return await hass.config_entries.async_forward_entry_unload(config_entry, "sensor") class LuftDatenData: """Define a generic Luftdaten object.""" def __init__(self, client, sensor_conditions): """Initialize the Luftdata object.""" self.client = client self.data = {} self.sensor_conditions = sensor_conditions async def async_update(self): """Update sensor/binary sensor data.""" try: await self.client.get_data() self.data[DATA_LUFTDATEN] = self.client.values self.data[DATA_LUFTDATEN].update(self.client.meta) except LuftdatenError: _LOGGER.error("Unable to retrieve data from luftdaten.info")
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/luftdaten/__init__.py
"""Support for Freebox Delta, Revolution and Mini 4K.""" import logging from typing import Dict from aiofreepybox.exceptions import InsufficientPermissionsError from homeassistant.components.switch import SwitchEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from .const import DOMAIN from .router import FreeboxRouter _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up the switch.""" router = hass.data[DOMAIN][entry.unique_id] async_add_entities([FreeboxWifiSwitch(router)], True) class FreeboxWifiSwitch(SwitchEntity): """Representation of a freebox wifi switch.""" def __init__(self, router: FreeboxRouter) -> None: """Initialize the Wifi switch.""" self._name = "Freebox WiFi" self._state = None self._router = router self._unique_id = f"{self._router.mac} {self._name}" @property def unique_id(self) -> str: """Return a unique ID.""" return self._unique_id @property def name(self) -> str: """Return the name of the switch.""" return self._name @property def is_on(self) -> bool: """Return true if device is on.""" return self._state @property def device_info(self) -> Dict[str, any]: """Return the device information.""" return self._router.device_info async def _async_set_state(self, enabled: bool): """Turn the switch on or off.""" wifi_config = {"enabled": enabled} try: await self._router.wifi.set_global_config(wifi_config) except InsufficientPermissionsError: _LOGGER.warning( "Home Assistant does not have permissions to modify the Freebox settings. Please refer to documentation" ) async def async_turn_on(self, **kwargs): """Turn the switch on.""" await self._async_set_state(True) async def async_turn_off(self, **kwargs): """Turn the switch off.""" await self._async_set_state(False) async def async_update(self): """Get the state and update it.""" datas = await self._router.wifi.get_global_config() active = datas["enabled"] self._state = bool(active)
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/freebox/switch.py
"""Support for LIRC devices.""" # pylint: disable=no-member, import-error import logging import threading import time import lirc import voluptuous as vol from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP _LOGGER = logging.getLogger(__name__) BUTTON_NAME = "button_name" DOMAIN = "lirc" EVENT_IR_COMMAND_RECEIVED = "ir_command_received" ICON = "mdi:remote" CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA) def setup(hass, config): """Set up the LIRC capability.""" # blocking=True gives unexpected behavior (multiple responses for 1 press) # also by not blocking, we allow hass to shut down the thread gracefully # on exit. lirc.init("home-assistant", blocking=False) lirc_interface = LircInterface(hass) def _start_lirc(_event): lirc_interface.start() def _stop_lirc(_event): lirc_interface.stopped.set() hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_lirc) hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_lirc) return True class LircInterface(threading.Thread): """ This interfaces with the lirc daemon to read IR commands. When using lirc in blocking mode, sometimes repeated commands get produced in the next read of a command so we use a thread here to just wait around until a non-empty response is obtained from lirc. """ def __init__(self, hass): """Construct a LIRC interface object.""" threading.Thread.__init__(self) self.daemon = True self.stopped = threading.Event() self.hass = hass def run(self): """Run the loop of the LIRC interface thread.""" _LOGGER.debug("LIRC interface thread started") while not self.stopped.isSet(): try: code = lirc.nextcode() # list; empty if no buttons pressed except lirc.NextCodeError: _LOGGER.warning("Error reading next code from LIRC") code = None # interpret result from python-lirc if code: code = code[0] _LOGGER.info("Got new LIRC code %s", code) self.hass.bus.fire(EVENT_IR_COMMAND_RECEIVED, {BUTTON_NAME: code}) else: time.sleep(0.2) lirc.deinit() _LOGGER.debug("LIRC interface thread stopped")
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/lirc/__init__.py
"""Switches for the Elexa Guardian integration.""" from typing import Callable, Dict from aioguardian import Client from aioguardian.errors import GuardianError import voluptuous as vol from homeassistant.components.switch import SwitchEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_FILENAME, CONF_PORT, CONF_URL from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import config_validation as cv, entity_platform from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from . import GuardianEntity from .const import API_VALVE_STATUS, DATA_CLIENT, DATA_COORDINATOR, DOMAIN, LOGGER ATTR_AVG_CURRENT = "average_current" ATTR_INST_CURRENT = "instantaneous_current" ATTR_INST_CURRENT_DDT = "instantaneous_current_ddt" ATTR_TRAVEL_COUNT = "travel_count" SERVICE_DISABLE_AP = "disable_ap" SERVICE_ENABLE_AP = "enable_ap" SERVICE_REBOOT = "reboot" SERVICE_RESET_VALVE_DIAGNOSTICS = "reset_valve_diagnostics" SERVICE_UPGRADE_FIRMWARE = "upgrade_firmware" async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable ) -> None: """Set up Guardian switches based on a config entry.""" platform = entity_platform.current_platform.get() for service_name, schema, method in [ (SERVICE_DISABLE_AP, {}, "async_disable_ap"), (SERVICE_ENABLE_AP, {}, "async_enable_ap"), (SERVICE_REBOOT, {}, "async_reboot"), (SERVICE_RESET_VALVE_DIAGNOSTICS, {}, "async_reset_valve_diagnostics"), ( SERVICE_UPGRADE_FIRMWARE, { vol.Optional(CONF_URL): cv.url, vol.Optional(CONF_PORT): cv.port, vol.Optional(CONF_FILENAME): cv.string, }, "async_upgrade_firmware", ), ]: platform.async_register_entity_service(service_name, schema, method) async_add_entities( [ GuardianSwitch( entry, hass.data[DOMAIN][DATA_CLIENT][entry.entry_id], hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id], ) ], True, ) class GuardianSwitch(GuardianEntity, SwitchEntity): """Define a switch to open/close the Guardian valve.""" def __init__( self, entry: ConfigEntry, client: Client, coordinators: Dict[str, DataUpdateCoordinator], ): """Initialize.""" super().__init__( entry, client, coordinators, "valve", "Valve", None, "mdi:water" ) self._is_on = True @property def available(self) -> bool: """Return whether the entity is available.""" return self._coordinators[API_VALVE_STATUS].last_update_success @property def is_on(self) -> bool: """Return True if the valve is open.""" return self._is_on async def _async_internal_added_to_hass(self): """Register API interest (and related tasks) when the entity is added.""" self.async_add_coordinator_update_listener(API_VALVE_STATUS) @callback def _async_update_from_latest_data(self) -> None: """Update the entity.""" self._is_on = self._coordinators[API_VALVE_STATUS].data["state"] in ( "start_opening", "opening", "finish_opening", "opened", ) self._attrs.update( { ATTR_AVG_CURRENT: self._coordinators[API_VALVE_STATUS].data[ "average_current" ], ATTR_INST_CURRENT: self._coordinators[API_VALVE_STATUS].data[ "instantaneous_current" ], ATTR_INST_CURRENT_DDT: self._coordinators[API_VALVE_STATUS].data[ "instantaneous_current_ddt" ], ATTR_TRAVEL_COUNT: self._coordinators[API_VALVE_STATUS].data[ "travel_count" ], } ) async def async_disable_ap(self): """Disable the device's onboard access point.""" try: async with self._client: await self._client.wifi.disable_ap() except GuardianError as err: LOGGER.error("Error during service call: %s", err) async def async_enable_ap(self): """Enable the device's onboard access 
point.""" try: async with self._client: await self._client.wifi.enable_ap() except GuardianError as err: LOGGER.error("Error during service call: %s", err) async def async_reboot(self): """Reboot the device.""" try: async with self._client: await self._client.system.reboot() except GuardianError as err: LOGGER.error("Error during service call: %s", err) async def async_reset_valve_diagnostics(self): """Fully reset system motor diagnostics.""" try: async with self._client: await self._client.valve.reset() except GuardianError as err: LOGGER.error("Error during service call: %s", err) async def async_upgrade_firmware(self, *, url, port, filename): """Upgrade the device firmware.""" try: async with self._client: await self._client.system.upgrade_firmware( url=url, port=port, filename=filename, ) except GuardianError as err: LOGGER.error("Error during service call: %s", err) async def async_turn_off(self, **kwargs) -> None: """Turn the valve off (closed).""" try: async with self._client: await self._client.valve.close() except GuardianError as err: LOGGER.error("Error while closing the valve: %s", err) return self._is_on = False self.async_write_ha_state() async def async_turn_on(self, **kwargs) -> None: """Turn the valve on (open).""" try: async with self._client: await self._client.valve.open() except GuardianError as err: LOGGER.error("Error while opening the valve: %s", err) return self._is_on = True self.async_write_ha_state()
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/guardian/switch.py
"""Config flow to configure Coolmaster.""" from pycoolmasternet import CoolMasterNet import voluptuous as vol from homeassistant import config_entries, core from homeassistant.const import CONF_HOST, CONF_PORT # pylint: disable=unused-import from .const import AVAILABLE_MODES, CONF_SUPPORTED_MODES, DEFAULT_PORT, DOMAIN MODES_SCHEMA = {vol.Required(mode, default=True): bool for mode in AVAILABLE_MODES} DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str, **MODES_SCHEMA}) async def _validate_connection(hass: core.HomeAssistant, host): cool = CoolMasterNet(host, port=DEFAULT_PORT) devices = await hass.async_add_executor_job(cool.devices) return bool(devices) class CoolmasterConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a Coolmaster config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL @core.callback def _async_get_entry(self, data): supported_modes = [ key for (key, value) in data.items() if key in AVAILABLE_MODES and value ] return self.async_create_entry( title=data[CONF_HOST], data={ CONF_HOST: data[CONF_HOST], CONF_PORT: DEFAULT_PORT, CONF_SUPPORTED_MODES: supported_modes, }, ) async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" if user_input is None: return self.async_show_form(step_id="user", data_schema=DATA_SCHEMA) errors = {} host = user_input[CONF_HOST] try: result = await _validate_connection(self.hass, host) if not result: errors["base"] = "no_units" except (ConnectionRefusedError, TimeoutError): errors["base"] = "connection_error" if errors: return self.async_show_form( step_id="user", data_schema=DATA_SCHEMA, errors=errors ) return self._async_get_entry(user_input)
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/coolmaster/config_flow.py
"""Support for LaMetric notifications.""" import logging from lmnotify import Model, SimpleFrame, Sound from oauthlib.oauth2 import TokenExpiredError from requests.exceptions import ConnectionError as RequestsConnectionError import voluptuous as vol from homeassistant.components.notify import ( ATTR_DATA, ATTR_TARGET, PLATFORM_SCHEMA, BaseNotificationService, ) from homeassistant.const import CONF_ICON import homeassistant.helpers.config_validation as cv from . import DOMAIN as LAMETRIC_DOMAIN _LOGGER = logging.getLogger(__name__) AVAILABLE_PRIORITIES = ["info", "warning", "critical"] AVAILABLE_ICON_TYPES = ["none", "info", "alert"] CONF_CYCLES = "cycles" CONF_LIFETIME = "lifetime" CONF_PRIORITY = "priority" CONF_ICON_TYPE = "icon_type" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_ICON, default="a7956"): cv.string, vol.Optional(CONF_LIFETIME, default=10): cv.positive_int, vol.Optional(CONF_CYCLES, default=1): cv.positive_int, vol.Optional(CONF_PRIORITY, default="warning"): vol.In(AVAILABLE_PRIORITIES), vol.Optional(CONF_ICON_TYPE, default="info"): vol.In(AVAILABLE_ICON_TYPES), } ) def get_service(hass, config, discovery_info=None): """Get the LaMetric notification service.""" hlmn = hass.data.get(LAMETRIC_DOMAIN) return LaMetricNotificationService( hlmn, config[CONF_ICON], config[CONF_LIFETIME] * 1000, config[CONF_CYCLES], config[CONF_PRIORITY], config[CONF_ICON_TYPE], ) class LaMetricNotificationService(BaseNotificationService): """Implement the notification service for LaMetric.""" def __init__( self, hasslametricmanager, icon, lifetime, cycles, priority, icon_type ): """Initialize the service.""" self.hasslametricmanager = hasslametricmanager self._icon = icon self._lifetime = lifetime self._cycles = cycles self._priority = priority self._icon_type = icon_type self._devices = [] def send_message(self, message="", **kwargs): """Send a message to some LaMetric device.""" targets = kwargs.get(ATTR_TARGET) data = kwargs.get(ATTR_DATA) _LOGGER.debug("Targets/Data: %s/%s", targets, data) icon = self._icon cycles = self._cycles sound = None priority = self._priority icon_type = self._icon_type # Additional data? 
if data is not None: if "icon" in data: icon = data["icon"] if "sound" in data: try: sound = Sound(category="notifications", sound_id=data["sound"]) _LOGGER.debug("Adding notification sound %s", data["sound"]) except AssertionError: _LOGGER.error("Sound ID %s unknown, ignoring", data["sound"]) if "cycles" in data: cycles = int(data["cycles"]) if "icon_type" in data: if data["icon_type"] in AVAILABLE_ICON_TYPES: icon_type = data["icon_type"] else: _LOGGER.warning( "Priority %s invalid, using default %s", data["priority"], priority, ) if "priority" in data: if data["priority"] in AVAILABLE_PRIORITIES: priority = data["priority"] else: _LOGGER.warning( "Priority %s invalid, using default %s", data["priority"], priority, ) text_frame = SimpleFrame(icon, message) _LOGGER.debug( "Icon/Message/Cycles/Lifetime: %s, %s, %d, %d", icon, message, self._cycles, self._lifetime, ) frames = [text_frame] model = Model(frames=frames, cycles=cycles, sound=sound) lmn = self.hasslametricmanager.manager try: self._devices = lmn.get_devices() except TokenExpiredError: _LOGGER.debug("Token expired, fetching new token") lmn.get_token() self._devices = lmn.get_devices() except RequestsConnectionError: _LOGGER.warning( "Problem connecting to LaMetric, using cached devices instead" ) for dev in self._devices: if targets is None or dev["name"] in targets: try: lmn.set_device(dev) lmn.send_notification( model, lifetime=self._lifetime, priority=priority, icon_type=icon_type, ) _LOGGER.debug("Sent notification to LaMetric %s", dev["name"]) except OSError: _LOGGER.warning("Cannot connect to LaMetric %s", dev["name"])
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/lametric/notify.py
"""A platform which allows you to get information from Tautulli.""" from datetime import timedelta import logging from pytautulli import Tautulli import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_API_KEY, CONF_HOST, CONF_MONITORED_CONDITIONS, CONF_NAME, CONF_PATH, CONF_PORT, CONF_SSL, CONF_VERIFY_SSL, ) from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) CONF_MONITORED_USERS = "monitored_users" DEFAULT_NAME = "Tautulli" DEFAULT_PORT = "8181" DEFAULT_PATH = "" DEFAULT_SSL = False DEFAULT_VERIFY_SSL = True TIME_BETWEEN_UPDATES = timedelta(seconds=10) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_MONITORED_USERS): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string, vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string, vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean, vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean, } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Create the Tautulli sensor.""" name = config.get(CONF_NAME) host = config[CONF_HOST] port = config.get(CONF_PORT) path = config.get(CONF_PATH) api_key = config[CONF_API_KEY] monitored_conditions = config.get(CONF_MONITORED_CONDITIONS) user = config.get(CONF_MONITORED_USERS) use_ssl = config[CONF_SSL] verify_ssl = config.get(CONF_VERIFY_SSL) session = async_get_clientsession(hass, verify_ssl) tautulli = TautulliData( Tautulli(host, port, api_key, hass.loop, session, use_ssl, path) ) if not await tautulli.test_connection(): raise PlatformNotReady sensor = [TautulliSensor(tautulli, name, monitored_conditions, user)] async_add_entities(sensor, True) class TautulliSensor(Entity): """Representation of a Tautulli sensor.""" def __init__(self, tautulli, name, monitored_conditions, users): """Initialize the Tautulli sensor.""" self.tautulli = tautulli self.monitored_conditions = monitored_conditions self.usernames = users self.sessions = {} self.home = {} self._attributes = {} self._name = name self._state = None async def async_update(self): """Get the latest data from the Tautulli API.""" await self.tautulli.async_update() self.home = self.tautulli.api.home_data self.sessions = self.tautulli.api.session_data self._attributes["Top Movie"] = self.home.get("movie") self._attributes["Top TV Show"] = self.home.get("tv") self._attributes["Top User"] = self.home.get("user") for key in self.sessions: if "sessions" not in key: self._attributes[key] = self.sessions[key] for user in self.tautulli.api.users: if self.usernames is None or user in self.usernames: userdata = self.tautulli.api.user_data self._attributes[user] = {} self._attributes[user]["Activity"] = userdata[user]["Activity"] if self.monitored_conditions: for key in self.monitored_conditions: try: self._attributes[user][key] = userdata[user][key] except (KeyError, TypeError): self._attributes[user][key] = "" @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return 
the state of the sensor.""" return self.sessions.get("stream_count") @property def icon(self): """Return the icon of the sensor.""" return "mdi:plex" @property def unit_of_measurement(self): """Return the unit this state is expressed in.""" return "Watching" @property def device_state_attributes(self): """Return attributes for the sensor.""" return self._attributes class TautulliData: """Get the latest data and update the states.""" def __init__(self, api): """Initialize the data object.""" self.api = api @Throttle(TIME_BETWEEN_UPDATES) async def async_update(self): """Get the latest data from Tautulli.""" await self.api.get_data() async def test_connection(self): """Test connection to Tautulli.""" await self.api.test_connection() connection_status = self.api.connection return connection_status
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/tautulli/sensor.py
"""Support for LIFX Cloud scenes.""" import asyncio import logging from typing import Any import aiohttp from aiohttp.hdrs import AUTHORIZATION import async_timeout import voluptuous as vol from homeassistant.components.scene import Scene from homeassistant.const import CONF_PLATFORM, CONF_TIMEOUT, CONF_TOKEN, HTTP_OK from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_TIMEOUT = 10 PLATFORM_SCHEMA = vol.Schema( { vol.Required(CONF_PLATFORM): "lifx_cloud", vol.Required(CONF_TOKEN): cv.string, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the scenes stored in the LIFX Cloud.""" token = config.get(CONF_TOKEN) timeout = config.get(CONF_TIMEOUT) headers = {AUTHORIZATION: f"Bearer {token}"} url = "https://api.lifx.com/v1/scenes" try: httpsession = async_get_clientsession(hass) with async_timeout.timeout(timeout): scenes_resp = await httpsession.get(url, headers=headers) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.exception("Error on %s", url) return False status = scenes_resp.status if status == HTTP_OK: data = await scenes_resp.json() devices = [LifxCloudScene(hass, headers, timeout, scene) for scene in data] async_add_entities(devices) return True if status == 401: _LOGGER.error("Unauthorized (bad token?) on %s", url) return False _LOGGER.error("HTTP error %d on %s", scenes_resp.status, url) return False class LifxCloudScene(Scene): """Representation of a LIFX Cloud scene.""" def __init__(self, hass, headers, timeout, scene_data): """Initialize the scene.""" self.hass = hass self._headers = headers self._timeout = timeout self._name = scene_data["name"] self._uuid = scene_data["uuid"] @property def name(self): """Return the name of the scene.""" return self._name async def async_activate(self, **kwargs: Any) -> None: """Activate the scene.""" url = f"https://api.lifx.com/v1/scenes/scene_id:{self._uuid}/activate" try: httpsession = async_get_clientsession(self.hass) with async_timeout.timeout(self._timeout): await httpsession.put(url, headers=self._headers) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.exception("Error on %s", url)
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/lifx_cloud/scene.py
"""SMA Solar Webconnect interface.""" from datetime import timedelta import logging import pysma import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_HOST, CONF_PASSWORD, CONF_PATH, CONF_SCAN_INTERVAL, CONF_SSL, CONF_VERIFY_SSL, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.core import callback from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval _LOGGER = logging.getLogger(__name__) CONF_CUSTOM = "custom" CONF_FACTOR = "factor" CONF_GROUP = "group" CONF_KEY = "key" CONF_SENSORS = "sensors" CONF_UNIT = "unit" GROUPS = ["user", "installer"] def _check_sensor_schema(conf): """Check sensors and attributes are valid.""" try: valid = [s.name for s in pysma.Sensors()] except (ImportError, AttributeError): return conf customs = list(conf[CONF_CUSTOM].keys()) for sensor in conf[CONF_SENSORS]: if sensor in customs: _LOGGER.warning( "All custom sensors will be added automatically, no need to include them in sensors: %s", sensor, ) elif sensor not in valid: raise vol.Invalid(f"{sensor} does not exist") return conf CUSTOM_SCHEMA = vol.Any( { vol.Required(CONF_KEY): vol.All(cv.string, vol.Length(min=13, max=15)), vol.Required(CONF_UNIT): cv.string, vol.Optional(CONF_FACTOR, default=1): vol.Coerce(float), vol.Optional(CONF_PATH): vol.All(cv.ensure_list, [cv.string]), } ) PLATFORM_SCHEMA = vol.All( PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_SSL, default=False): cv.boolean, vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_GROUP, default=GROUPS[0]): vol.In(GROUPS), vol.Optional(CONF_SENSORS, default=[]): vol.Any( cv.schema_with_slug_keys(cv.ensure_list), # will be deprecated vol.All(cv.ensure_list, [str]), ), vol.Optional(CONF_CUSTOM, default={}): cv.schema_with_slug_keys( CUSTOM_SCHEMA ), }, extra=vol.PREVENT_EXTRA, ), _check_sensor_schema, ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up SMA WebConnect sensor.""" # Check config again during load - dependency available config = _check_sensor_schema(config) # Init all default sensors sensor_def = pysma.Sensors() # Sensor from the custom config sensor_def.add( [ pysma.Sensor(o[CONF_KEY], n, o[CONF_UNIT], o[CONF_FACTOR], o.get(CONF_PATH)) for n, o in config[CONF_CUSTOM].items() ] ) # Use all sensors by default config_sensors = config[CONF_SENSORS] hass_sensors = [] used_sensors = [] if isinstance(config_sensors, dict): # will be remove from 0.99 if not config_sensors: # Use all sensors by default config_sensors = {s.name: [] for s in sensor_def} # Prepare all Home Assistant sensor entities for name, attr in config_sensors.items(): sub_sensors = [sensor_def[s] for s in attr] hass_sensors.append(SMAsensor(sensor_def[name], sub_sensors)) used_sensors.append(name) used_sensors.extend(attr) if isinstance(config_sensors, list): if not config_sensors: # Use all sensors by default config_sensors = [s.name for s in sensor_def] used_sensors = list(set(config_sensors + list(config[CONF_CUSTOM].keys()))) for sensor in used_sensors: hass_sensors.append(SMAsensor(sensor_def[sensor], [])) used_sensors = [sensor_def[s] for s in set(used_sensors)] async_add_entities(hass_sensors) # Init the SMA interface session = async_get_clientsession(hass, 
verify_ssl=config[CONF_VERIFY_SSL]) grp = config[CONF_GROUP] protocol = "https" if config[CONF_SSL] else "http" url = f"{protocol}://{config[CONF_HOST]}" sma = pysma.SMA(session, url, config[CONF_PASSWORD], group=grp) # Ensure we logout on shutdown async def async_close_session(event): """Close the session.""" await sma.close_session() hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, async_close_session) backoff = 0 backoff_step = 0 async def async_sma(event): """Update all the SMA sensors.""" nonlocal backoff, backoff_step if backoff > 1: backoff -= 1 return values = await sma.read(used_sensors) if not values: try: backoff = [1, 1, 1, 6, 30][backoff_step] backoff_step += 1 except IndexError: backoff = 60 return backoff_step = 0 for sensor in hass_sensors: sensor.async_update_values() interval = config.get(CONF_SCAN_INTERVAL) or timedelta(seconds=5) async_track_time_interval(hass, async_sma, interval) class SMAsensor(Entity): """Representation of a SMA sensor.""" def __init__(self, pysma_sensor, sub_sensors): """Initialize the sensor.""" self._sensor = pysma_sensor self._sub_sensors = sub_sensors # Can be remove from 0.99 self._attr = {s.name: "" for s in sub_sensors} self._state = self._sensor.value @property def name(self): """Return the name of the sensor.""" return self._sensor.name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._sensor.unit @property def device_state_attributes(self): # Can be remove from 0.99 """Return the state attributes of the sensor.""" return self._attr @property def poll(self): """SMA sensors are updated & don't poll.""" return False @callback def async_update_values(self): """Update this sensor.""" update = False for sens in self._sub_sensors: # Can be remove from 0.99 newval = f"{sens.value} {sens.unit}" if self._attr[sens.name] != newval: update = True self._attr[sens.name] = newval if self._sensor.value != self._state: update = True self._state = self._sensor.value if update: self.async_write_ha_state() @property def unique_id(self): """Return a unique identifier for this sensor.""" return f"sma-{self._sensor.key}-{self._sensor.name}"
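The async_sma callback above throttles polling after failed reads by stepping through a fixed backoff sequence (skip 1, 1, 1, 6, then 30 intervals, and 60 thereafter). A minimal standalone sketch of that behaviour, with the Home Assistant plumbing stripped out (this class is illustrative only, not part of the integration):

class Backoff:
    """Sketch of the retry throttle used by async_sma above."""

    STEPS = [1, 1, 1, 6, 30]  # intervals to skip after repeated failed reads

    def __init__(self):
        self.backoff = 0
        self.step = 0

    def should_skip(self):
        """Return True while we are still waiting out a backoff window."""
        if self.backoff > 1:
            self.backoff -= 1
            return True
        return False

    def record(self, success):
        """Reset on success, otherwise move to the next backoff value."""
        if success:
            self.step = 0
        else:
            self.backoff = self.STEPS[self.step] if self.step < len(self.STEPS) else 60
            self.step += 1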
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/sma/sensor.py
"""Support for ComEd Hourly Pricing data.""" import asyncio from datetime import timedelta import json import logging import aiohttp import async_timeout import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, CONF_OFFSET from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) _RESOURCE = "https://hourlypricing.comed.com/api" SCAN_INTERVAL = timedelta(minutes=5) ATTRIBUTION = "Data provided by ComEd Hourly Pricing service" CONF_CURRENT_HOUR_AVERAGE = "current_hour_average" CONF_FIVE_MINUTE = "five_minute" CONF_MONITORED_FEEDS = "monitored_feeds" CONF_SENSOR_TYPE = "type" SENSOR_TYPES = { CONF_FIVE_MINUTE: ["ComEd 5 Minute Price", "c"], CONF_CURRENT_HOUR_AVERAGE: ["ComEd Current Hour Average Price", "c"], } TYPES_SCHEMA = vol.In(SENSOR_TYPES) SENSORS_SCHEMA = vol.Schema( { vol.Required(CONF_SENSOR_TYPE): TYPES_SCHEMA, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_OFFSET, default=0.0): vol.Coerce(float), } ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_MONITORED_FEEDS): [SENSORS_SCHEMA]} ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the ComEd Hourly Pricing sensor.""" websession = async_get_clientsession(hass) dev = [] for variable in config[CONF_MONITORED_FEEDS]: dev.append( ComedHourlyPricingSensor( hass.loop, websession, variable[CONF_SENSOR_TYPE], variable[CONF_OFFSET], variable.get(CONF_NAME), ) ) async_add_entities(dev, True) class ComedHourlyPricingSensor(Entity): """Implementation of a ComEd Hourly Pricing sensor.""" def __init__(self, loop, websession, sensor_type, offset, name): """Initialize the sensor.""" self.loop = loop self.websession = websession if name: self._name = name else: self._name = SENSOR_TYPES[sensor_type][0] self.type = sensor_type self.offset = offset self._state = None self._unit_of_measurement = SENSOR_TYPES[sensor_type][1] @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def device_state_attributes(self): """Return the state attributes.""" return {ATTR_ATTRIBUTION: ATTRIBUTION} async def async_update(self): """Get the ComEd Hourly Pricing data from the web service.""" try: if self.type == CONF_FIVE_MINUTE or self.type == CONF_CURRENT_HOUR_AVERAGE: url_string = _RESOURCE if self.type == CONF_FIVE_MINUTE: url_string += "?type=5minutefeed" else: url_string += "?type=currenthouraverage" with async_timeout.timeout(60): response = await self.websession.get(url_string) # The API responds with MIME type 'text/html' text = await response.text() data = json.loads(text) self._state = round(float(data[0]["price"]) + self.offset, 2) else: self._state = None except (asyncio.TimeoutError, aiohttp.ClientError) as err: _LOGGER.error("Could not get data from ComEd API: %s", err) except (ValueError, KeyError): _LOGGER.warning("Could not update status for %s", self.name)
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/comed_hourly_pricing/sensor.py
"""Support for Neato sensors.""" from datetime import timedelta import logging from pybotvac.exceptions import NeatoRobotException from homeassistant.components.sensor import DEVICE_CLASS_BATTERY from homeassistant.const import UNIT_PERCENTAGE from homeassistant.helpers.entity import Entity from .const import NEATO_DOMAIN, NEATO_LOGIN, NEATO_ROBOTS, SCAN_INTERVAL_MINUTES _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(minutes=SCAN_INTERVAL_MINUTES) BATTERY = "Battery" async def async_setup_entry(hass, entry, async_add_entities): """Set up the Neato sensor using config entry.""" dev = [] neato = hass.data.get(NEATO_LOGIN) for robot in hass.data[NEATO_ROBOTS]: dev.append(NeatoSensor(neato, robot)) if not dev: return _LOGGER.debug("Adding robots for sensors %s", dev) async_add_entities(dev, True) class NeatoSensor(Entity): """Neato sensor.""" def __init__(self, neato, robot): """Initialize Neato sensor.""" self.robot = robot self._available = neato.logged_in if neato is not None else False self._robot_name = f"{self.robot.name} {BATTERY}" self._robot_serial = self.robot.serial self._state = None def update(self): """Update Neato Sensor.""" try: self._state = self.robot.state except NeatoRobotException as ex: if self._available: _LOGGER.error( "Neato sensor connection error for '%s': %s", self.entity_id, ex ) self._state = None self._available = False return self._available = True _LOGGER.debug("self._state=%s", self._state) @property def name(self): """Return the name of this sensor.""" return self._robot_name @property def unique_id(self): """Return unique ID.""" return self._robot_serial @property def device_class(self): """Return the device class.""" return DEVICE_CLASS_BATTERY @property def available(self): """Return availability.""" return self._available @property def state(self): """Return the state.""" return self._state["details"]["charge"] @property def unit_of_measurement(self): """Return unit of measurement.""" return UNIT_PERCENTAGE @property def device_info(self): """Device info for neato robot.""" return {"identifiers": {(NEATO_DOMAIN, self._robot_serial)}}
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/neato/sensor.py
"""Support for the Environment Canada radar imagery.""" import datetime import logging from env_canada import ECRadar import voluptuous as vol from homeassistant.components.camera import PLATFORM_SCHEMA, Camera from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, ) import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ATTR_UPDATED = "updated" CONF_ATTRIBUTION = "Data provided by Environment Canada" CONF_STATION = "station" CONF_LOOP = "loop" CONF_PRECIP_TYPE = "precip_type" MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_LOOP, default=True): cv.boolean, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_STATION): cv.matches_regex(r"^C[A-Z]{4}$|^[A-Z]{3}$"), vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude, vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude, vol.Optional(CONF_PRECIP_TYPE): ["RAIN", "SNOW"], } ) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Environment Canada camera.""" if config.get(CONF_STATION): radar_object = ECRadar( station_id=config[CONF_STATION], precip_type=config.get(CONF_PRECIP_TYPE) ) else: lat = config.get(CONF_LATITUDE, hass.config.latitude) lon = config.get(CONF_LONGITUDE, hass.config.longitude) radar_object = ECRadar( coordinates=(lat, lon), precip_type=config.get(CONF_PRECIP_TYPE) ) add_devices([ECCamera(radar_object, config.get(CONF_NAME))], True) class ECCamera(Camera): """Implementation of an Environment Canada radar camera.""" def __init__(self, radar_object, camera_name): """Initialize the camera.""" super().__init__() self.radar_object = radar_object self.camera_name = camera_name self.content_type = "image/gif" self.image = None self.timestamp = None def camera_image(self): """Return bytes of camera image.""" self.update() return self.image @property def name(self): """Return the name of the camera.""" if self.camera_name is not None: return self.camera_name return "Environment Canada Radar" @property def device_state_attributes(self): """Return the state attributes of the device.""" attr = {ATTR_ATTRIBUTION: CONF_ATTRIBUTION, ATTR_UPDATED: self.timestamp} return attr @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Update radar image.""" if CONF_LOOP: self.image = self.radar_object.get_loop() else: self.image = self.radar_object.get_latest_frame() self.timestamp = self.radar_object.timestamp
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/environment_canada/camera.py
"""Support for binary sensor using I2C MCP23017 chip.""" import logging from adafruit_mcp230xx.mcp23017 import MCP23017 # pylint: disable=import-error import board # pylint: disable=import-error import busio # pylint: disable=import-error import digitalio # pylint: disable=import-error import voluptuous as vol from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity from homeassistant.const import DEVICE_DEFAULT_NAME import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_INVERT_LOGIC = "invert_logic" CONF_I2C_ADDRESS = "i2c_address" CONF_PINS = "pins" CONF_PULL_MODE = "pull_mode" MODE_UP = "UP" MODE_DOWN = "DOWN" DEFAULT_INVERT_LOGIC = False DEFAULT_I2C_ADDRESS = 0x20 DEFAULT_PULL_MODE = MODE_UP _SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string}) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_PINS): _SENSORS_SCHEMA, vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean, vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE): vol.All( vol.Upper, vol.In([MODE_UP, MODE_DOWN]) ), vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int), } ) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the MCP23017 binary sensors.""" pull_mode = config[CONF_PULL_MODE] invert_logic = config[CONF_INVERT_LOGIC] i2c_address = config[CONF_I2C_ADDRESS] i2c = busio.I2C(board.SCL, board.SDA) mcp = MCP23017(i2c, address=i2c_address) binary_sensors = [] pins = config[CONF_PINS] for pin_num, pin_name in pins.items(): pin = mcp.get_pin(pin_num) binary_sensors.append( MCP23017BinarySensor(pin_name, pin, pull_mode, invert_logic) ) add_devices(binary_sensors, True) class MCP23017BinarySensor(BinarySensorEntity): """Represent a binary sensor that uses MCP23017.""" def __init__(self, name, pin, pull_mode, invert_logic): """Initialize the MCP23017 binary sensor.""" self._name = name or DEVICE_DEFAULT_NAME self._pin = pin self._pull_mode = pull_mode self._invert_logic = invert_logic self._state = None self._pin.direction = digitalio.Direction.INPUT self._pin.pull = digitalio.Pull.UP @property def name(self): """Return the name of the sensor.""" return self._name @property def is_on(self): """Return the state of the entity.""" return self._state != self._invert_logic def update(self): """Update the GPIO state.""" self._state = self._pin.value
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/mcp23017/binary_sensor.py
"""Allows to configure a switch using BeagleBone Black GPIO.""" import logging import voluptuous as vol from homeassistant.components import bbb_gpio from homeassistant.components.switch import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import ToggleEntity _LOGGER = logging.getLogger(__name__) CONF_PINS = "pins" CONF_INITIAL = "initial" CONF_INVERT_LOGIC = "invert_logic" PIN_SCHEMA = vol.Schema( { vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_INITIAL, default=False): cv.boolean, vol.Optional(CONF_INVERT_LOGIC, default=False): cv.boolean, } ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_PINS, default={}): vol.Schema({cv.string: PIN_SCHEMA})} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the BeagleBone Black GPIO devices.""" pins = config[CONF_PINS] switches = [] for pin, params in pins.items(): switches.append(BBBGPIOSwitch(pin, params)) add_entities(switches) class BBBGPIOSwitch(ToggleEntity): """Representation of a BeagleBone Black GPIO.""" def __init__(self, pin, params): """Initialize the pin.""" self._pin = pin self._name = params[CONF_NAME] or DEVICE_DEFAULT_NAME self._state = params[CONF_INITIAL] self._invert_logic = params[CONF_INVERT_LOGIC] bbb_gpio.setup_output(self._pin) if self._state is False: bbb_gpio.write_output(self._pin, 1 if self._invert_logic else 0) else: bbb_gpio.write_output(self._pin, 0 if self._invert_logic else 1) @property def name(self): """Return the name of the switch.""" return self._name @property def should_poll(self): """No polling needed.""" return False @property def is_on(self): """Return true if device is on.""" return self._state def turn_on(self, **kwargs): """Turn the device on.""" bbb_gpio.write_output(self._pin, 0 if self._invert_logic else 1) self._state = True self.schedule_update_ha_state() def turn_off(self, **kwargs): """Turn the device off.""" bbb_gpio.write_output(self._pin, 1 if self._invert_logic else 0) self._state = False self.schedule_update_ha_state()
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/components/bbb_gpio/switch.py
"""All methods needed to bootstrap a Home Assistant instance.""" import asyncio import logging.handlers from timeit import default_timer as timer from types import ModuleType from typing import Awaitable, Callable, Optional, Set from homeassistant import config as conf_util, core, loader, requirements from homeassistant.config import async_notify_setup_error from homeassistant.const import EVENT_COMPONENT_LOADED, PLATFORM_FORMAT from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers.typing import ConfigType from homeassistant.util import dt as dt_util _LOGGER = logging.getLogger(__name__) ATTR_COMPONENT = "component" DATA_SETUP_DONE = "setup_done" DATA_SETUP_STARTED = "setup_started" DATA_SETUP = "setup_tasks" DATA_DEPS_REQS = "deps_reqs_processed" SLOW_SETUP_WARNING = 10 # Since its possible for databases to be # upwards of 36GiB (or larger) in the wild # we wait up to 3 hours for startup SLOW_SETUP_MAX_WAIT = 10800 @core.callback def async_set_domains_to_be_loaded(hass: core.HomeAssistant, domains: Set[str]) -> None: """Set domains that are going to be loaded from the config. This will allow us to properly handle after_dependencies. """ hass.data[DATA_SETUP_DONE] = {domain: asyncio.Event() for domain in domains} def setup_component(hass: core.HomeAssistant, domain: str, config: ConfigType) -> bool: """Set up a component and all its dependencies.""" return asyncio.run_coroutine_threadsafe( async_setup_component(hass, domain, config), hass.loop ).result() async def async_setup_component( hass: core.HomeAssistant, domain: str, config: ConfigType ) -> bool: """Set up a component and all its dependencies. This method is a coroutine. """ if domain in hass.config.components: return True setup_tasks = hass.data.setdefault(DATA_SETUP, {}) if domain in setup_tasks: return await setup_tasks[domain] # type: ignore task = setup_tasks[domain] = hass.async_create_task( _async_setup_component(hass, domain, config) ) try: return await task # type: ignore finally: if domain in hass.data.get(DATA_SETUP_DONE, {}): hass.data[DATA_SETUP_DONE].pop(domain).set() async def _async_process_dependencies( hass: core.HomeAssistant, config: ConfigType, integration: loader.Integration ) -> bool: """Ensure all dependencies are set up.""" tasks = { dep: hass.loop.create_task(async_setup_component(hass, dep, config)) for dep in integration.dependencies } to_be_loaded = hass.data.get(DATA_SETUP_DONE, {}) for dep in integration.after_dependencies: if dep in to_be_loaded and dep not in hass.config.components: tasks[dep] = hass.loop.create_task(to_be_loaded[dep].wait()) if not tasks: return True _LOGGER.debug("Dependency %s will wait for %s", integration.domain, list(tasks)) results = await asyncio.gather(*tasks.values()) failed = [ domain for idx, domain in enumerate(integration.dependencies) if not results[idx] ] if failed: _LOGGER.error( "Unable to set up dependencies of %s. Setup failed for dependencies: %s", integration.domain, ", ".join(failed), ) return False return True async def _async_setup_component( hass: core.HomeAssistant, domain: str, config: ConfigType ) -> bool: """Set up a component for Home Assistant. This method is a coroutine. 
""" def log_error(msg: str, link: Optional[str] = None) -> None: """Log helper.""" _LOGGER.error("Setup failed for %s: %s", domain, msg) async_notify_setup_error(hass, domain, link) try: integration = await loader.async_get_integration(hass, domain) except loader.IntegrationNotFound: log_error("Integration not found.") return False # Validate all dependencies exist and there are no circular dependencies if not await integration.resolve_dependencies(): return False # Process requirements as soon as possible, so we can import the component # without requiring imports to be in functions. try: await async_process_deps_reqs(hass, config, integration) except HomeAssistantError as err: log_error(str(err), integration.documentation) return False # Some integrations fail on import because they call functions incorrectly. # So we do it before validating config to catch these errors. try: component = integration.get_component() except ImportError as err: log_error(f"Unable to import component: {err}", integration.documentation) return False except Exception: # pylint: disable=broad-except _LOGGER.exception("Setup failed for %s: unknown error", domain) return False processed_config = await conf_util.async_process_component_config( hass, config, integration ) if processed_config is None: log_error("Invalid config.", integration.documentation) return False start = timer() _LOGGER.info("Setting up %s", domain) hass.data.setdefault(DATA_SETUP_STARTED, {})[domain] = dt_util.utcnow() if hasattr(component, "PLATFORM_SCHEMA"): # Entity components have their own warning warn_task = None else: warn_task = hass.loop.call_later( SLOW_SETUP_WARNING, _LOGGER.warning, "Setup of %s is taking over %s seconds.", domain, SLOW_SETUP_WARNING, ) try: if hasattr(component, "async_setup"): task = component.async_setup( # type: ignore hass, processed_config ) elif hasattr(component, "setup"): # This should not be replaced with hass.async_add_executor_job because # we don't want to track this task in case it blocks startup. task = hass.loop.run_in_executor( None, component.setup, hass, processed_config # type: ignore ) else: log_error("No setup function defined.") hass.data[DATA_SETUP_STARTED].pop(domain) return False result = await asyncio.wait_for(task, SLOW_SETUP_MAX_WAIT) except asyncio.TimeoutError: _LOGGER.error( "Setup of %s is taking longer than %s seconds." " Startup will proceed without waiting any longer", domain, SLOW_SETUP_MAX_WAIT, ) hass.data[DATA_SETUP_STARTED].pop(domain) return False except Exception: # pylint: disable=broad-except _LOGGER.exception("Error during setup of component %s", domain) async_notify_setup_error(hass, domain, integration.documentation) hass.data[DATA_SETUP_STARTED].pop(domain) return False finally: end = timer() if warn_task: warn_task.cancel() _LOGGER.info("Setup of domain %s took %.1f seconds", domain, end - start) if result is False: log_error("Integration failed to initialize.") hass.data[DATA_SETUP_STARTED].pop(domain) return False if result is not True: log_error( f"Integration {domain!r} did not return boolean if setup was " "successful. Disabling component." ) hass.data[DATA_SETUP_STARTED].pop(domain) return False # Flush out async_setup calling create_task. Fragile but covered by test. 
await asyncio.sleep(0) await hass.config_entries.flow.async_wait_init_flow_finish(domain) await asyncio.gather( *[ entry.async_setup(hass, integration=integration) for entry in hass.config_entries.async_entries(domain) ] ) hass.config.components.add(domain) hass.data[DATA_SETUP_STARTED].pop(domain) # Cleanup if domain in hass.data[DATA_SETUP]: hass.data[DATA_SETUP].pop(domain) hass.bus.async_fire(EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: domain}) return True async def async_prepare_setup_platform( hass: core.HomeAssistant, hass_config: ConfigType, domain: str, platform_name: str ) -> Optional[ModuleType]: """Load a platform and makes sure dependencies are setup. This method is a coroutine. """ platform_path = PLATFORM_FORMAT.format(domain=domain, platform=platform_name) def log_error(msg: str) -> None: """Log helper.""" _LOGGER.error("Unable to prepare setup for platform %s: %s", platform_path, msg) async_notify_setup_error(hass, platform_path) try: integration = await loader.async_get_integration(hass, platform_name) except loader.IntegrationNotFound: log_error("Integration not found") return None # Process deps and reqs as soon as possible, so that requirements are # available when we import the platform. try: await async_process_deps_reqs(hass, hass_config, integration) except HomeAssistantError as err: log_error(str(err)) return None try: platform = integration.get_platform(domain) except ImportError as exc: log_error(f"Platform not found ({exc}).") return None # Already loaded if platform_path in hass.config.components: return platform # Platforms cannot exist on their own, they are part of their integration. # If the integration is not set up yet, and can be set up, set it up. if integration.domain not in hass.config.components: try: component = integration.get_component() except ImportError as exc: log_error(f"Unable to import the component ({exc}).") return None if hasattr(component, "setup") or hasattr(component, "async_setup"): if not await async_setup_component(hass, integration.domain, hass_config): log_error("Unable to set up component.") return None return platform async def async_process_deps_reqs( hass: core.HomeAssistant, config: ConfigType, integration: loader.Integration ) -> None: """Process all dependencies and requirements for a module. Module is a Python module of either a component or platform. 
""" processed = hass.data.get(DATA_DEPS_REQS) if processed is None: processed = hass.data[DATA_DEPS_REQS] = set() elif integration.domain in processed: return if not await _async_process_dependencies(hass, config, integration): raise HomeAssistantError("Could not set up all dependencies.") if not hass.config.skip_pip and integration.requirements: await requirements.async_get_integration_with_requirements( hass, integration.domain ) processed.add(integration.domain) @core.callback def async_when_setup( hass: core.HomeAssistant, component: str, when_setup_cb: Callable[[core.HomeAssistant, str], Awaitable[None]], ) -> None: """Call a method when a component is setup.""" async def when_setup() -> None: """Call the callback.""" try: await when_setup_cb(hass, component) except Exception: # pylint: disable=broad-except _LOGGER.exception("Error handling when_setup callback for %s", component) # Running it in a new task so that it always runs after if component in hass.config.components: hass.async_create_task(when_setup()) return unsub = None async def loaded_event(event: core.Event) -> None: """Call the callback.""" if event.data[ATTR_COMPONENT] != component: return unsub() # type: ignore await when_setup() unsub = hass.bus.async_listen(EVENT_COMPONENT_LOADED, loaded_event)
"""The tests for Philips Hue device triggers.""" import pytest from homeassistant.components import hue import homeassistant.components.automation as automation from homeassistant.components.hue import device_trigger from homeassistant.setup import async_setup_component from .conftest import setup_bridge_for_sensors as setup_bridge from .test_sensor_base import HUE_DIMMER_REMOTE_1, HUE_TAP_REMOTE_1 from tests.common import ( assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, ) REMOTES_RESPONSE = {"7": HUE_TAP_REMOTE_1, "8": HUE_DIMMER_REMOTE_1} @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_get_triggers(hass, mock_bridge, device_reg): """Test we get the expected triggers from a hue remote.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 remotes, just 1 battery sensor assert len(hass.states.async_all()) == 1 # Get triggers for specific tap switch hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_tap_device.id) expected_triggers = [ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_TAP_REMOTE.keys() ] assert_lists_same(triggers, expected_triggers) # Get triggers for specific dimmer switch hue_dimmer_device = device_reg.async_get_device( {(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}, connections={} ) triggers = await async_get_device_automations(hass, "trigger", hue_dimmer_device.id) trigger_batt = { "platform": "device", "domain": "sensor", "device_id": hue_dimmer_device.id, "type": "battery_level", "entity_id": "sensor.hue_dimmer_switch_1_battery_level", } expected_triggers = [ trigger_batt, *[ { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_dimmer_device.id, "type": t_type, "subtype": t_subtype, } for t_type, t_subtype in device_trigger.HUE_DIMMER_REMOTE.keys() ], ] assert_lists_same(triggers, expected_triggers) async def test_if_fires_on_state_change(hass, mock_bridge, device_reg, calls): """Test for button press trigger firing.""" mock_bridge.mock_sensor_responses.append(REMOTES_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 1 # Set an automation with a specific tap switch trigger hue_tap_device = device_reg.async_get_device( {(hue.DOMAIN, "00:00:00:00:00:44:23:08")}, connections={} ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": hue_tap_device.id, "type": "remote_button_short_press", "subtype": "button_4", }, "action": { "service": "test.automation", "data_template": { "some": "B4 - {{ trigger.event.data.event }}" }, }, }, { "trigger": { "platform": "device", "domain": hue.DOMAIN, "device_id": "mock-device-id", "type": "remote_button_short_press", "subtype": "button_1", }, "action": { "service": "test.automation", "data_template": { "some": "B1 - {{ trigger.event.data.event }}" }, }, }, ] }, ) # Fake that the remote is being pressed. 
new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 18, "lastupdated": "2019-12-28T22:58:02", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(calls) == 1 assert calls[0].data["some"] == "B4 - 18" # Fake another button press. new_sensor_response = dict(REMOTES_RESPONSE) new_sensor_response["7"]["state"] = { "buttonevent": 34, "lastupdated": "2019-12-28T22:58:05", } mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 3 assert len(calls) == 1
mKeRix/home-assistant
tests/components/hue/test_device_trigger.py
homeassistant/setup.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ sextractor.py: Classes to read SExtractor table format Built on daophot.py: :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu) """ import re from . import core class SExtractorHeader(core.BaseHeader): """Read the header from a file produced by SExtractor.""" comment = r'^\s*#\s*\S\D.*' # Find lines that don't have "# digit" def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines`` for a SExtractor header. The SExtractor header is specialized so that we just copy the entire BaseHeader get_cols routine and modify as needed. Parameters ---------- lines : list List of table lines """ # This assumes that the columns are listed in order, one per line with a # header comment string of the format: "# 1 ID short description [unit]" # However, some may be missing and must be inferred from skipped column numbers columns = {} # E.g. '# 1 ID identification number' (no units) or '# 2 MAGERR magnitude of error [mag]' # Updated along with issue #4603, for more robust parsing of unit re_name_def = re.compile(r"""^\s* \# \s* # possible whitespace around # (?P<colnumber> [0-9]+)\s+ # number of the column in table (?P<colname> [-\w]+) # name of the column # column description, match any character until... (?:\s+(?P<coldescr> \w .+) # ...until [non-space][space][unit] or [not-right-bracket][end] (?:(?<!(\]))$|(?=(?:(?<=\S)\s+\[.+\]))))? (?:\s*\[(?P<colunit>.+)\])?.* # match units in brackets """, re.VERBOSE) dataline = None for line in lines: if not line.startswith('#'): dataline = line # save for later to infer the actual number of columns break # End of header lines else: match = re_name_def.search(line) if match: colnumber = int(match.group('colnumber')) colname = match.group('colname') coldescr = match.group('coldescr') colunit = match.group('colunit') # If no units are given, colunit = None columns[colnumber] = (colname, coldescr, colunit) # Handle skipped column numbers colnumbers = sorted(columns) # Handle the case where the last column is array-like by append a pseudo column # If there are more data columns than the largest column number # then add a pseudo-column that will be dropped later. This allows # the array column logic below to work in all cases. if dataline is not None: n_data_cols = len(dataline.split()) else: # handles no data, where we have to rely on the last column number n_data_cols = colnumbers[-1] # sextractor column number start at 1. 
columns[n_data_cols + 1] = (None, None, None) colnumbers.append(n_data_cols + 1) if len(columns) > 1: # only fill in skipped columns when there is genuine column initially previous_column = 0 for n in colnumbers: if n != previous_column + 1: for c in range(previous_column + 1, n): column_name = (columns[previous_column][0] + f"_{c - previous_column}") column_descr = columns[previous_column][1] column_unit = columns[previous_column][2] columns[c] = (column_name, column_descr, column_unit) previous_column = n # Add the columns in order to self.names colnumbers = sorted(columns)[:-1] # drop the pseudo column self.names = [] for n in colnumbers: self.names.append(columns[n][0]) if not self.names: raise core.InconsistentTableError('No column names found in SExtractor header') self.cols = [] for n in colnumbers: col = core.Column(name=columns[n][0]) col.description = columns[n][1] col.unit = columns[n][2] self.cols.append(col) class SExtractorData(core.BaseData): start_line = 0 delimiter = ' ' comment = r'\s*#' class SExtractor(core.BaseReader): """SExtractor format table. SExtractor is a package for faint-galaxy photometry (Bertin & Arnouts 1996, A&A Supp. 317, 393.) See: https://sextractor.readthedocs.io/en/latest/ Example:: # 1 NUMBER # 2 ALPHA_J2000 # 3 DELTA_J2000 # 4 FLUX_RADIUS # 7 MAG_AUTO [mag] # 8 X2_IMAGE Variance along x [pixel**2] # 9 X_MAMA Barycenter position along MAMA x axis [m**(-6)] # 10 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)] 1 32.23222 10.1211 0.8 1.2 1.4 18.1 1000.0 0.00304 -3.498 2 38.12321 -88.1321 2.2 2.4 3.1 17.0 1500.0 0.00908 1.401 Note the skipped numbers since flux_radius has 3 columns. The three FLUX_RADIUS columns will be named FLUX_RADIUS, FLUX_RADIUS_1, FLUX_RADIUS_2 Also note that a post-ID description (e.g. "Variance along x") is optional and that units may be specified at the end of a line in brackets. """ _format_name = 'sextractor' _io_registry_can_write = False _description = 'SExtractor format table' header_class = SExtractorHeader data_class = SExtractorData inputter_class = core.ContinuationLinesInputter def read(self, table): """ Read input data (file-like object, filename, list of strings, or single string) into a Table and return the result. """ out = super().read(table) # remove the comments if 'comments' in out.meta: del out.meta['comments'] return out def write(self, table): raise NotImplementedError
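To make the skipped-column handling above concrete, here is a minimal usage sketch. It assumes the reader is registered with the I/O machinery under the format name 'sextractor' (as _format_name indicates); the input lines are the abbreviated example from the class docstring, and the expected column names follow from the get_cols logic above.

from astropy.io import ascii

# FLUX_RADIUS spans three data columns, so header numbers 5 and 6 are skipped.
lines = [
    '# 1 NUMBER',
    '# 2 ALPHA_J2000',
    '# 3 DELTA_J2000',
    '# 4 FLUX_RADIUS',
    '# 7 MAG_AUTO [mag]',
    '1 32.23222 10.1211 0.8 1.2 1.4 18.1',
    '2 38.12321 -88.1321 2.2 2.4 3.1 17.0',
]
tbl = ascii.read(lines, format='sextractor')

# Skipped numbers become FLUX_RADIUS_1 and FLUX_RADIUS_2, per get_cols above;
# MAG_AUTO picks up the bracketed unit from the header comment.
print(tbl.colnames)
# ['NUMBER', 'ALPHA_J2000', 'DELTA_J2000', 'FLUX_RADIUS',
#  'FLUX_RADIUS_1', 'FLUX_RADIUS_2', 'MAG_AUTO']
print(tbl['MAG_AUTO'].unit)  # mag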
# Licensed under a 3-clause BSD style license - see LICENSE.rst import textwrap import numpy as np import pytest from astropy.io import fits from astropy.nddata.nduncertainty import ( StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty, InverseVariance) from astropy import units as u from astropy import log from astropy.wcs import WCS, FITSFixedWarning from astropy.utils import NumpyRNGContext from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames, get_pkg_data_contents) from astropy.utils.exceptions import AstropyWarning from astropy.nddata.ccddata import CCDData from astropy.nddata import _testing as nd_testing from astropy.table import Table DEFAULT_DATA_SIZE = 100 with NumpyRNGContext(123): _random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE]) def create_ccd_data(): """ Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE with units of ADU. """ data = _random_array.copy() fake_meta = {'my_key': 42, 'your_key': 'not 42'} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd def test_ccddata_empty(): with pytest.raises(TypeError): CCDData() # empty initializer should fail def test_ccddata_must_have_unit(): with pytest.raises(ValueError): CCDData(np.zeros([2, 2])) def test_ccddata_unit_cannot_be_set_to_none(): ccd_data = create_ccd_data() with pytest.raises(TypeError): ccd_data.unit = None def test_ccddata_meta_header_conflict(): with pytest.raises(ValueError) as exc: CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2}) assert "can't have both header and meta." in str(exc.value) def test_ccddata_simple(): ccd_data = create_ccd_data() assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert ccd_data.dtype == np.dtype(float) def test_ccddata_init_with_string_electron_unit(): ccd = CCDData(np.zeros([2, 2]), unit="electron") assert ccd.unit is u.electron def test_initialize_from_FITS(tmpdir): ccd_data = create_ccd_data() hdu = fits.PrimaryHDU(ccd_data) hdulist = fits.HDUList([hdu]) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v def test_initialize_from_fits_with_unit_in_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = u.adu.to_string() filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? assert ccd.unit is u.adu # An explicit unit in the read overrides any unit in the FITS file ccd2 = CCDData.read(filename, unit="photon") assert ccd2.unit is u.photon def test_initialize_from_fits_with_ADU_in_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = 'ADU' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? 
assert ccd.unit is u.adu def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir): hdu = fits.PrimaryHDU(np.ones((2, 2))) hdu.header['bunit'] = 'definetely-not-a-unit' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) with pytest.raises(ValueError): CCDData.read(filename) def test_initialize_from_fits_with_technically_invalid_but_not_really(tmpdir): hdu = fits.PrimaryHDU(np.ones((2, 2))) hdu.header['bunit'] = 'ELECTRONS/S' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) assert ccd.unit == u.electron/u.s def test_initialize_from_fits_with_data_in_different_extension(tmpdir): fake_img = np.arange(4).reshape(2, 2) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(fake_img) hdus = fits.HDUList([hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img) # check that the header is the combined header assert hdu2.header + hdu1.header == ccd.header def test_initialize_from_fits_with_extension(tmpdir): fake_img1 = np.zeros([2, 2]) fake_img2 = np.arange(4).reshape(2, 2) hdu0 = fits.PrimaryHDU() hdu1 = fits.ImageHDU(fake_img1) hdu2 = fits.ImageHDU(fake_img2) hdus = fits.HDUList([hdu0, hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, hdu=2, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img2) def test_write_unit_to_hdu(): ccd_data = create_ccd_data() ccd_unit = ccd_data.unit hdulist = ccd_data.to_hdu() assert 'bunit' in hdulist[0].header assert hdulist[0].header['bunit'] == ccd_unit.to_string() def test_initialize_from_FITS_bad_keyword_raises_error(tmpdir): # There are two fits.open keywords that are not permitted in ccdproc: # do_not_scale_image_data and scale_back ccd_data = create_ccd_data() filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, scale_back=True) def test_ccddata_writer(tmpdir): ccd_data = create_ccd_data() filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) ccd_disk = CCDData.read(filename, unit=ccd_data.unit) np.testing.assert_array_equal(ccd_data.data, ccd_disk.data) def test_ccddata_meta_is_case_sensitive(): ccd_data = create_ccd_data() key = 'SoMeKEY' ccd_data.meta[key] = 10 assert key.lower() not in ccd_data.meta assert key.upper() not in ccd_data.meta assert key in ccd_data.meta def test_ccddata_meta_is_not_fits_header(): ccd_data = create_ccd_data() ccd_data.meta = {'OBSERVER': 'Edwin Hubble'} assert not isinstance(ccd_data.meta, fits.Header) def test_fromMEF(tmpdir): ccd_data = create_ccd_data() hdu = fits.PrimaryHDU(ccd_data) hdu2 = fits.PrimaryHDU(2 * ccd_data.data) hdulist = fits.HDUList(hdu) hdulist.append(hdu2) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) # by default, we reading from the first extension cd = CCDData.read(filename, unit=u.electron) np.testing.assert_array_equal(cd.data, ccd_data.data) # but reading from the second should work too cd = CCDData.read(filename, hdu=1, unit=u.electron) np.testing.assert_array_equal(cd.data, 2 * ccd_data.data) def test_metafromheader(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = 
'3600' d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromdict(): dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600} d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' def test_header2meta(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), unit=u.electron) d1.header = hdr assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromstring_fail(): hdr = 'this is not a valid header' with pytest.raises(TypeError): CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu) def test_setting_bad_uncertainty_raises_error(): ccd_data = create_ccd_data() with pytest.raises(TypeError): # Uncertainty is supposed to be an instance of NDUncertainty ccd_data.uncertainty = 10 def test_setting_uncertainty_with_array(): ccd_data = create_ccd_data() ccd_data.uncertainty = None fake_uncertainty = np.sqrt(np.abs(ccd_data.data)) ccd_data.uncertainty = fake_uncertainty.copy() np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty) def test_setting_uncertainty_wrong_shape_raises_error(): ccd_data = create_ccd_data() with pytest.raises(ValueError): ccd_data.uncertainty = np.zeros([3, 4]) def test_to_hdu(): ccd_data = create_ccd_data() ccd_data.meta = {'observer': 'Edwin Hubble'} fits_hdulist = ccd_data.to_hdu() assert isinstance(fits_hdulist, fits.HDUList) for k, v in ccd_data.meta.items(): assert fits_hdulist[0].header[k] == v np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data) def test_copy(): ccd_data = create_ccd_data() ccd_copy = ccd_data.copy() np.testing.assert_array_equal(ccd_copy.data, ccd_data.data) assert ccd_copy.unit == ccd_data.unit assert ccd_copy.meta == ccd_data.meta @pytest.mark.parametrize('operation,affects_uncertainty', [ ("multiply", True), ("divide", True), ]) @pytest.mark.parametrize('operand', [ 2.0, 2 * u.dimensionless_unscaled, 2 * u.photon / u.adu, ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) def test_mult_div_overload(operand, with_uncertainty, operation, affects_uncertainty): ccd_data = create_ccd_data() if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = getattr(ccd_data, operation) np_method = getattr(np, operation) result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): # Need the "1 *" below to force arguments to be Quantity to work around # astropy/astropy#2377 expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit assert result.unit == expected_unit else: assert result.unit == ccd_data.unit @pytest.mark.parametrize('operation,affects_uncertainty', [ ("add", False), ("subtract", False), ]) @pytest.mark.parametrize('operand,expect_failure', [ (2.0, u.UnitsError), # fail--units don't match image (2 * u.dimensionless_unscaled, 
u.UnitsError), # same (2 * u.adu, False), ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) def test_add_sub_overload(operand, expect_failure, with_uncertainty, operation, affects_uncertainty): ccd_data = create_ccd_data() if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = getattr(ccd_data, operation) np_method = getattr(np, operation) if expect_failure: with pytest.raises(expect_failure): result = method(operand) return else: result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): assert (result.unit == ccd_data.unit and result.unit == operand.unit) else: assert result.unit == ccd_data.unit def test_arithmetic_overload_fails(): ccd_data = create_ccd_data() with pytest.raises(TypeError): ccd_data.multiply("five") with pytest.raises(TypeError): ccd_data.divide("five") with pytest.raises(TypeError): ccd_data.add("five") with pytest.raises(TypeError): ccd_data.subtract("five") def test_arithmetic_no_wcs_compare(): ccd = CCDData(np.ones((10, 10)), unit='') assert ccd.add(ccd, compare_wcs=None).wcs is None assert ccd.subtract(ccd, compare_wcs=None).wcs is None assert ccd.multiply(ccd, compare_wcs=None).wcs is None assert ccd.divide(ccd, compare_wcs=None).wcs is None def test_arithmetic_with_wcs_compare(): def return_true(_, __): return True wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2) ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=wcs1) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=wcs2) nd_testing.assert_wcs_seem_equal( ccd1.add(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.subtract(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.multiply(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.divide(ccd2, compare_wcs=return_true).wcs, wcs1) def test_arithmetic_with_wcs_compare_fail(): def return_false(_, __): return False ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_false) def test_arithmetic_overload_ccddata_operand(): ccd_data = create_ccd_data() ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) 
result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(tmpdir): ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: _ = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. 
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) with pytest.warns(AstropyWarning, match=r'Some non-standard WCS keywords were excluded'): wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"): ccd = CCDData.read(data_file1, unit='count') def test_wcs_SIP_coefficient_keywords_removed(): # If SIP polynomials are present, check that no more polynomial # coefficients remain in the header. See #8598 # The SIP paper is ambiguous as to whether keywords like # A_0_0 can appear in the header for a 2nd order or higher # polynomial. The paper clearly says that the corrections # are only for quadratic or higher order, so A_0_0 and the like # should be zero if they are present, but they apparently can be # there (or at least astrometry.net produces them). # astropy WCS does not write those coefficients, so they were # not being removed from the header even though they are WCS-related. 
data_file = get_pkg_data_filename('data/sip-wcs.fits') test_keys = ['A_0_0', 'B_0_1'] # Make sure the keywords added to this file for testing are there with fits.open(data_file) as hdu: for key in test_keys: assert key in hdu[0].header ccd = CCDData.read(data_file) # Now the test...the two keywords above should have been removed. for key in test_keys: assert key not in ccd.header @pytest.mark.filterwarnings('ignore') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removal works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header. Includes regression test for #8597 """ from astropy.nddata.ccddata import _generate_wcs_and_update_header from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER, _CDs, _PCs) keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if ('invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr or 'chandra-pixlist-wcs' in hdr): continue header_string = get_pkg_data_contents(hdr) header = fits.Header.fromstring(header_string) wcs = WCS(header_string) header_from_wcs = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) new_wcs_header = new_wcs.to_header(relax=True) # Make sure all of the WCS-related keywords generated by astropy # have been removed. assert not (set(new_header) & set(new_wcs_header) - keepers) # Check that new_header contains no remaining WCS information. # Specifically, check that # 1. The combination of new_header and new_wcs does not contain # both PCi_j and CDi_j keywords. See #8597. # Check for 1 final_header = new_header + new_wcs_header final_header_set = set(final_header) if _PCs & final_header_set: assert not (_CDs & final_header_set) elif _CDs & final_header_set: assert not (_PCs & final_header_set) # Check that the new wcs is the same as the old. for k, v in new_wcs_header.items(): if isinstance(v, str): assert header_from_wcs[k] == v else: np.testing.assert_almost_equal(header_from_wcs[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. 
hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(): ccd_data = create_ccd_data() a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs result = ccd_data.multiply(1.0) nd_testing.assert_wcs_seem_equal(result.wcs, wcs) @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.wcs = WCS(naxis=2) method = getattr(ccd_data, operation) result = method(ccd_data2) nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs) assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = getattr(ccd_data, operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_default( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. 
ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, key_uncertainty_type='Blah') ccd_after = CCDData.read(filename, key_uncertainty_type='Blah') assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_read_old_style_multiextensionfits(tmpdir): # Regression test for https://github.com/astropy/ccdproc/issues/664 # # Prior to astropy 3.1 there was no uncertainty type saved # in the multiextension fits files generated by CCDData # because the uncertainty had to be StandardDevUncertainty. # # Current version should be able to read those in. # size = 4 # Value of the variables below are not important to the test. data = np.zeros([size, size]) mask = data > 0.9 uncert = np.sqrt(data) ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu') # We'll create the file manually to ensure we have the # right extension names and no uncertainty type. hdulist = ccd.to_hdu() del hdulist[2].header['UTYPE'] file_name = tmpdir.join('old_ccddata_mef.fits').strpath hdulist.writeto(file_name) ccd = CCDData.read(file_name) assert isinstance(ccd.uncertainty, StdDevUncertainty) def test_wcs(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs assert ccd_data.wcs is wcs def test_recognized_fits_formats_for_read_write(tmpdir): # These are the extensions that are supposed to be supported. ccd_data = create_ccd_data() supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join(f"test.{ext}") ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. 
ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None # https://github.com/astropy/astropy/issues/7595 def test_read_returns_image(tmpdir): # Test if CCData.read returns a image when reading a fits file containing # a table and image, in that order. tbl = Table(np.ones(10).reshape(5, 2)) img = np.ones((5, 5)) hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()), fits.ImageHDU(img)]) filename = tmpdir.join('table_image.fits').strpath hdul.writeto(filename) ccd = CCDData.read(filename, unit='adu') # Expecting to get (5, 5), the size of the image assert ccd.data.shape == (5, 5) # https://github.com/astropy/astropy/issues/9664 def test_sliced_ccdata_to_hdu(): wcs = WCS(naxis=2) wcs.wcs.crpix = 10, 10 ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel') trimmed = ccd[2:-2, 2:-2] hdul = trimmed.to_hdu() assert isinstance(hdul, fits.HDUList) assert hdul[0].header['CRPIX1'] == 8 assert hdul[0].header['CRPIX2'] == 8
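The arithmetic tests above pin down a concrete behaviour: combining two CCDData frames that each carry a StdDevUncertainty adds the data element-wise and propagates the standard deviations in quadrature. A minimal sketch of that expectation, with values chosen purely for illustration:

import numpy as np
from astropy import units as u
from astropy.nddata import CCDData, StdDevUncertainty

ccd = CCDData(np.full((2, 2), 3.0), unit=u.adu,
              uncertainty=StdDevUncertainty(np.ones((2, 2))))
result = ccd.add(ccd.copy())

# Data add element-wise; standard deviations combine as sqrt(1**2 + 1**2),
# matching test_arithmetic_overload_ccddata_operand above.
np.testing.assert_array_equal(result.data, np.full((2, 2), 6.0))
np.testing.assert_allclose(result.uncertainty.array, np.sqrt(2))
print(result.unit)  # adu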
repo_name: mhvk/astropy
test_path: astropy/nddata/tests/test_ccddata.py
code_path: astropy/io/ascii/sextractor.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst from io import StringIO from astropy.table import Table from astropy.nddata import CCDData def test_table_read_help_fits(): """ Test dynamically created documentation help via the I/O registry for 'fits'. """ out = StringIO() Table.read.help('fits', out) doc = out.getvalue() # Check a smattering of expected content assert "Table.read general documentation" not in doc assert "The available built-in formats" not in doc assert "Table.read(format='fits') documentation" in doc assert "hdu : int or str, optional" in doc def test_table_read_help_ascii(): """ Test dynamically created documentation help via the I/O registry for 'ascii'. """ out = StringIO() Table.read.help('ascii', out) doc = out.getvalue() # Check a smattering of expected content assert "Table.read general documentation" not in doc assert "The available built-in formats" not in doc assert "Table.read(format='ascii') documentation" in doc assert "delimiter : str" in doc assert "ASCII reader 'ascii' details" in doc assert "Character-delimited table with a single header line" in doc def test_table_write_help_hdf5(): """ Test dynamically created documentation help via the I/O registry for 'hdf5'. """ out = StringIO() Table.write.help('hdf5', out) doc = out.getvalue() # Check a smattering of expected content assert "Table.write general documentation" not in doc assert "The available built-in formats" not in doc assert "Table.write(format='hdf5') documentation" in doc assert "Write a Table object to an HDF5 file" in doc assert "compression : bool or str or int" in doc def test_list_formats(): """ Test getting list of available formats """ out = StringIO() CCDData.write.list_formats(out) output = out.getvalue() assert output == """\ Format Read Write Auto-identify ------ ---- ----- ------------- fits Yes Yes Yes""" def test_table_write_help_fits(): """ Test dynamically created documentation help via the I/O registry for 'fits'. """ out = StringIO() Table.write.help('fits', out) doc = out.getvalue() # Check a smattering of expected content assert "Table.write general documentation" not in doc assert "The available built-in formats" not in doc assert "Table.write(format='fits') documentation" in doc assert "Write a Table object to a FITS file" in doc def test_table_write_help_no_format(): """ Test dynamically created documentation help via the I/O registry for no format provided. """ out = StringIO() Table.write.help(out=out) doc = out.getvalue() # Check a smattering of expected content assert "Table.write general documentation" in doc assert "The available built-in formats" in doc def test_table_read_help_no_format(): """ Test dynamically created documentation help via the I/O registry for not format provided. """ out = StringIO() Table.read.help(out=out) doc = out.getvalue() # Check a smattering of expected content assert "Table.read general documentation" in doc assert "The available built-in formats" in doc def test_ccddata_write_help_fits(): """ Test dynamically created documentation help via the I/O registry for 'fits'. """ out = StringIO() CCDData.write.help('fits', out) doc = out.getvalue() # Check a smattering of expected content assert "CCDData.write(format='fits') documentation" in doc assert "Write CCDData object to FITS file" in doc assert "key_uncertainty_type : str, optional" in doc def test_ccddata_read_help_fits(): """Test dynamically created documentation help via the I/O registry for CCDData 'fits'. 
""" out = StringIO() CCDData.read.help('fits', out) doc = out.getvalue() # Check a smattering of expected content assert "CCDData.read(format='fits') documentation" in doc assert "Generate a CCDData object from a FITS file" in doc assert "hdu_uncertainty : str or None, optional" in doc def test_table_write_help_jsviewer(): """ Test dynamically created documentation help via the I/O registry for 'jsviewer'. """ out = StringIO() Table.write.help('jsviewer', out) doc = out.getvalue() # Check a smattering of expected content assert "Table.write general documentation" not in doc assert "The available built-in formats" not in doc assert "Table.write(format='jsviewer') documentation" in doc
# Licensed under a 3-clause BSD style license - see LICENSE.rst import textwrap import numpy as np import pytest from astropy.io import fits from astropy.nddata.nduncertainty import ( StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty, InverseVariance) from astropy import units as u from astropy import log from astropy.wcs import WCS, FITSFixedWarning from astropy.utils import NumpyRNGContext from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames, get_pkg_data_contents) from astropy.utils.exceptions import AstropyWarning from astropy.nddata.ccddata import CCDData from astropy.nddata import _testing as nd_testing from astropy.table import Table DEFAULT_DATA_SIZE = 100 with NumpyRNGContext(123): _random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE]) def create_ccd_data(): """ Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE with units of ADU. """ data = _random_array.copy() fake_meta = {'my_key': 42, 'your_key': 'not 42'} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd def test_ccddata_empty(): with pytest.raises(TypeError): CCDData() # empty initializer should fail def test_ccddata_must_have_unit(): with pytest.raises(ValueError): CCDData(np.zeros([2, 2])) def test_ccddata_unit_cannot_be_set_to_none(): ccd_data = create_ccd_data() with pytest.raises(TypeError): ccd_data.unit = None def test_ccddata_meta_header_conflict(): with pytest.raises(ValueError) as exc: CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2}) assert "can't have both header and meta." in str(exc.value) def test_ccddata_simple(): ccd_data = create_ccd_data() assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert ccd_data.dtype == np.dtype(float) def test_ccddata_init_with_string_electron_unit(): ccd = CCDData(np.zeros([2, 2]), unit="electron") assert ccd.unit is u.electron def test_initialize_from_FITS(tmpdir): ccd_data = create_ccd_data() hdu = fits.PrimaryHDU(ccd_data) hdulist = fits.HDUList([hdu]) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v def test_initialize_from_fits_with_unit_in_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = u.adu.to_string() filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? assert ccd.unit is u.adu # An explicit unit in the read overrides any unit in the FITS file ccd2 = CCDData.read(filename, unit="photon") assert ccd2.unit is u.photon def test_initialize_from_fits_with_ADU_in_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = 'ADU' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? 
assert ccd.unit is u.adu def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir): hdu = fits.PrimaryHDU(np.ones((2, 2))) hdu.header['bunit'] = 'definetely-not-a-unit' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) with pytest.raises(ValueError): CCDData.read(filename) def test_initialize_from_fits_with_technically_invalid_but_not_really(tmpdir): hdu = fits.PrimaryHDU(np.ones((2, 2))) hdu.header['bunit'] = 'ELECTRONS/S' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) assert ccd.unit == u.electron/u.s def test_initialize_from_fits_with_data_in_different_extension(tmpdir): fake_img = np.arange(4).reshape(2, 2) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(fake_img) hdus = fits.HDUList([hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img) # check that the header is the combined header assert hdu2.header + hdu1.header == ccd.header def test_initialize_from_fits_with_extension(tmpdir): fake_img1 = np.zeros([2, 2]) fake_img2 = np.arange(4).reshape(2, 2) hdu0 = fits.PrimaryHDU() hdu1 = fits.ImageHDU(fake_img1) hdu2 = fits.ImageHDU(fake_img2) hdus = fits.HDUList([hdu0, hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, hdu=2, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img2) def test_write_unit_to_hdu(): ccd_data = create_ccd_data() ccd_unit = ccd_data.unit hdulist = ccd_data.to_hdu() assert 'bunit' in hdulist[0].header assert hdulist[0].header['bunit'] == ccd_unit.to_string() def test_initialize_from_FITS_bad_keyword_raises_error(tmpdir): # There are two fits.open keywords that are not permitted in ccdproc: # do_not_scale_image_data and scale_back ccd_data = create_ccd_data() filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, scale_back=True) def test_ccddata_writer(tmpdir): ccd_data = create_ccd_data() filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) ccd_disk = CCDData.read(filename, unit=ccd_data.unit) np.testing.assert_array_equal(ccd_data.data, ccd_disk.data) def test_ccddata_meta_is_case_sensitive(): ccd_data = create_ccd_data() key = 'SoMeKEY' ccd_data.meta[key] = 10 assert key.lower() not in ccd_data.meta assert key.upper() not in ccd_data.meta assert key in ccd_data.meta def test_ccddata_meta_is_not_fits_header(): ccd_data = create_ccd_data() ccd_data.meta = {'OBSERVER': 'Edwin Hubble'} assert not isinstance(ccd_data.meta, fits.Header) def test_fromMEF(tmpdir): ccd_data = create_ccd_data() hdu = fits.PrimaryHDU(ccd_data) hdu2 = fits.PrimaryHDU(2 * ccd_data.data) hdulist = fits.HDUList(hdu) hdulist.append(hdu2) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) # by default, we reading from the first extension cd = CCDData.read(filename, unit=u.electron) np.testing.assert_array_equal(cd.data, ccd_data.data) # but reading from the second should work too cd = CCDData.read(filename, hdu=1, unit=u.electron) np.testing.assert_array_equal(cd.data, 2 * ccd_data.data) def test_metafromheader(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = 
'3600' d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromdict(): dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600} d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' def test_header2meta(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), unit=u.electron) d1.header = hdr assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromstring_fail(): hdr = 'this is not a valid header' with pytest.raises(TypeError): CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu) def test_setting_bad_uncertainty_raises_error(): ccd_data = create_ccd_data() with pytest.raises(TypeError): # Uncertainty is supposed to be an instance of NDUncertainty ccd_data.uncertainty = 10 def test_setting_uncertainty_with_array(): ccd_data = create_ccd_data() ccd_data.uncertainty = None fake_uncertainty = np.sqrt(np.abs(ccd_data.data)) ccd_data.uncertainty = fake_uncertainty.copy() np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty) def test_setting_uncertainty_wrong_shape_raises_error(): ccd_data = create_ccd_data() with pytest.raises(ValueError): ccd_data.uncertainty = np.zeros([3, 4]) def test_to_hdu(): ccd_data = create_ccd_data() ccd_data.meta = {'observer': 'Edwin Hubble'} fits_hdulist = ccd_data.to_hdu() assert isinstance(fits_hdulist, fits.HDUList) for k, v in ccd_data.meta.items(): assert fits_hdulist[0].header[k] == v np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data) def test_copy(): ccd_data = create_ccd_data() ccd_copy = ccd_data.copy() np.testing.assert_array_equal(ccd_copy.data, ccd_data.data) assert ccd_copy.unit == ccd_data.unit assert ccd_copy.meta == ccd_data.meta @pytest.mark.parametrize('operation,affects_uncertainty', [ ("multiply", True), ("divide", True), ]) @pytest.mark.parametrize('operand', [ 2.0, 2 * u.dimensionless_unscaled, 2 * u.photon / u.adu, ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) def test_mult_div_overload(operand, with_uncertainty, operation, affects_uncertainty): ccd_data = create_ccd_data() if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = getattr(ccd_data, operation) np_method = getattr(np, operation) result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): # Need the "1 *" below to force arguments to be Quantity to work around # astropy/astropy#2377 expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit assert result.unit == expected_unit else: assert result.unit == ccd_data.unit @pytest.mark.parametrize('operation,affects_uncertainty', [ ("add", False), ("subtract", False), ]) @pytest.mark.parametrize('operand,expect_failure', [ (2.0, u.UnitsError), # fail--units don't match image (2 * u.dimensionless_unscaled, 
u.UnitsError), # same (2 * u.adu, False), ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) def test_add_sub_overload(operand, expect_failure, with_uncertainty, operation, affects_uncertainty): ccd_data = create_ccd_data() if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = getattr(ccd_data, operation) np_method = getattr(np, operation) if expect_failure: with pytest.raises(expect_failure): result = method(operand) return else: result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): assert (result.unit == ccd_data.unit and result.unit == operand.unit) else: assert result.unit == ccd_data.unit def test_arithmetic_overload_fails(): ccd_data = create_ccd_data() with pytest.raises(TypeError): ccd_data.multiply("five") with pytest.raises(TypeError): ccd_data.divide("five") with pytest.raises(TypeError): ccd_data.add("five") with pytest.raises(TypeError): ccd_data.subtract("five") def test_arithmetic_no_wcs_compare(): ccd = CCDData(np.ones((10, 10)), unit='') assert ccd.add(ccd, compare_wcs=None).wcs is None assert ccd.subtract(ccd, compare_wcs=None).wcs is None assert ccd.multiply(ccd, compare_wcs=None).wcs is None assert ccd.divide(ccd, compare_wcs=None).wcs is None def test_arithmetic_with_wcs_compare(): def return_true(_, __): return True wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2) ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=wcs1) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=wcs2) nd_testing.assert_wcs_seem_equal( ccd1.add(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.subtract(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.multiply(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.divide(ccd2, compare_wcs=return_true).wcs, wcs1) def test_arithmetic_with_wcs_compare_fail(): def return_false(_, __): return False ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_false) def test_arithmetic_overload_ccddata_operand(): ccd_data = create_ccd_data() ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) 
result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(tmpdir): ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: _ = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. 
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) with pytest.warns(AstropyWarning, match=r'Some non-standard WCS keywords were excluded'): wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"): ccd = CCDData.read(data_file1, unit='count') def test_wcs_SIP_coefficient_keywords_removed(): # If SIP polynomials are present, check that no more polynomial # coefficients remain in the header. See #8598 # The SIP paper is ambiguous as to whether keywords like # A_0_0 can appear in the header for a 2nd order or higher # polynomial. The paper clearly says that the corrections # are only for quadratic or higher order, so A_0_0 and the like # should be zero if they are present, but they apparently can be # there (or at least astrometry.net produces them). # astropy WCS does not write those coefficients, so they were # not being removed from the header even though they are WCS-related. 
data_file = get_pkg_data_filename('data/sip-wcs.fits') test_keys = ['A_0_0', 'B_0_1'] # Make sure the keywords added to this file for testing are there with fits.open(data_file) as hdu: for key in test_keys: assert key in hdu[0].header ccd = CCDData.read(data_file) # Now the test...the two keywords above should have been removed. for key in test_keys: assert key not in ccd.header @pytest.mark.filterwarnings('ignore') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removal works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header. Includes regression test for #8597 """ from astropy.nddata.ccddata import _generate_wcs_and_update_header from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER, _CDs, _PCs) keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if ('invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr or 'chandra-pixlist-wcs' in hdr): continue header_string = get_pkg_data_contents(hdr) header = fits.Header.fromstring(header_string) wcs = WCS(header_string) header_from_wcs = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) new_wcs_header = new_wcs.to_header(relax=True) # Make sure all of the WCS-related keywords generated by astropy # have been removed. assert not (set(new_header) & set(new_wcs_header) - keepers) # Check that new_header contains no remaining WCS information. # Specifically, check that # 1. The combination of new_header and new_wcs does not contain # both PCi_j and CDi_j keywords. See #8597. # Check for 1 final_header = new_header + new_wcs_header final_header_set = set(final_header) if _PCs & final_header_set: assert not (_CDs & final_header_set) elif _CDs & final_header_set: assert not (_PCs & final_header_set) # Check that the new wcs is the same as the old. for k, v in new_wcs_header.items(): if isinstance(v, str): assert header_from_wcs[k] == v else: np.testing.assert_almost_equal(header_from_wcs[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. 
hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(): ccd_data = create_ccd_data() a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs result = ccd_data.multiply(1.0) nd_testing.assert_wcs_seem_equal(result.wcs, wcs) @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.wcs = WCS(naxis=2) method = getattr(ccd_data, operation) result = method(ccd_data2) nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs) assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = getattr(ccd_data, operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_default( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. 
ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, key_uncertainty_type='Blah') ccd_after = CCDData.read(filename, key_uncertainty_type='Blah') assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_read_old_style_multiextensionfits(tmpdir): # Regression test for https://github.com/astropy/ccdproc/issues/664 # # Prior to astropy 3.1 there was no uncertainty type saved # in the multiextension fits files generated by CCDData # because the uncertainty had to be StandardDevUncertainty. # # Current version should be able to read those in. # size = 4 # Value of the variables below are not important to the test. data = np.zeros([size, size]) mask = data > 0.9 uncert = np.sqrt(data) ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu') # We'll create the file manually to ensure we have the # right extension names and no uncertainty type. hdulist = ccd.to_hdu() del hdulist[2].header['UTYPE'] file_name = tmpdir.join('old_ccddata_mef.fits').strpath hdulist.writeto(file_name) ccd = CCDData.read(file_name) assert isinstance(ccd.uncertainty, StdDevUncertainty) def test_wcs(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs assert ccd_data.wcs is wcs def test_recognized_fits_formats_for_read_write(tmpdir): # These are the extensions that are supposed to be supported. ccd_data = create_ccd_data() supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join(f"test.{ext}") ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. 
ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None # https://github.com/astropy/astropy/issues/7595 def test_read_returns_image(tmpdir): # Test if CCData.read returns a image when reading a fits file containing # a table and image, in that order. tbl = Table(np.ones(10).reshape(5, 2)) img = np.ones((5, 5)) hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()), fits.ImageHDU(img)]) filename = tmpdir.join('table_image.fits').strpath hdul.writeto(filename) ccd = CCDData.read(filename, unit='adu') # Expecting to get (5, 5), the size of the image assert ccd.data.shape == (5, 5) # https://github.com/astropy/astropy/issues/9664 def test_sliced_ccdata_to_hdu(): wcs = WCS(naxis=2) wcs.wcs.crpix = 10, 10 ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel') trimmed = ccd[2:-2, 2:-2] hdul = trimmed.to_hdu() assert isinstance(hdul, fits.HDUList) assert hdul[0].header['CRPIX1'] == 8 assert hdul[0].header['CRPIX2'] == 8
mhvk/astropy
astropy/nddata/tests/test_ccddata.py
astropy/io/tests/test_registry_help.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from astropy import wcs from . helper import SimModelTAB @pytest.fixture(scope='module') def tab_wcs_2di(): model = SimModelTAB(nx=150, ny=200) # generate FITS HDU list: hdulist = model.hdulist # create WCS object: w = wcs.WCS(hdulist[0].header, hdulist) return w @pytest.fixture(scope='module') def tab_wcsh_2di(): model = SimModelTAB(nx=150, ny=200) # generate FITS HDU list: hdulist = model.hdulist # create WCS object: w = wcs.WCS(hdulist[0].header, hdulist) return w, hdulist @pytest.fixture(scope='function') def tab_wcs_2di_f(): model = SimModelTAB(nx=150, ny=200) # generate FITS HDU list: hdulist = model.hdulist # create WCS object: w = wcs.WCS(hdulist[0].header, hdulist) return w
mhvk/astropy
astropy/nddata/tests/test_ccddata.py
astropy/wcs/tests/conftest.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import time import sys import argparse from astropy import log, __version__ from .hub import SAMPHubServer __all__ = ['hub_script'] def hub_script(timeout=0): """ This main function is executed by the ``samp_hub`` command line tool. """ parser = argparse.ArgumentParser(prog="samp_hub " + __version__) parser.add_argument("-k", "--secret", dest="secret", metavar="CODE", help="custom secret code.") parser.add_argument("-d", "--addr", dest="addr", metavar="ADDR", help="listening address (or IP).") parser.add_argument("-p", "--port", dest="port", metavar="PORT", type=int, help="listening port number.") parser.add_argument("-f", "--lockfile", dest="lockfile", metavar="FILE", help="custom lockfile.") parser.add_argument("-w", "--no-web-profile", dest="web_profile", action="store_false", help="run the Hub disabling the Web Profile.", default=True) parser.add_argument("-P", "--pool-size", dest="pool_size", metavar="SIZE", type=int, help="the socket connections pool size.", default=20) timeout_group = parser.add_argument_group("Timeout group", "Special options to setup hub and client timeouts." "It contains a set of special options that allows to set up the Hub and " "clients inactivity timeouts, that is the Hub or client inactivity time " "interval after which the Hub shuts down or unregisters the client. " "Notification of samp.hub.disconnect MType is sent to the clients " "forcibly unregistered for timeout expiration.") timeout_group.add_argument("-t", "--timeout", dest="timeout", metavar="SECONDS", help="set the Hub inactivity timeout in SECONDS. By default it " "is set to 0, that is the Hub never expires.", type=int, default=0) timeout_group.add_argument("-c", "--client-timeout", dest="client_timeout", metavar="SECONDS", help="set the client inactivity timeout in SECONDS. By default it " "is set to 0, that is the client never expires.", type=int, default=0) parser.add_argument_group(timeout_group) log_group = parser.add_argument_group("Logging options", "Additional options which allow to customize the logging output. By " "default the SAMP Hub uses the standard output and standard error " "devices to print out INFO level logging messages. Using the options " "here below it is possible to modify the logging level and also " "specify the output files where redirect the logging messages.") log_group.add_argument("-L", "--log-level", dest="loglevel", metavar="LEVEL", help="set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).", type=str, choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"], default='INFO') log_group.add_argument("-O", "--log-output", dest="logout", metavar="FILE", help="set the output file for the log messages.", default="") parser.add_argument_group(log_group) adv_group = parser.add_argument_group("Advanced group", "Advanced options addressed to facilitate administrative tasks and " "allow new non-standard Hub behaviors. In particular the --label " "options is used to assign a value to hub.label token and is used to " "assign a name to the Hub instance. " "The very special --multi option allows to start a Hub in multi-instance mode. " "Multi-instance mode is a non-standard Hub behavior that enables " "multiple contemporaneous running Hubs. 
Multi-instance hubs place " "their non-standard lock-files within the <home directory>/.samp-1 " "directory naming them making use of the format: " "samp-hub-<PID>-<ID>, where PID is the Hub process ID while ID is an " "internal ID (integer).") adv_group.add_argument("-l", "--label", dest="label", metavar="LABEL", help="assign a LABEL to the Hub.", default="") adv_group.add_argument("-m", "--multi", dest="mode", help="run the Hub in multi-instance mode generating a custom " "lockfile with a random name.", action="store_const", const='multiple', default='single') parser.add_argument_group(adv_group) options = parser.parse_args() try: if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"): log.setLevel(options.loglevel) if options.logout != "": context = log.log_to_file(options.logout) else: class dummy_context: def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): pass context = dummy_context() with context: args = copy.deepcopy(options.__dict__) del(args["loglevel"]) del(args["logout"]) hub = SAMPHubServer(**args) hub.start(False) if not timeout: while hub.is_running: time.sleep(0.01) else: time.sleep(timeout) hub.stop() except KeyboardInterrupt: try: hub.stop() except NameError: pass except OSError as e: print(f"[SAMP] Error: I/O error({e.errno}): {e.strerror}") sys.exit(1) except SystemExit: pass
# Licensed under a 3-clause BSD style license - see LICENSE.rst import textwrap import numpy as np import pytest from astropy.io import fits from astropy.nddata.nduncertainty import ( StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty, InverseVariance) from astropy import units as u from astropy import log from astropy.wcs import WCS, FITSFixedWarning from astropy.utils import NumpyRNGContext from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames, get_pkg_data_contents) from astropy.utils.exceptions import AstropyWarning from astropy.nddata.ccddata import CCDData from astropy.nddata import _testing as nd_testing from astropy.table import Table DEFAULT_DATA_SIZE = 100 with NumpyRNGContext(123): _random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE]) def create_ccd_data(): """ Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE with units of ADU. """ data = _random_array.copy() fake_meta = {'my_key': 42, 'your_key': 'not 42'} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd def test_ccddata_empty(): with pytest.raises(TypeError): CCDData() # empty initializer should fail def test_ccddata_must_have_unit(): with pytest.raises(ValueError): CCDData(np.zeros([2, 2])) def test_ccddata_unit_cannot_be_set_to_none(): ccd_data = create_ccd_data() with pytest.raises(TypeError): ccd_data.unit = None def test_ccddata_meta_header_conflict(): with pytest.raises(ValueError) as exc: CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2}) assert "can't have both header and meta." in str(exc.value) def test_ccddata_simple(): ccd_data = create_ccd_data() assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert ccd_data.dtype == np.dtype(float) def test_ccddata_init_with_string_electron_unit(): ccd = CCDData(np.zeros([2, 2]), unit="electron") assert ccd.unit is u.electron def test_initialize_from_FITS(tmpdir): ccd_data = create_ccd_data() hdu = fits.PrimaryHDU(ccd_data) hdulist = fits.HDUList([hdu]) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v def test_initialize_from_fits_with_unit_in_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = u.adu.to_string() filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? assert ccd.unit is u.adu # An explicit unit in the read overrides any unit in the FITS file ccd2 = CCDData.read(filename, unit="photon") assert ccd2.unit is u.photon def test_initialize_from_fits_with_ADU_in_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = 'ADU' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? 
assert ccd.unit is u.adu def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir): hdu = fits.PrimaryHDU(np.ones((2, 2))) hdu.header['bunit'] = 'definetely-not-a-unit' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) with pytest.raises(ValueError): CCDData.read(filename) def test_initialize_from_fits_with_technically_invalid_but_not_really(tmpdir): hdu = fits.PrimaryHDU(np.ones((2, 2))) hdu.header['bunit'] = 'ELECTRONS/S' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) assert ccd.unit == u.electron/u.s def test_initialize_from_fits_with_data_in_different_extension(tmpdir): fake_img = np.arange(4).reshape(2, 2) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(fake_img) hdus = fits.HDUList([hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img) # check that the header is the combined header assert hdu2.header + hdu1.header == ccd.header def test_initialize_from_fits_with_extension(tmpdir): fake_img1 = np.zeros([2, 2]) fake_img2 = np.arange(4).reshape(2, 2) hdu0 = fits.PrimaryHDU() hdu1 = fits.ImageHDU(fake_img1) hdu2 = fits.ImageHDU(fake_img2) hdus = fits.HDUList([hdu0, hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, hdu=2, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img2) def test_write_unit_to_hdu(): ccd_data = create_ccd_data() ccd_unit = ccd_data.unit hdulist = ccd_data.to_hdu() assert 'bunit' in hdulist[0].header assert hdulist[0].header['bunit'] == ccd_unit.to_string() def test_initialize_from_FITS_bad_keyword_raises_error(tmpdir): # There are two fits.open keywords that are not permitted in ccdproc: # do_not_scale_image_data and scale_back ccd_data = create_ccd_data() filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, scale_back=True) def test_ccddata_writer(tmpdir): ccd_data = create_ccd_data() filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) ccd_disk = CCDData.read(filename, unit=ccd_data.unit) np.testing.assert_array_equal(ccd_data.data, ccd_disk.data) def test_ccddata_meta_is_case_sensitive(): ccd_data = create_ccd_data() key = 'SoMeKEY' ccd_data.meta[key] = 10 assert key.lower() not in ccd_data.meta assert key.upper() not in ccd_data.meta assert key in ccd_data.meta def test_ccddata_meta_is_not_fits_header(): ccd_data = create_ccd_data() ccd_data.meta = {'OBSERVER': 'Edwin Hubble'} assert not isinstance(ccd_data.meta, fits.Header) def test_fromMEF(tmpdir): ccd_data = create_ccd_data() hdu = fits.PrimaryHDU(ccd_data) hdu2 = fits.PrimaryHDU(2 * ccd_data.data) hdulist = fits.HDUList(hdu) hdulist.append(hdu2) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) # by default, we reading from the first extension cd = CCDData.read(filename, unit=u.electron) np.testing.assert_array_equal(cd.data, ccd_data.data) # but reading from the second should work too cd = CCDData.read(filename, hdu=1, unit=u.electron) np.testing.assert_array_equal(cd.data, 2 * ccd_data.data) def test_metafromheader(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = 
'3600' d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromdict(): dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600} d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' def test_header2meta(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), unit=u.electron) d1.header = hdr assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromstring_fail(): hdr = 'this is not a valid header' with pytest.raises(TypeError): CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu) def test_setting_bad_uncertainty_raises_error(): ccd_data = create_ccd_data() with pytest.raises(TypeError): # Uncertainty is supposed to be an instance of NDUncertainty ccd_data.uncertainty = 10 def test_setting_uncertainty_with_array(): ccd_data = create_ccd_data() ccd_data.uncertainty = None fake_uncertainty = np.sqrt(np.abs(ccd_data.data)) ccd_data.uncertainty = fake_uncertainty.copy() np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty) def test_setting_uncertainty_wrong_shape_raises_error(): ccd_data = create_ccd_data() with pytest.raises(ValueError): ccd_data.uncertainty = np.zeros([3, 4]) def test_to_hdu(): ccd_data = create_ccd_data() ccd_data.meta = {'observer': 'Edwin Hubble'} fits_hdulist = ccd_data.to_hdu() assert isinstance(fits_hdulist, fits.HDUList) for k, v in ccd_data.meta.items(): assert fits_hdulist[0].header[k] == v np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data) def test_copy(): ccd_data = create_ccd_data() ccd_copy = ccd_data.copy() np.testing.assert_array_equal(ccd_copy.data, ccd_data.data) assert ccd_copy.unit == ccd_data.unit assert ccd_copy.meta == ccd_data.meta @pytest.mark.parametrize('operation,affects_uncertainty', [ ("multiply", True), ("divide", True), ]) @pytest.mark.parametrize('operand', [ 2.0, 2 * u.dimensionless_unscaled, 2 * u.photon / u.adu, ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) def test_mult_div_overload(operand, with_uncertainty, operation, affects_uncertainty): ccd_data = create_ccd_data() if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = getattr(ccd_data, operation) np_method = getattr(np, operation) result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): # Need the "1 *" below to force arguments to be Quantity to work around # astropy/astropy#2377 expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit assert result.unit == expected_unit else: assert result.unit == ccd_data.unit @pytest.mark.parametrize('operation,affects_uncertainty', [ ("add", False), ("subtract", False), ]) @pytest.mark.parametrize('operand,expect_failure', [ (2.0, u.UnitsError), # fail--units don't match image (2 * u.dimensionless_unscaled, 
u.UnitsError), # same (2 * u.adu, False), ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) def test_add_sub_overload(operand, expect_failure, with_uncertainty, operation, affects_uncertainty): ccd_data = create_ccd_data() if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = getattr(ccd_data, operation) np_method = getattr(np, operation) if expect_failure: with pytest.raises(expect_failure): result = method(operand) return else: result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): assert (result.unit == ccd_data.unit and result.unit == operand.unit) else: assert result.unit == ccd_data.unit def test_arithmetic_overload_fails(): ccd_data = create_ccd_data() with pytest.raises(TypeError): ccd_data.multiply("five") with pytest.raises(TypeError): ccd_data.divide("five") with pytest.raises(TypeError): ccd_data.add("five") with pytest.raises(TypeError): ccd_data.subtract("five") def test_arithmetic_no_wcs_compare(): ccd = CCDData(np.ones((10, 10)), unit='') assert ccd.add(ccd, compare_wcs=None).wcs is None assert ccd.subtract(ccd, compare_wcs=None).wcs is None assert ccd.multiply(ccd, compare_wcs=None).wcs is None assert ccd.divide(ccd, compare_wcs=None).wcs is None def test_arithmetic_with_wcs_compare(): def return_true(_, __): return True wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2) ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=wcs1) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=wcs2) nd_testing.assert_wcs_seem_equal( ccd1.add(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.subtract(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.multiply(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.divide(ccd2, compare_wcs=return_true).wcs, wcs1) def test_arithmetic_with_wcs_compare_fail(): def return_false(_, __): return False ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_false) def test_arithmetic_overload_ccddata_operand(): ccd_data = create_ccd_data() ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) 
result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(tmpdir): ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: _ = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. 
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) with pytest.warns(AstropyWarning, match=r'Some non-standard WCS keywords were excluded'): wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"): ccd = CCDData.read(data_file1, unit='count') def test_wcs_SIP_coefficient_keywords_removed(): # If SIP polynomials are present, check that no more polynomial # coefficients remain in the header. See #8598 # The SIP paper is ambiguous as to whether keywords like # A_0_0 can appear in the header for a 2nd order or higher # polynomial. The paper clearly says that the corrections # are only for quadratic or higher order, so A_0_0 and the like # should be zero if they are present, but they apparently can be # there (or at least astrometry.net produces them). # astropy WCS does not write those coefficients, so they were # not being removed from the header even though they are WCS-related. 
data_file = get_pkg_data_filename('data/sip-wcs.fits') test_keys = ['A_0_0', 'B_0_1'] # Make sure the keywords added to this file for testing are there with fits.open(data_file) as hdu: for key in test_keys: assert key in hdu[0].header ccd = CCDData.read(data_file) # Now the test...the two keywords above should have been removed. for key in test_keys: assert key not in ccd.header @pytest.mark.filterwarnings('ignore') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removal works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header. Includes regression test for #8597 """ from astropy.nddata.ccddata import _generate_wcs_and_update_header from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER, _CDs, _PCs) keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if ('invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr or 'chandra-pixlist-wcs' in hdr): continue header_string = get_pkg_data_contents(hdr) header = fits.Header.fromstring(header_string) wcs = WCS(header_string) header_from_wcs = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) new_wcs_header = new_wcs.to_header(relax=True) # Make sure all of the WCS-related keywords generated by astropy # have been removed. assert not (set(new_header) & set(new_wcs_header) - keepers) # Check that new_header contains no remaining WCS information. # Specifically, check that # 1. The combination of new_header and new_wcs does not contain # both PCi_j and CDi_j keywords. See #8597. # Check for 1 final_header = new_header + new_wcs_header final_header_set = set(final_header) if _PCs & final_header_set: assert not (_CDs & final_header_set) elif _CDs & final_header_set: assert not (_PCs & final_header_set) # Check that the new wcs is the same as the old. for k, v in new_wcs_header.items(): if isinstance(v, str): assert header_from_wcs[k] == v else: np.testing.assert_almost_equal(header_from_wcs[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. 
hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(): ccd_data = create_ccd_data() a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs result = ccd_data.multiply(1.0) nd_testing.assert_wcs_seem_equal(result.wcs, wcs) @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.wcs = WCS(naxis=2) method = getattr(ccd_data, operation) result = method(ccd_data2) nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs) assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = getattr(ccd_data, operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_default( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. 
ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, key_uncertainty_type='Blah') ccd_after = CCDData.read(filename, key_uncertainty_type='Blah') assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_read_old_style_multiextensionfits(tmpdir): # Regression test for https://github.com/astropy/ccdproc/issues/664 # # Prior to astropy 3.1 there was no uncertainty type saved # in the multiextension fits files generated by CCDData # because the uncertainty had to be StandardDevUncertainty. # # Current version should be able to read those in. # size = 4 # Value of the variables below are not important to the test. data = np.zeros([size, size]) mask = data > 0.9 uncert = np.sqrt(data) ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu') # We'll create the file manually to ensure we have the # right extension names and no uncertainty type. hdulist = ccd.to_hdu() del hdulist[2].header['UTYPE'] file_name = tmpdir.join('old_ccddata_mef.fits').strpath hdulist.writeto(file_name) ccd = CCDData.read(file_name) assert isinstance(ccd.uncertainty, StdDevUncertainty) def test_wcs(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs assert ccd_data.wcs is wcs def test_recognized_fits_formats_for_read_write(tmpdir): # These are the extensions that are supposed to be supported. ccd_data = create_ccd_data() supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join(f"test.{ext}") ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. 
ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None # https://github.com/astropy/astropy/issues/7595 def test_read_returns_image(tmpdir): # Test if CCData.read returns a image when reading a fits file containing # a table and image, in that order. tbl = Table(np.ones(10).reshape(5, 2)) img = np.ones((5, 5)) hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()), fits.ImageHDU(img)]) filename = tmpdir.join('table_image.fits').strpath hdul.writeto(filename) ccd = CCDData.read(filename, unit='adu') # Expecting to get (5, 5), the size of the image assert ccd.data.shape == (5, 5) # https://github.com/astropy/astropy/issues/9664 def test_sliced_ccdata_to_hdu(): wcs = WCS(naxis=2) wcs.wcs.crpix = 10, 10 ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel') trimmed = ccd[2:-2, 2:-2] hdul = trimmed.to_hdu() assert isinstance(hdul, fits.HDUList) assert hdul[0].header['CRPIX1'] == 8 assert hdul[0].header['CRPIX2'] == 8
repo_name: mhvk/astropy
test_path: astropy/nddata/tests/test_ccddata.py
code_path: astropy/samp/hub_script.py
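# --- Illustrative sketch (not part of the dataset row above) ---
# The test file above repeatedly exercises one pattern: build a CCDData,
# write it to FITS, read it back, and compare data, mask and uncertainty.
# A minimal sketch of that round trip, using only calls exercised by the
# tests; the output file name is arbitrary.
import numpy as np

from astropy import units as u
from astropy.nddata import CCDData, StdDevUncertainty

ccd = CCDData(np.ones((4, 4)), unit=u.adu)
ccd.mask = ccd.data > 10                      # saved in a mask extension by default
ccd.uncertainty = StdDevUncertainty(np.sqrt(ccd.data))

ccd.write('roundtrip.fits', overwrite=True)
back = CCDData.read('roundtrip.fits')         # unit picked up from BUNIT in the header

np.testing.assert_array_equal(back.data, ccd.data)
np.testing.assert_array_equal(back.mask, ccd.mask)
assert isinstance(back.uncertainty, StdDevUncertainty)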
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # This module includes files automatically generated from ply (these end in # _lextab.py and _parsetab.py). To generate these files, remove them from this # folder, then build astropy and run the tests in-place: # # python setup.py build_ext --inplace # pytest astropy/coordinates # # You can then commit the changes to the re-generated _lextab.py and # _parsetab.py files. """ This module contains formatting functions that are for internal use in astropy.coordinates.angles. Mainly they are conversions from one format of data to another. """ import os import threading from warnings import warn import numpy as np from .errors import (IllegalHourWarning, IllegalHourError, IllegalMinuteWarning, IllegalMinuteError, IllegalSecondWarning, IllegalSecondError) from astropy.utils import format_exception, parsing from astropy import units as u class _AngleParser: """ Parses the various angle formats including: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s * 1°2′3″N This class should not be used directly. Use `parse_angle` instead. """ # For safe multi-threaded operation all class (but not instance) # members that carry state should be thread-local. They are stored # in the following class member _thread_local = threading.local() def __init__(self): # TODO: in principle, the parser should be invalidated if we change unit # system (from CDS to FITS, say). Might want to keep a link to the # unit_registry used, and regenerate the parser/lexer if it changes. # Alternatively, perhaps one should not worry at all and just pre- # generate the parser for each release (as done for unit formats). # For some discussion of this problem, see # https://github.com/astropy/astropy/issues/5350#issuecomment-248770151 if '_parser' not in _AngleParser._thread_local.__dict__: (_AngleParser._thread_local._parser, _AngleParser._thread_local._lexer) = self._make_parser() @classmethod def _get_simple_unit_names(cls): simple_units = set( u.radian.find_equivalent_units(include_prefix_units=True)) simple_unit_names = set() # We filter out degree and hourangle, since those are treated # separately. for unit in simple_units: if unit != u.deg and unit != u.hourangle: simple_unit_names.update(unit.names) return sorted(simple_unit_names) @classmethod def _make_parser(cls): from astropy.extern.ply import lex, yacc # List of token names. tokens = ( 'SIGN', 'UINT', 'UFLOAT', 'COLON', 'DEGREE', 'HOUR', 'MINUTE', 'SECOND', 'SIMPLE_UNIT', 'EASTWEST', 'NORTHSOUTH' ) # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!! # Regular expression rules for simple tokens def t_UFLOAT(t): r'((\d+\.\d*)|(\.\d+))([eE][+-−]?\d+)?' # The above includes Unicode "MINUS SIGN" \u2212. It is # important to include the hyphen last, or the regex will # treat this as a range. t.value = float(t.value.replace('−', '-')) return t def t_UINT(t): r'\d+' t.value = int(t.value) return t def t_SIGN(t): r'[+−-]' # The above include Unicode "MINUS SIGN" \u2212. It is # important to include the hyphen last, or the regex will # treat this as a range. 
if t.value == '+': t.value = 1.0 else: t.value = -1.0 return t def t_EASTWEST(t): r'[EW]$' t.value = -1.0 if t.value == 'W' else 1.0 return t def t_NORTHSOUTH(t): r'[NS]$' # We cannot use lower-case letters otherwise we'll confuse # s[outh] with s[econd] t.value = -1.0 if t.value == 'S' else 1.0 return t def t_SIMPLE_UNIT(t): t.value = u.Unit(t.value) return t t_SIMPLE_UNIT.__doc__ = '|'.join( f'(?:{x})' for x in cls._get_simple_unit_names()) t_COLON = ':' t_DEGREE = r'd(eg(ree(s)?)?)?|°' t_HOUR = r'hour(s)?|h(r)?|ʰ' t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ' t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ' # A string containing ignored characters (spaces) t_ignore = ' ' # Error handling rule def t_error(t): raise ValueError( f"Invalid character at col {t.lexpos}") lexer = parsing.lex(lextab='angle_lextab', package='astropy/coordinates') def p_angle(p): ''' angle : sign hms eastwest | sign dms dir | sign arcsecond dir | sign arcminute dir | sign simple dir ''' sign = p[1] * p[3] value, unit = p[2] if isinstance(value, tuple): p[0] = ((sign * value[0],) + value[1:], unit) else: p[0] = (sign * value, unit) def p_sign(p): ''' sign : SIGN | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_eastwest(p): ''' eastwest : EASTWEST | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_dir(p): ''' dir : EASTWEST | NORTHSOUTH | ''' if len(p) == 2: p[0] = p[1] else: p[0] = 1.0 def p_ufloat(p): ''' ufloat : UFLOAT | UINT ''' p[0] = p[1] def p_colon(p): ''' colon : UINT COLON ufloat | UINT COLON UINT COLON ufloat ''' if len(p) == 4: p[0] = (p[1], p[3]) elif len(p) == 6: p[0] = (p[1], p[3], p[5]) def p_spaced(p): ''' spaced : UINT ufloat | UINT UINT ufloat ''' if len(p) == 3: p[0] = (p[1], p[2]) elif len(p) == 4: p[0] = (p[1], p[2], p[3]) def p_generic(p): ''' generic : colon | spaced | ufloat ''' p[0] = p[1] def p_hms(p): ''' hms : UINT HOUR | UINT HOUR ufloat | UINT HOUR UINT MINUTE | UINT HOUR UFLOAT MINUTE | UINT HOUR UINT MINUTE ufloat | UINT HOUR UINT MINUTE ufloat SECOND | generic HOUR ''' if len(p) == 3: p[0] = (p[1], u.hourangle) elif len(p) in (4, 5): p[0] = ((p[1], p[3]), u.hourangle) elif len(p) in (6, 7): p[0] = ((p[1], p[3], p[5]), u.hourangle) def p_dms(p): ''' dms : UINT DEGREE | UINT DEGREE ufloat | UINT DEGREE UINT MINUTE | UINT DEGREE UFLOAT MINUTE | UINT DEGREE UINT MINUTE ufloat | UINT DEGREE UINT MINUTE ufloat SECOND | generic DEGREE ''' if len(p) == 3: p[0] = (p[1], u.degree) elif len(p) in (4, 5): p[0] = ((p[1], p[3]), u.degree) elif len(p) in (6, 7): p[0] = ((p[1], p[3], p[5]), u.degree) def p_simple(p): ''' simple : generic | generic SIMPLE_UNIT ''' if len(p) == 2: p[0] = (p[1], None) else: p[0] = (p[1], p[2]) def p_arcsecond(p): ''' arcsecond : generic SECOND ''' p[0] = (p[1], u.arcsecond) def p_arcminute(p): ''' arcminute : generic MINUTE ''' p[0] = (p[1], u.arcminute) def p_error(p): raise ValueError parser = parsing.yacc(tabmodule='angle_parsetab', package='astropy/coordinates') return parser, lexer def parse(self, angle, unit, debug=False): try: found_angle, found_unit = self._thread_local._parser.parse( angle, lexer=self._thread_local._lexer, debug=debug) except ValueError as e: if str(e): raise ValueError(f"{str(e)} in angle {angle!r}") else: raise ValueError( f"Syntax error parsing angle {angle!r}") if unit is None and found_unit is None: raise u.UnitsError("No unit specified") return found_angle, found_unit def _check_hour_range(hrs): """ Checks that the given value is in the range (-24, 24). 
""" if np.any(np.abs(hrs) == 24.): warn(IllegalHourWarning(hrs, 'Treating as 24 hr')) elif np.any(hrs < -24.) or np.any(hrs > 24.): raise IllegalHourError(hrs) def _check_minute_range(m): """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if np.any(m == 60.): warn(IllegalMinuteWarning(m, 'Treating as 0 min, +1 hr/deg')) elif np.any(m < -60.) or np.any(m > 60.): # "Error: minutes not in range [-60,60) ({0}).".format(min)) raise IllegalMinuteError(m) def _check_second_range(sec): """ Checks that the given value is in the range [0,60]. If the value is equal to 60, then a warning is raised. """ if np.any(sec == 60.): warn(IllegalSecondWarning(sec, 'Treating as 0 sec, +1 min')) elif sec is None: pass elif np.any(sec < -60.) or np.any(sec > 60.): # "Error: seconds not in range [-60,60) ({0}).".format(sec)) raise IllegalSecondError(sec) def check_hms_ranges(h, m, s): """ Checks that the given hour, minute and second are all within reasonable range. """ _check_hour_range(h) _check_minute_range(m) _check_second_range(s) return None def parse_angle(angle, unit=None, debug=False): """ Parses an input string value into an angle value. Parameters ---------- angle : str A string representing the angle. May be in one of the following forms: * 01:02:30.43 degrees * 1 2 0 hours * 1°2′3″ * 1d2m3s * -1h2m3s unit : `~astropy.units.UnitBase` instance, optional The unit used to interpret the string. If ``unit`` is not provided, the unit must be explicitly represented in the string, either at the end or as number separators. debug : bool, optional If `True`, print debugging information from the parser. Returns ------- value, unit : tuple ``value`` is the value as a floating point number or three-part tuple, and ``unit`` is a `Unit` instance which is either the unit passed in or the one explicitly mentioned in the input string. """ return _AngleParser().parse(angle, unit, debug=debug) def degrees_to_dms(d): """ Convert a floating-point degree value into a ``(degree, arcminute, arcsecond)`` tuple. """ sign = np.copysign(1.0, d) (df, d) = np.modf(np.abs(d)) # (degree fraction, degree) (mf, m) = np.modf(df * 60.) # (minute fraction, minute) s = mf * 60. return np.floor(sign * d), sign * np.floor(m), sign * s def dms_to_degrees(d, m, s=None): """ Convert degrees, arcminute, arcsecond to a float degrees value. """ _check_minute_range(m) _check_second_range(s) # determine sign sign = np.copysign(1.0, d) try: d = np.floor(np.abs(d)) if s is None: m = np.abs(m) s = 0 else: m = np.floor(np.abs(m)) s = np.abs(s) except ValueError: raise ValueError(format_exception( "{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be " "converted to numbers.", d, m, s)) return sign * (d + m / 60. + s / 3600.) def hms_to_hours(h, m, s=None): """ Convert hour, minute, second to a float hour value. """ check_hms_ranges(h, m, s) # determine sign sign = np.copysign(1.0, h) try: h = np.floor(np.abs(h)) if s is None: m = np.abs(m) s = 0 else: m = np.floor(np.abs(m)) s = np.abs(s) except ValueError: raise ValueError(format_exception( "{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be " "converted to numbers.", h, m, s)) return sign * (h + m / 60. + s / 3600.) def hms_to_degrees(h, m, s): """ Convert hour, minute, second to a float degrees value. """ return hms_to_hours(h, m, s) * 15. def hms_to_radians(h, m, s): """ Convert hour, minute, second to a float radians value. 
""" return u.degree.to(u.radian, hms_to_degrees(h, m, s)) def hms_to_dms(h, m, s): """ Convert degrees, arcminutes, arcseconds to an ``(hour, minute, second)`` tuple. """ return degrees_to_dms(hms_to_degrees(h, m, s)) def hours_to_decimal(h): """ Convert any parseable hour value into a float value. """ from . import angles return angles.Angle(h, unit=u.hourangle).hour def hours_to_radians(h): """ Convert an angle in Hours to Radians. """ return u.hourangle.to(u.radian, h) def hours_to_hms(h): """ Convert an floating-point hour value into an ``(hour, minute, second)`` tuple. """ sign = np.copysign(1.0, h) (hf, h) = np.modf(np.abs(h)) # (degree fraction, degree) (mf, m) = np.modf(hf * 60.0) # (minute fraction, minute) s = mf * 60.0 return (np.floor(sign * h), sign * np.floor(m), sign * s) def radians_to_degrees(r): """ Convert an angle in Radians to Degrees. """ return u.radian.to(u.degree, r) def radians_to_hours(r): """ Convert an angle in Radians to Hours. """ return u.radian.to(u.hourangle, r) def radians_to_hms(r): """ Convert an angle in Radians to an ``(hour, minute, second)`` tuple. """ hours = radians_to_hours(r) return hours_to_hms(hours) def radians_to_dms(r): """ Convert an angle in Radians to an ``(degree, arcminute, arcsecond)`` tuple. """ degrees = u.radian.to(u.degree, r) return degrees_to_dms(degrees) def sexagesimal_to_string(values, precision=None, pad=False, sep=(':',), fields=3): """ Given an already separated tuple of sexagesimal values, returns a string. See `hours_to_string` and `degrees_to_string` for a higher-level interface to this functionality. """ # Check to see if values[0] is negative, using np.copysign to handle -0 sign = np.copysign(1.0, values[0]) # If the coordinates are negative, we need to take the absolute values. # We use np.abs because abs(-0) is -0 # TODO: Is this true? (MHvK, 2018-02-01: not on my system) values = [np.abs(value) for value in values] if pad: if sign == -1: pad = 3 else: pad = 2 else: pad = 0 if not isinstance(sep, tuple): sep = tuple(sep) if fields < 1 or fields > 3: raise ValueError( "fields must be 1, 2, or 3") if not sep: # empty string, False, or None, etc. sep = ('', '', '') elif len(sep) == 1: if fields == 3: sep = sep + (sep[0], '') elif fields == 2: sep = sep + ('', '') else: sep = ('', '', '') elif len(sep) == 2: sep = sep + ('',) elif len(sep) != 3: raise ValueError( "Invalid separator specification for converting angle to string.") # Simplify the expression based on the requested precision. For # example, if the seconds will round up to 60, we should convert # it to 0 and carry upwards. If the field is hidden (by the # fields kwarg) we round up around the middle, 30.0. 
if precision is None: rounding_thresh = 60.0 - (10.0 ** -8) else: rounding_thresh = 60.0 - (10.0 ** -precision) if fields == 3 and values[2] >= rounding_thresh: values[2] = 0.0 values[1] += 1.0 elif fields < 3 and values[2] >= 30.0: values[1] += 1.0 if fields >= 2 and values[1] >= 60.0: values[1] = 0.0 values[0] += 1.0 elif fields < 2 and values[1] >= 30.0: values[0] += 1.0 literal = [] last_value = '' literal.append('{0:0{pad}.0f}{sep[0]}') if fields >= 2: literal.append('{1:02d}{sep[1]}') if fields == 3: if precision is None: last_value = f'{abs(values[2]):.8f}' last_value = last_value.rstrip('0').rstrip('.') else: last_value = '{0:.{precision}f}'.format( abs(values[2]), precision=precision) if len(last_value) == 1 or last_value[1] == '.': last_value = '0' + last_value literal.append('{last_value}{sep[2]}') literal = ''.join(literal) return literal.format(np.copysign(values[0], sign), int(values[1]), values[2], sep=sep, pad=pad, last_value=last_value) def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's'), fields=3): """ Takes a decimal hour value and returns a string formatted as hms with separator specified by the 'sep' parameter. ``h`` must be a scalar. """ h, m, s = hours_to_hms(h) return sexagesimal_to_string((h, m, s), precision=precision, pad=pad, sep=sep, fields=fields) def degrees_to_string(d, precision=5, pad=False, sep=':', fields=3): """ Takes a decimal hour value and returns a string formatted as dms with separator specified by the 'sep' parameter. ``d`` must be a scalar. """ d, m, s = degrees_to_dms(d) return sexagesimal_to_string((d, m, s), precision=precision, pad=pad, sep=sep, fields=fields)
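# --- Illustrative sketch (not part of the module above) ---
# The conversion helpers above reduce to plain sexagesimal arithmetic:
# value = sign * (d + m/60 + s/3600), with hours mapped to degrees by a
# factor of 15. A standalone restatement of that arithmetic (hypothetical
# helper names, independent of the astropy internals above):
import math


def dms_to_deg(d, m, s):
    """Degrees/arcminutes/arcseconds to decimal degrees, carrying the
    sign of the degree field, as dms_to_degrees above does."""
    sign = math.copysign(1.0, d)
    return sign * (abs(d) + abs(m) / 60.0 + abs(s) / 3600.0)


def hms_to_deg(h, m, s):
    """Hours/minutes/seconds to decimal degrees: one hour is 15 degrees."""
    return dms_to_deg(h, m, s) * 15.0


# 10d30m36s is 10.51 deg; 1h02m03s is 15.5125 deg.
assert math.isclose(dms_to_deg(10, 30, 36), 10.51)
assert math.isclose(hms_to_deg(1, 2, 3), 15.5125)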
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = getattr(ccd_data, operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_default( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. 
ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, key_uncertainty_type='Blah') ccd_after = CCDData.read(filename, key_uncertainty_type='Blah') assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_read_old_style_multiextensionfits(tmpdir): # Regression test for https://github.com/astropy/ccdproc/issues/664 # # Prior to astropy 3.1 there was no uncertainty type saved # in the multiextension fits files generated by CCDData # because the uncertainty had to be StandardDevUncertainty. # # Current version should be able to read those in. # size = 4 # Value of the variables below are not important to the test. data = np.zeros([size, size]) mask = data > 0.9 uncert = np.sqrt(data) ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu') # We'll create the file manually to ensure we have the # right extension names and no uncertainty type. hdulist = ccd.to_hdu() del hdulist[2].header['UTYPE'] file_name = tmpdir.join('old_ccddata_mef.fits').strpath hdulist.writeto(file_name) ccd = CCDData.read(file_name) assert isinstance(ccd.uncertainty, StdDevUncertainty) def test_wcs(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs assert ccd_data.wcs is wcs def test_recognized_fits_formats_for_read_write(tmpdir): # These are the extensions that are supposed to be supported. ccd_data = create_ccd_data() supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join(f"test.{ext}") ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. 
ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None # https://github.com/astropy/astropy/issues/7595 def test_read_returns_image(tmpdir): # Test if CCData.read returns a image when reading a fits file containing # a table and image, in that order. tbl = Table(np.ones(10).reshape(5, 2)) img = np.ones((5, 5)) hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()), fits.ImageHDU(img)]) filename = tmpdir.join('table_image.fits').strpath hdul.writeto(filename) ccd = CCDData.read(filename, unit='adu') # Expecting to get (5, 5), the size of the image assert ccd.data.shape == (5, 5) # https://github.com/astropy/astropy/issues/9664 def test_sliced_ccdata_to_hdu(): wcs = WCS(naxis=2) wcs.wcs.crpix = 10, 10 ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel') trimmed = ccd[2:-2, 2:-2] hdul = trimmed.to_hdu() assert isinstance(hdul, fits.HDUList) assert hdul[0].header['CRPIX1'] == 8 assert hdul[0].header['CRPIX2'] == 8
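# --- Editorial sketch (not part of the astropy test suite) ---
# A minimal, standalone illustration of the error-propagation rule asserted in
# test_arithmetic_overload_ccddata_operand above: for two CCDData operands
# treated as independent, multiplication propagates standard deviations as
#     sigma_z = |z| * sqrt((sigma_x / x)**2 + (sigma_y / y)**2),
# which for identical operands reduces to sqrt(2) * |x| * sigma_x.
# The array values below are illustrative only.
import numpy as np
from astropy import units as u
from astropy.nddata import CCDData, StdDevUncertainty

x = CCDData(np.array([2.0, 4.0]), unit=u.adu,
            uncertainty=StdDevUncertainty([0.1, 0.2]))
product = x.multiply(x.copy())
np.testing.assert_allclose(product.uncertainty.array,
                           np.sqrt(2) * np.abs(x.data) * x.uncertainty.array)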
mhvk/astropy
astropy/nddata/tests/test_ccddata.py
astropy/coordinates/angle_formats.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tabular models. Tabular models of any dimension can be created using `tabular_model`. For convenience `Tabular1D` and `Tabular2D` are provided. Examples -------- >>> table = np.array([[ 3., 0., 0.], ... [ 0., 2., 0.], ... [ 0., 0., 0.]]) >>> points = ([1, 2, 3], [1, 2, 3]) >>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False, ... fill_value=None, method='nearest') """ # pylint: disable=invalid-name import abc import numpy as np from astropy import units as u from .core import Model try: from scipy.interpolate import interpn has_scipy = True except ImportError: has_scipy = False __all__ = ['tabular_model', 'Tabular1D', 'Tabular2D'] __doctest_requires__ = {('tabular_model'): ['scipy']} class _Tabular(Model): """ Returns an interpolated lookup table value. Parameters ---------- points : tuple of ndarray of float, optional The points defining the regular grid in n dimensions. ndarray must have shapes (m1, ), ..., (mn, ), lookup_table : array-like The data on a regular grid in n dimensions. Must have shapes (m1, ..., mn, ...) method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then ``fill_value`` is used. fill_value : float or `~astropy.units.Quantity`, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Extrapolation is not supported by method "splinef2d". If Quantity is given, it will be converted to the unit of ``lookup_table``, if applicable. Returns ------- value : ndarray Interpolated values at input coordinates. Raises ------ ImportError Scipy is not installed. Notes ----- Uses `scipy.interpolate.interpn`. 
""" linear = False fittable = False standard_broadcasting = False @property @abc.abstractmethod def lookup_table(self): pass _is_dynamic = True _id = 0 def __init__(self, points=None, lookup_table=None, method='linear', bounds_error=True, fill_value=np.nan, **kwargs): n_models = kwargs.get('n_models', 1) if n_models > 1: raise NotImplementedError('Only n_models=1 is supported.') super().__init__(**kwargs) self.outputs = ("y",) if lookup_table is None: raise ValueError('Must provide a lookup table.') if not isinstance(lookup_table, u.Quantity): lookup_table = np.asarray(lookup_table) if self.lookup_table.ndim != lookup_table.ndim: raise ValueError("lookup_table should be an array with " "{} dimensions.".format(self.lookup_table.ndim)) if points is None: points = tuple(np.arange(x, dtype=float) for x in lookup_table.shape) else: if lookup_table.ndim == 1 and not isinstance(points, tuple): points = (points,) npts = len(points) if npts != lookup_table.ndim: raise ValueError( "Expected grid points in " "{} directions, got {}.".format(lookup_table.ndim, npts)) if (npts > 1 and isinstance(points[0], u.Quantity) and len(set([getattr(p, 'unit', None) for p in points])) > 1): raise ValueError('points must all have the same unit.') if isinstance(fill_value, u.Quantity): if not isinstance(lookup_table, u.Quantity): raise ValueError('fill value is in {} but expected to be ' 'unitless.'.format(fill_value.unit)) fill_value = fill_value.to(lookup_table.unit).value self.points = points self.lookup_table = lookup_table self.bounds_error = bounds_error self.method = method self.fill_value = fill_value def __repr__(self): fmt = "<{}(points={}, lookup_table={})>".format( self.__class__.__name__, self.points, self.lookup_table) return fmt def __str__(self): default_keywords = [ ('Model', self.__class__.__name__), ('Name', self.name), ('N_inputs', self.n_inputs), ('N_outputs', self.n_outputs), ('Parameters', ""), (' points', self.points), (' lookup_table', self.lookup_table), (' method', self.method), (' fill_value', self.fill_value), (' bounds_error', self.bounds_error) ] parts = [f'{keyword}: {value}' for keyword, value in default_keywords if value is not None] return '\n'.join(parts) @property def input_units(self): pts = self.points[0] if not isinstance(pts, u.Quantity): return None return dict([(x, pts.unit) for x in self.inputs]) @property def return_units(self): if not isinstance(self.lookup_table, u.Quantity): return None return {self.outputs[0]: self.lookup_table.unit} @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(points_low, points_high)``. Examples -------- >>> from astropy.modeling.models import Tabular1D, Tabular2D >>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30]) >>> t1.bounding_box (1, 3) >>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]], ... lookup_table=[[10, 20, 30], [20, 30, 40]]) >>> t2.bounding_box ((2, 4), (1, 3)) """ bbox = [(min(p), max(p)) for p in self.points][::-1] if len(bbox) == 1: bbox = bbox[0] return tuple(bbox) def evaluate(self, *inputs): """ Return the interpolated values at the input coordinates. Parameters ---------- inputs : list of scalar or list of ndarray Input coordinates. The number of inputs must be equal to the dimensions of the lookup table. 
""" inputs = np.broadcast_arrays(*inputs) if isinstance(inputs, u.Quantity): inputs = inputs.value shape = inputs[0].shape inputs = [inp.flatten() for inp in inputs[: self.n_inputs]] inputs = np.array(inputs).T if not has_scipy: # pragma: no cover raise ImportError("Tabular model requires scipy.") result = interpn(self.points, self.lookup_table, inputs, method=self.method, bounds_error=self.bounds_error, fill_value=self.fill_value) # return_units not respected when points has no units if (isinstance(self.lookup_table, u.Quantity) and not isinstance(self.points[0], u.Quantity)): result = result * self.lookup_table.unit if self.n_outputs == 1: result = result.reshape(shape) else: result = [r.reshape(shape) for r in result] return result @property def inverse(self): if self.n_inputs == 1: # If the wavelength array is decending instead of ascending, both # points and lookup_table need to be reversed in the inverse transform # for scipy.interpolate to work properly if np.all(np.diff(self.lookup_table) > 0): # ascending case points = self.lookup_table lookup_table = self.points[0] elif np.all(np.diff(self.lookup_table) < 0): # descending case, reverse order points = self.lookup_table[::-1] lookup_table = self.points[0][::-1] else: # equal-valued or double-valued lookup_table raise NotImplementedError return Tabular1D(points=points, lookup_table=lookup_table, method=self.method, bounds_error=self.bounds_error, fill_value=self.fill_value) raise NotImplementedError("An analytical inverse transform " "has not been implemented for this model.") def tabular_model(dim, name=None): """ Make a ``Tabular`` model where ``n_inputs`` is based on the dimension of the lookup_table. This model has to be further initialized and when evaluated returns the interpolated values. Parameters ---------- dim : int Dimensions of the lookup table. name : str Name for the class. Examples -------- >>> table = np.array([[3., 0., 0.], ... [0., 2., 0.], ... [0., 0., 0.]]) >>> tab = tabular_model(2, name='Tabular2D') >>> print(tab) <class 'astropy.modeling.tabular.Tabular2D'> Name: Tabular2D N_inputs: 2 N_outputs: 1 >>> points = ([1, 2, 3], [1, 2, 3]) Setting fill_value to None, allows extrapolation. >>> m = tab(points, lookup_table=table, name='my_table', ... bounds_error=False, fill_value=None, method='nearest') >>> xinterp = [0, 1, 1.5, 2.72, 3.14] >>> m(xinterp, xinterp) # doctest: +FLOAT_CMP array([3., 3., 3., 0., 0.]) """ if dim < 1: raise ValueError('Lookup table must have at least one dimension.') table = np.zeros([2] * dim) members = {'lookup_table': table, 'n_inputs': dim, 'n_outputs': 1} if dim == 1: members['_separable'] = True else: members['_separable'] = False if name is None: model_id = _Tabular._id _Tabular._id += 1 name = f'Tabular{model_id}' model_class = type(str(name), (_Tabular,), members) model_class.__module__ = 'astropy.modeling.tabular' return model_class Tabular1D = tabular_model(1, name='Tabular1D') Tabular2D = tabular_model(2, name='Tabular2D') _tab_docs = """ method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then ``fill_value`` is used. fill_value : float, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. 
    Extrapolation is not supported by method "splinef2d".

    Returns
    -------
    value : ndarray
        Interpolated values at input coordinates.

    Raises
    ------
    ImportError
        Scipy is not installed.

    Notes
    -----
    Uses `scipy.interpolate.interpn`.
"""

Tabular1D.__doc__ = """
    Tabular model in 1D.

    Returns an interpolated lookup table value.

    Parameters
    ----------
    points : array-like of float, of ndim=1
        The points defining the regular grid in one dimension.
    lookup_table : array-like, of ndim=1
        The data in one dimension.
""" + _tab_docs

Tabular2D.__doc__ = """
    Tabular model in 2D.

    Returns an interpolated lookup table value.

    Parameters
    ----------
    points : tuple of ndarray of float, optional
        The points defining the regular grid in two dimensions.
        ndarray with shapes (m1, m2).
    lookup_table : array-like
        The data on a regular grid in 2 dimensions.
        Shape (m1, m2).

""" + _tab_docs
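# --- Editorial usage sketch (standalone; not part of astropy.modeling.tabular) ---
# A minimal illustration of the Tabular1D model defined above: linear
# interpolation over a 1-D grid, extrapolation enabled by fill_value=None, and
# the analytic inverse that is available when lookup_table is strictly
# monotonic.  Requires scipy; the numbers are illustrative only.
import numpy as np
from astropy.modeling.models import Tabular1D

points = np.array([1.0, 2.0, 3.0, 4.0])
lookup = np.array([10.0, 20.0, 30.0, 40.0])
model = Tabular1D(points=points, lookup_table=lookup,
                  bounds_error=False, fill_value=None)

print(model([1.5, 4.5]))    # [15. 45.]; 4.5 is extrapolated because fill_value=None
print(model.inverse(30.0))  # 3.0; the inverse swaps points and lookup_table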
# Licensed under a 3-clause BSD style license - see LICENSE.rst import textwrap import numpy as np import pytest from astropy.io import fits from astropy.nddata.nduncertainty import ( StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty, InverseVariance) from astropy import units as u from astropy import log from astropy.wcs import WCS, FITSFixedWarning from astropy.utils import NumpyRNGContext from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames, get_pkg_data_contents) from astropy.utils.exceptions import AstropyWarning from astropy.nddata.ccddata import CCDData from astropy.nddata import _testing as nd_testing from astropy.table import Table DEFAULT_DATA_SIZE = 100 with NumpyRNGContext(123): _random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE]) def create_ccd_data(): """ Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE with units of ADU. """ data = _random_array.copy() fake_meta = {'my_key': 42, 'your_key': 'not 42'} ccd = CCDData(data, unit=u.adu) ccd.header = fake_meta return ccd def test_ccddata_empty(): with pytest.raises(TypeError): CCDData() # empty initializer should fail def test_ccddata_must_have_unit(): with pytest.raises(ValueError): CCDData(np.zeros([2, 2])) def test_ccddata_unit_cannot_be_set_to_none(): ccd_data = create_ccd_data() with pytest.raises(TypeError): ccd_data.unit = None def test_ccddata_meta_header_conflict(): with pytest.raises(ValueError) as exc: CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2}) assert "can't have both header and meta." in str(exc.value) def test_ccddata_simple(): ccd_data = create_ccd_data() assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert ccd_data.dtype == np.dtype(float) def test_ccddata_init_with_string_electron_unit(): ccd = CCDData(np.zeros([2, 2]), unit="electron") assert ccd.unit is u.electron def test_initialize_from_FITS(tmpdir): ccd_data = create_ccd_data() hdu = fits.PrimaryHDU(ccd_data) hdulist = fits.HDUList([hdu]) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v def test_initialize_from_fits_with_unit_in_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = u.adu.to_string() filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? assert ccd.unit is u.adu # An explicit unit in the read overrides any unit in the FITS file ccd2 = CCDData.read(filename, unit="photon") assert ccd2.unit is u.photon def test_initialize_from_fits_with_ADU_in_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['bunit'] = 'ADU' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) # ccd should pick up the unit adu from the fits header...did it? 
assert ccd.unit is u.adu def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir): hdu = fits.PrimaryHDU(np.ones((2, 2))) hdu.header['bunit'] = 'definetely-not-a-unit' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) with pytest.raises(ValueError): CCDData.read(filename) def test_initialize_from_fits_with_technically_invalid_but_not_really(tmpdir): hdu = fits.PrimaryHDU(np.ones((2, 2))) hdu.header['bunit'] = 'ELECTRONS/S' filename = tmpdir.join('afile.fits').strpath hdu.writeto(filename) ccd = CCDData.read(filename) assert ccd.unit == u.electron/u.s def test_initialize_from_fits_with_data_in_different_extension(tmpdir): fake_img = np.arange(4).reshape(2, 2) hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(fake_img) hdus = fits.HDUList([hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img) # check that the header is the combined header assert hdu2.header + hdu1.header == ccd.header def test_initialize_from_fits_with_extension(tmpdir): fake_img1 = np.zeros([2, 2]) fake_img2 = np.arange(4).reshape(2, 2) hdu0 = fits.PrimaryHDU() hdu1 = fits.ImageHDU(fake_img1) hdu2 = fits.ImageHDU(fake_img2) hdus = fits.HDUList([hdu0, hdu1, hdu2]) filename = tmpdir.join('afile.fits').strpath hdus.writeto(filename) ccd = CCDData.read(filename, hdu=2, unit='adu') # ccd should pick up the unit adu from the fits header...did it? np.testing.assert_array_equal(ccd.data, fake_img2) def test_write_unit_to_hdu(): ccd_data = create_ccd_data() ccd_unit = ccd_data.unit hdulist = ccd_data.to_hdu() assert 'bunit' in hdulist[0].header assert hdulist[0].header['bunit'] == ccd_unit.to_string() def test_initialize_from_FITS_bad_keyword_raises_error(tmpdir): # There are two fits.open keywords that are not permitted in ccdproc: # do_not_scale_image_data and scale_back ccd_data = create_ccd_data() filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True) with pytest.raises(TypeError): CCDData.read(filename, unit=ccd_data.unit, scale_back=True) def test_ccddata_writer(tmpdir): ccd_data = create_ccd_data() filename = tmpdir.join('test.fits').strpath ccd_data.write(filename) ccd_disk = CCDData.read(filename, unit=ccd_data.unit) np.testing.assert_array_equal(ccd_data.data, ccd_disk.data) def test_ccddata_meta_is_case_sensitive(): ccd_data = create_ccd_data() key = 'SoMeKEY' ccd_data.meta[key] = 10 assert key.lower() not in ccd_data.meta assert key.upper() not in ccd_data.meta assert key in ccd_data.meta def test_ccddata_meta_is_not_fits_header(): ccd_data = create_ccd_data() ccd_data.meta = {'OBSERVER': 'Edwin Hubble'} assert not isinstance(ccd_data.meta, fits.Header) def test_fromMEF(tmpdir): ccd_data = create_ccd_data() hdu = fits.PrimaryHDU(ccd_data) hdu2 = fits.PrimaryHDU(2 * ccd_data.data) hdulist = fits.HDUList(hdu) hdulist.append(hdu2) filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) # by default, we reading from the first extension cd = CCDData.read(filename, unit=u.electron) np.testing.assert_array_equal(cd.data, ccd_data.data) # but reading from the second should work too cd = CCDData.read(filename, hdu=1, unit=u.electron) np.testing.assert_array_equal(cd.data, 2 * ccd_data.data) def test_metafromheader(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = 
'3600' d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromdict(): dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600} d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron) assert d1.meta['OBSERVER'] == 'Edwin Hubble' def test_header2meta(): hdr = fits.header.Header() hdr['observer'] = 'Edwin Hubble' hdr['exptime'] = '3600' d1 = CCDData(np.ones((5, 5)), unit=u.electron) d1.header = hdr assert d1.meta['OBSERVER'] == 'Edwin Hubble' assert d1.header['OBSERVER'] == 'Edwin Hubble' def test_metafromstring_fail(): hdr = 'this is not a valid header' with pytest.raises(TypeError): CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu) def test_setting_bad_uncertainty_raises_error(): ccd_data = create_ccd_data() with pytest.raises(TypeError): # Uncertainty is supposed to be an instance of NDUncertainty ccd_data.uncertainty = 10 def test_setting_uncertainty_with_array(): ccd_data = create_ccd_data() ccd_data.uncertainty = None fake_uncertainty = np.sqrt(np.abs(ccd_data.data)) ccd_data.uncertainty = fake_uncertainty.copy() np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty) def test_setting_uncertainty_wrong_shape_raises_error(): ccd_data = create_ccd_data() with pytest.raises(ValueError): ccd_data.uncertainty = np.zeros([3, 4]) def test_to_hdu(): ccd_data = create_ccd_data() ccd_data.meta = {'observer': 'Edwin Hubble'} fits_hdulist = ccd_data.to_hdu() assert isinstance(fits_hdulist, fits.HDUList) for k, v in ccd_data.meta.items(): assert fits_hdulist[0].header[k] == v np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data) def test_copy(): ccd_data = create_ccd_data() ccd_copy = ccd_data.copy() np.testing.assert_array_equal(ccd_copy.data, ccd_data.data) assert ccd_copy.unit == ccd_data.unit assert ccd_copy.meta == ccd_data.meta @pytest.mark.parametrize('operation,affects_uncertainty', [ ("multiply", True), ("divide", True), ]) @pytest.mark.parametrize('operand', [ 2.0, 2 * u.dimensionless_unscaled, 2 * u.photon / u.adu, ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) def test_mult_div_overload(operand, with_uncertainty, operation, affects_uncertainty): ccd_data = create_ccd_data() if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = getattr(ccd_data, operation) np_method = getattr(np, operation) result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): # Need the "1 *" below to force arguments to be Quantity to work around # astropy/astropy#2377 expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit assert result.unit == expected_unit else: assert result.unit == ccd_data.unit @pytest.mark.parametrize('operation,affects_uncertainty', [ ("add", False), ("subtract", False), ]) @pytest.mark.parametrize('operand,expect_failure', [ (2.0, u.UnitsError), # fail--units don't match image (2 * u.dimensionless_unscaled, 
u.UnitsError), # same (2 * u.adu, False), ]) @pytest.mark.parametrize('with_uncertainty', [ True, False]) def test_add_sub_overload(operand, expect_failure, with_uncertainty, operation, affects_uncertainty): ccd_data = create_ccd_data() if with_uncertainty: ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) method = getattr(ccd_data, operation) np_method = getattr(np, operation) if expect_failure: with pytest.raises(expect_failure): result = method(operand) return else: result = method(operand) assert result is not ccd_data assert isinstance(result, CCDData) assert (result.uncertainty is None or isinstance(result.uncertainty, StdDevUncertainty)) try: op_value = operand.value except AttributeError: op_value = operand np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value)) if with_uncertainty: if affects_uncertainty: np.testing.assert_array_equal(result.uncertainty.array, np_method(ccd_data.uncertainty.array, op_value)) else: np.testing.assert_array_equal(result.uncertainty.array, ccd_data.uncertainty.array) else: assert result.uncertainty is None if isinstance(operand, u.Quantity): assert (result.unit == ccd_data.unit and result.unit == operand.unit) else: assert result.unit == ccd_data.unit def test_arithmetic_overload_fails(): ccd_data = create_ccd_data() with pytest.raises(TypeError): ccd_data.multiply("five") with pytest.raises(TypeError): ccd_data.divide("five") with pytest.raises(TypeError): ccd_data.add("five") with pytest.raises(TypeError): ccd_data.subtract("five") def test_arithmetic_no_wcs_compare(): ccd = CCDData(np.ones((10, 10)), unit='') assert ccd.add(ccd, compare_wcs=None).wcs is None assert ccd.subtract(ccd, compare_wcs=None).wcs is None assert ccd.multiply(ccd, compare_wcs=None).wcs is None assert ccd.divide(ccd, compare_wcs=None).wcs is None def test_arithmetic_with_wcs_compare(): def return_true(_, __): return True wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2) ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=wcs1) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=wcs2) nd_testing.assert_wcs_seem_equal( ccd1.add(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.subtract(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.multiply(ccd2, compare_wcs=return_true).wcs, wcs1) nd_testing.assert_wcs_seem_equal( ccd1.divide(ccd2, compare_wcs=return_true).wcs, wcs1) def test_arithmetic_with_wcs_compare_fail(): def return_false(_, __): return False ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_false) def test_arithmetic_overload_ccddata_operand(): ccd_data = create_ccd_data() ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) 
result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(tmpdir): ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: _ = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. 
wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) with pytest.warns(AstropyWarning, match=r'Some non-standard WCS keywords were excluded'): wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"): ccd = CCDData.read(data_file1, unit='count') def test_wcs_SIP_coefficient_keywords_removed(): # If SIP polynomials are present, check that no more polynomial # coefficients remain in the header. See #8598 # The SIP paper is ambiguous as to whether keywords like # A_0_0 can appear in the header for a 2nd order or higher # polynomial. The paper clearly says that the corrections # are only for quadratic or higher order, so A_0_0 and the like # should be zero if they are present, but they apparently can be # there (or at least astrometry.net produces them). # astropy WCS does not write those coefficients, so they were # not being removed from the header even though they are WCS-related. 
data_file = get_pkg_data_filename('data/sip-wcs.fits') test_keys = ['A_0_0', 'B_0_1'] # Make sure the keywords added to this file for testing are there with fits.open(data_file) as hdu: for key in test_keys: assert key in hdu[0].header ccd = CCDData.read(data_file) # Now the test...the two keywords above should have been removed. for key in test_keys: assert key not in ccd.header @pytest.mark.filterwarnings('ignore') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removal works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header. Includes regression test for #8597 """ from astropy.nddata.ccddata import _generate_wcs_and_update_header from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER, _CDs, _PCs) keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if ('invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr or 'chandra-pixlist-wcs' in hdr): continue header_string = get_pkg_data_contents(hdr) header = fits.Header.fromstring(header_string) wcs = WCS(header_string) header_from_wcs = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) new_wcs_header = new_wcs.to_header(relax=True) # Make sure all of the WCS-related keywords generated by astropy # have been removed. assert not (set(new_header) & set(new_wcs_header) - keepers) # Check that new_header contains no remaining WCS information. # Specifically, check that # 1. The combination of new_header and new_wcs does not contain # both PCi_j and CDi_j keywords. See #8597. # Check for 1 final_header = new_header + new_wcs_header final_header_set = set(final_header) if _PCs & final_header_set: assert not (_CDs & final_header_set) elif _CDs & final_header_set: assert not (_PCs & final_header_set) # Check that the new wcs is the same as the old. for k, v in new_wcs_header.items(): if isinstance(v, str): assert header_from_wcs[k] == v else: np.testing.assert_almost_equal(header_from_wcs[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. 
hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(): ccd_data = create_ccd_data() a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs result = ccd_data.multiply(1.0) nd_testing.assert_wcs_seem_equal(result.wcs, wcs) @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.wcs = WCS(naxis=2) method = getattr(ccd_data, operation) result = method(ccd_data2) nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs) assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = getattr(ccd_data, operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_default( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. 
ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, key_uncertainty_type='Blah') ccd_after = CCDData.read(filename, key_uncertainty_type='Blah') assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_read_old_style_multiextensionfits(tmpdir): # Regression test for https://github.com/astropy/ccdproc/issues/664 # # Prior to astropy 3.1 there was no uncertainty type saved # in the multiextension fits files generated by CCDData # because the uncertainty had to be StandardDevUncertainty. # # Current version should be able to read those in. # size = 4 # Value of the variables below are not important to the test. data = np.zeros([size, size]) mask = data > 0.9 uncert = np.sqrt(data) ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu') # We'll create the file manually to ensure we have the # right extension names and no uncertainty type. hdulist = ccd.to_hdu() del hdulist[2].header['UTYPE'] file_name = tmpdir.join('old_ccddata_mef.fits').strpath hdulist.writeto(file_name) ccd = CCDData.read(file_name) assert isinstance(ccd.uncertainty, StdDevUncertainty) def test_wcs(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs assert ccd_data.wcs is wcs def test_recognized_fits_formats_for_read_write(tmpdir): # These are the extensions that are supposed to be supported. ccd_data = create_ccd_data() supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join(f"test.{ext}") ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. 
ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None # https://github.com/astropy/astropy/issues/7595 def test_read_returns_image(tmpdir): # Test if CCData.read returns a image when reading a fits file containing # a table and image, in that order. tbl = Table(np.ones(10).reshape(5, 2)) img = np.ones((5, 5)) hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()), fits.ImageHDU(img)]) filename = tmpdir.join('table_image.fits').strpath hdul.writeto(filename) ccd = CCDData.read(filename, unit='adu') # Expecting to get (5, 5), the size of the image assert ccd.data.shape == (5, 5) # https://github.com/astropy/astropy/issues/9664 def test_sliced_ccdata_to_hdu(): wcs = WCS(naxis=2) wcs.wcs.crpix = 10, 10 ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel') trimmed = ccd[2:-2, 2:-2] hdul = trimmed.to_hdu() assert isinstance(hdul, fits.HDUList) assert hdul[0].header['CRPIX1'] == 8 assert hdul[0].header['CRPIX2'] == 8
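# --- Editorial sketch (standalone; not part of the astropy test suite) ---
# Illustrates the multi-extension FITS layout that the read/write tests above
# rely on, using current astropy defaults: to_hdu() places the mask in a 'MASK'
# ImageHDU and the uncertainty in an 'UNCERT' ImageHDU whose 'UTYPE' card
# records the uncertainty class, and CCDData.read() reverses that mapping.
# Array values are illustrative only.
import numpy as np
from astropy.nddata import CCDData, StdDevUncertainty

ccd_example = CCDData(np.ones((2, 2)), unit='adu',
                      mask=np.zeros((2, 2), dtype=bool),
                      uncertainty=StdDevUncertainty(np.ones((2, 2))))
hdus = ccd_example.to_hdu()
print([h.name for h in hdus])          # expected: ['PRIMARY', 'MASK', 'UNCERT']
print(hdus['UNCERT'].header['UTYPE'])  # expected: 'StdDevUncertainty'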
mhvk/astropy
astropy/nddata/tests/test_ccddata.py
astropy/modeling/tabular.py
from __future__ import print_function, absolute_import, division def get_package_data(): return { _ASTROPY_PACKAGE_NAME_ + '.tests': ['coveragerc', 'data/*.fits', 'data/*.hdr', 'data/*.lmv', 'data/*reg'] }
from __future__ import print_function, absolute_import, division from astropy import wcs from astropy.io import fits from astropy import units as u from astropy import constants from astropy.tests.helper import pytest, assert_quantity_allclose import numpy as np from .helpers import assert_allclose from . import path as data_path from ..spectral_axis import (convert_spectral_axis, determine_ctype_from_vconv, cdelt_derivative, determine_vconv_from_ctype, get_rest_value_from_wcs, air_to_vac, air_to_vac_deriv, vac_to_air, doppler_z, doppler_gamma, doppler_beta) def test_cube_wcs_freqtovel(): header = fits.Header.fromtextfile(data_path('cubewcs1.hdr')) w1 = wcs.WCS(header) # CTYPE3 = 'FREQ' newwcs = convert_spectral_axis(w1, 'km/s', 'VRAD', rest_value=w1.wcs.restfrq*u.Hz) assert newwcs.wcs.ctype[2] == 'VRAD' assert newwcs.wcs.crval[2] == 305.2461585938794 assert newwcs.wcs.cunit[2] == u.Unit('km/s') newwcs = convert_spectral_axis(w1, 'km/s', 'VRAD') assert newwcs.wcs.ctype[2] == 'VRAD' assert newwcs.wcs.crval[2] == 305.2461585938794 assert newwcs.wcs.cunit[2] == u.Unit('km/s') def test_cube_wcs_freqtovopt(): header = fits.Header.fromtextfile(data_path('cubewcs1.hdr')) w1 = wcs.WCS(header) w2 = convert_spectral_axis(w1, 'km/s', 'VOPT') # TODO: what should w2's values be? test them # these need to be set to zero to test the failure w1.wcs.restfrq = 0.0 w1.wcs.restwav = 0.0 with pytest.raises(ValueError) as exc: convert_spectral_axis(w1, 'km/s', 'VOPT') assert exc.value.args[0] == 'If converting from wavelength/frequency to speed, a reference wavelength/frequency is required.' @pytest.mark.parametrize('wcstype',('Z','W','R','V')) def test_greisen2006(wcstype): # This is the header extracted from Greisen 2006, including many examples # of valid transforms. 
It should be the gold standard (in principle) hdr = fits.Header.fromtextfile(data_path('greisen2006.hdr')) # We have not implemented frame conversions, so we can only convert bary # <-> bary in this case wcs0 = wcs.WCS(hdr, key='F') wcs1 = wcs.WCS(hdr, key=wcstype) if wcstype in ('R','V','Z'): if wcs1.wcs.restfrq: rest = wcs1.wcs.restfrq*u.Hz elif wcs1.wcs.restwav: rest = wcs1.wcs.restwav*u.m else: rest = None outunit = u.Unit(wcs1.wcs.cunit[wcs1.wcs.spec]) out_ctype = wcs1.wcs.ctype[wcs1.wcs.spec] wcs2 = convert_spectral_axis(wcs0, outunit, out_ctype, rest_value=rest) assert_allclose(wcs2.wcs.cdelt[wcs2.wcs.spec], wcs1.wcs.cdelt[wcs1.wcs.spec], rtol=1.e-3) assert_allclose(wcs2.wcs.crval[wcs2.wcs.spec], wcs1.wcs.crval[wcs1.wcs.spec], rtol=1.e-3) assert wcs2.wcs.ctype[wcs2.wcs.spec] == wcs1.wcs.ctype[wcs1.wcs.spec] assert wcs2.wcs.cunit[wcs2.wcs.spec] == wcs1.wcs.cunit[wcs1.wcs.spec] # round trip test: inunit = u.Unit(wcs0.wcs.cunit[wcs0.wcs.spec]) in_ctype = wcs0.wcs.ctype[wcs0.wcs.spec] wcs3 = convert_spectral_axis(wcs2, inunit, in_ctype, rest_value=rest) assert_allclose(wcs3.wcs.crval[wcs3.wcs.spec], wcs0.wcs.crval[wcs0.wcs.spec], rtol=1.e-3) assert_allclose(wcs3.wcs.cdelt[wcs3.wcs.spec], wcs0.wcs.cdelt[wcs0.wcs.spec], rtol=1.e-3) assert wcs3.wcs.ctype[wcs3.wcs.spec] == wcs0.wcs.ctype[wcs0.wcs.spec] assert wcs3.wcs.cunit[wcs3.wcs.spec] == wcs0.wcs.cunit[wcs0.wcs.spec] def test_byhand_f2v(): # VELO-F2V CRVAL3F = 1.37847121643E+09 CDELT3F = 9.764775E+04 RESTFRQV= 1.420405752E+09 CRVAL3V = 8.98134229811E+06 CDELT3V = -2.1217551E+04 CUNIT3V = 'm/s' CUNIT3F = 'Hz' crvalf = CRVAL3F * u.Unit(CUNIT3F) crvalv = CRVAL3V * u.Unit(CUNIT3V) restfreq = RESTFRQV * u.Unit(CUNIT3F) cdeltf = CDELT3F * u.Unit(CUNIT3F) cdeltv = CDELT3V * u.Unit(CUNIT3V) # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out # (<Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>, <Quantity # 8981342.29795544 m / s>, <Quantity 8981342.29795544 m / s>) (Pdb) # cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out # (<Quantity 97647.75 Hz>, <Quantity 97647.75 Hz>, <Quantity # -21217.552294728768 m / s>, <Quantity -21217.552294728768 m / s>) crvalv_computed = crvalf.to(CUNIT3V, u.doppler_relativistic(restfreq)) cdeltv_computed = -4*constants.c*cdeltf*crvalf*restfreq**2 / (crvalf**2+restfreq**2)**2 cdeltv_computed_byfunction = cdelt_derivative(crvalf, cdeltf, intype='frequency', outtype='speed', rest=restfreq) # this should be EXACT assert cdeltv_computed == cdeltv_computed_byfunction assert_allclose(crvalv_computed, crvalv, rtol=1.e-3) assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-3) # round trip # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out # (<Quantity 8981342.29795544 m / s>, <Quantity 8981342.29795544 m / s>, # <Quantity 1377852479.159838 Hz>, <Quantity 1377852479.159838 Hz>) # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out # (<Quantity -21217.552294728768 m / s>, <Quantity -21217.552294728768 m / # s>, <Quantity 97647.74999999997 Hz>, <Quantity 97647.74999999997 Hz>) crvalf_computed = crvalv_computed.to(CUNIT3F, u.doppler_relativistic(restfreq)) cdeltf_computed = -(cdeltv_computed * constants.c * restfreq / ((constants.c+crvalv_computed)*(constants.c**2 - crvalv_computed**2)**0.5)) assert_allclose(crvalf_computed, crvalf, rtol=1.e-2) assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-2) cdeltf_computed_byfunction = cdelt_derivative(crvalv_computed, cdeltv_computed, intype='speed', outtype='frequency', rest=restfreq) # this should be EXACT assert cdeltf_computed == cdeltf_computed_byfunction def test_byhand_vrad(): # VRAD 
CRVAL3F = 1.37847121643E+09 CDELT3F = 9.764775E+04 RESTFRQR= 1.420405752E+09 CRVAL3R = 8.85075090419E+06 CDELT3R = -2.0609645E+04 CUNIT3R = 'm/s' CUNIT3F = 'Hz' crvalf = CRVAL3F * u.Unit(CUNIT3F) crvalv = CRVAL3R * u.Unit(CUNIT3R) restfreq = RESTFRQR * u.Unit(CUNIT3F) cdeltf = CDELT3F * u.Unit(CUNIT3F) cdeltv = CDELT3R * u.Unit(CUNIT3R) # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out # (<Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>, <Quantity 8850750.904040769 m / s>, <Quantity 8850750.904040769 m / s>) # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out # (<Quantity 97647.75 Hz>, <Quantity 97647.75 Hz>, <Quantity -20609.645482954576 m / s>, <Quantity -20609.645482954576 m / s>) crvalv_computed = crvalf.to(CUNIT3R, u.doppler_radio(restfreq)) cdeltv_computed = -(cdeltf / restfreq)*constants.c assert_allclose(crvalv_computed, crvalv, rtol=1.e-3) assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-3) crvalf_computed = crvalv_computed.to(CUNIT3F, u.doppler_radio(restfreq)) cdeltf_computed = -(cdeltv_computed/constants.c) * restfreq assert_allclose(crvalf_computed, crvalf, rtol=1.e-3) assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3) # round trip: # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out # (<Quantity 8850750.904040769 m / s>, <Quantity 8850750.904040769 m / s>, <Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>) # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out # (<Quantity -20609.645482954576 m / s>, <Quantity -20609.645482954576 m / s>, <Quantity 94888.9338036023 Hz>, <Quantity 94888.9338036023 Hz>) # (Pdb) myunit,lin_cunit,out_lin_cunit,outunit # WRONG (Unit("m / s"), Unit("m / s"), Unit("Hz"), Unit("Hz")) def test_byhand_vopt(): # VOPT: case "Z" CRVAL3F = 1.37847121643E+09 CDELT3F = 9.764775E+04 CUNIT3F = 'Hz' RESTWAVZ= 0.211061139 #CTYPE3Z = 'VOPT-F2W' # This comes from Greisen 2006, but appears to be wrong: CRVAL3Z = 9.120000E+06 CRVAL3Z = 9.120002206E+06 CDELT3Z = -2.1882651E+04 CUNIT3Z = 'm/s' crvalf = CRVAL3F * u.Unit(CUNIT3F) crvalv = CRVAL3Z * u.Unit(CUNIT3Z) restwav = RESTWAVZ * u.m cdeltf = CDELT3F * u.Unit(CUNIT3F) cdeltv = CDELT3Z * u.Unit(CUNIT3Z) # Forward: freq -> vopt # crval: (<Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>, <Quantity 0.2174818410618759 m>, <Quantity 9120002.205689976 m / s>) # cdelt: (<Quantity 97647.75 Hz>, <Quantity 97647.75 Hz>, <Quantity -1.540591649098696e-05 m>, <Quantity -21882.652554887027 m / s>) #crvalv_computed = crvalf.to(CUNIT3R, u.doppler_radio(restwav)) crvalw_computed = crvalf.to(u.m, u.spectral()) crvalw_computed32 = crvalf.astype('float32').to(u.m, u.spectral()) cdeltw_computed = -(cdeltf / crvalf**2)*constants.c cdeltw_computed_byfunction = cdelt_derivative(crvalf, cdeltf, intype='frequency', outtype='length', rest=None) # this should be EXACT assert cdeltw_computed == cdeltw_computed_byfunction crvalv_computed = crvalw_computed.to(CUNIT3Z, u.doppler_optical(restwav)) crvalv_computed32 = crvalw_computed32.astype('float32').to(CUNIT3Z, u.doppler_optical(restwav)) #cdeltv_computed = (cdeltw_computed * # 4*constants.c*crvalw_computed*restwav**2 / # (restwav**2+crvalw_computed**2)**2) cdeltv_computed = (cdeltw_computed / restwav)*constants.c cdeltv_computed_byfunction = cdelt_derivative(crvalw_computed, cdeltw_computed, intype='length', outtype='speed', rest=restwav, linear=True) # Disagreement is 2.5e-7: good, but not really great... 
#assert np.abs((crvalv_computed-crvalv)/crvalv) < 1e-6 assert_allclose(crvalv_computed, crvalv, rtol=1.e-2) assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-2) # Round=trip test: # from velo_opt -> freq # (<Quantity 9120002.205689976 m / s>, <Quantity 0.2174818410618759 m>, <Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>) # (<Quantity -21882.652554887027 m / s>, <Quantity -1.540591649098696e-05 m>, <Quantity 97647.75 Hz>, <Quantity 97647.75 Hz>) crvalw_computed = crvalv_computed.to(u.m, u.doppler_optical(restwav)) cdeltw_computed = (cdeltv_computed/constants.c) * restwav cdeltw_computed_byfunction = cdelt_derivative(crvalv_computed, cdeltv_computed, intype='speed', outtype='length', rest=restwav, linear=True) assert cdeltw_computed == cdeltw_computed_byfunction crvalf_computed = crvalw_computed.to(CUNIT3F, u.spectral()) cdeltf_computed = -cdeltw_computed * constants.c / crvalw_computed**2 assert_allclose(crvalf_computed, crvalf, rtol=1.e-3) assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3) cdeltf_computed_byfunction = cdelt_derivative(crvalw_computed, cdeltw_computed, intype='length', outtype='frequency', rest=None) assert cdeltf_computed == cdeltf_computed_byfunction # Fails intentionally (but not really worth testing) #crvalf_computed = crvalv_computed.to(CUNIT3F, u.spectral()+u.doppler_optical(restwav)) #cdeltf_computed = -(cdeltv_computed / constants.c) * restwav.to(u.Hz, u.spectral()) #assert_allclose(crvalf_computed, crvalf, rtol=1.e-3) #assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3) def test_byhand_f2w(): CRVAL3F = 1.37847121643E+09 CDELT3F = 9.764775E+04 CUNIT3F = 'Hz' #CTYPE3W = 'WAVE-F2W' CRVAL3W = 0.217481841062 CDELT3W = -1.5405916E-05 CUNIT3W = 'm' crvalf = CRVAL3F * u.Unit(CUNIT3F) crvalw = CRVAL3W * u.Unit(CUNIT3W) cdeltf = CDELT3F * u.Unit(CUNIT3F) cdeltw = CDELT3W * u.Unit(CUNIT3W) crvalf_computed = crvalw.to(CUNIT3F, u.spectral()) cdeltf_computed = -constants.c * cdeltw / crvalw**2 assert_allclose(crvalf_computed, crvalf, rtol=0.1) assert_allclose(cdeltf_computed, cdeltf, rtol=0.1) @pytest.mark.parametrize(('ctype','unit','velocity_convention','result'), (('VELO-F2V', "Hz", None, 'FREQ'), ('VELO-F2V', "m", None, 'WAVE-F2W'), ('VOPT', "m", None, 'WAVE'), ('VOPT', "Hz", None, 'FREQ-W2F'), ('VELO', "Hz", None, 'FREQ-V2F'), ('WAVE', "Hz", None, 'FREQ-W2F'), ('FREQ', 'm/s', None, ValueError('A velocity convention must be specified')), ('FREQ', 'm/s', u.doppler_radio, 'VRAD'), ('FREQ', 'm/s', u.doppler_optical, 'VOPT-F2W'), ('FREQ', 'm/s', u.doppler_relativistic, 'VELO-F2V'), ('WAVE', 'm/s', u.doppler_radio, 'VRAD-W2F'))) def test_ctype_determinator(ctype,unit,velocity_convention,result): if isinstance(result, Exception): with pytest.raises(Exception) as exc: determine_ctype_from_vconv(ctype, unit, velocity_convention=velocity_convention) assert exc.value.args[0] == result.args[0] assert type(exc.value) == type(result) else: outctype = determine_ctype_from_vconv(ctype, unit, velocity_convention=velocity_convention) assert outctype == result @pytest.mark.parametrize(('ctype','vconv'), (('VELO-F2W', u.doppler_optical), ('VELO-F2V', u.doppler_relativistic), ('VRAD', u.doppler_radio), ('VOPT', u.doppler_optical), ('VELO', u.doppler_relativistic), ('WAVE', u.doppler_optical), ('WAVE-F2W', u.doppler_optical), ('WAVE-V2W', u.doppler_optical), ('FREQ', u.doppler_radio), ('FREQ-V2F', u.doppler_radio), ('FREQ-W2F', u.doppler_radio),)) def test_vconv_determinator(ctype, vconv): assert determine_vconv_from_ctype(ctype) == vconv @pytest.fixture def filename(request): 
return request.getfixturevalue(request.param) @pytest.mark.parametrize(('filename'), (('data_advs'), ('data_dvsa'), ('data_sdav'), ('data_sadv'), ('data_vsad'), ('data_vad'), ('data_adv'), ), indirect=['filename']) def test_vopt_to_freq(filename): h = fits.getheader(filename) wcs0 = wcs.WCS(h) # check to make sure astropy.wcs's "fix" changes VELO-HEL to VOPT assert wcs0.wcs.ctype[wcs0.wcs.spec] == 'VOPT' out_ctype = determine_ctype_from_vconv('VOPT', u.Hz) wcs1 = convert_spectral_axis(wcs0, u.Hz, out_ctype) assert wcs1.wcs.ctype[wcs1.wcs.spec] == 'FREQ-W2F' @pytest.mark.parametrize('wcstype',('Z','W','R','V','F')) def test_change_rest_frequency(wcstype): # This is the header extracted from Greisen 2006, including many examples # of valid transforms. It should be the gold standard (in principle) hdr = fits.Header.fromtextfile(data_path('greisen2006.hdr')) wcs0 = wcs.WCS(hdr, key=wcstype) old_rest = get_rest_value_from_wcs(wcs0) if old_rest is None: # This test doesn't matter if there was no rest frequency in the first # place but I prefer to keep the option open in case we want to try # forcing a rest frequency on some of the non-velocity frames at some # point return vconv1 = determine_vconv_from_ctype(hdr['CTYPE3'+wcstype]) new_rest = (100*u.km/u.s).to(u.Hz, vconv1(old_rest)) wcs1 = wcs.WCS(hdr, key='V') vconv2 = determine_vconv_from_ctype(hdr['CTYPE3V']) inunit = u.Unit(wcs0.wcs.cunit[wcs0.wcs.spec]) outunit = u.Unit(wcs1.wcs.cunit[wcs1.wcs.spec]) # VELO-F2V out_ctype = wcs1.wcs.ctype[wcs1.wcs.spec] wcs2 = convert_spectral_axis(wcs0, outunit, out_ctype, rest_value=new_rest) sp1 = wcs1.sub([wcs.WCSSUB_SPECTRAL]) sp2 = wcs2.sub([wcs.WCSSUB_SPECTRAL]) p_old = sp1.wcs_world2pix([old_rest.to(inunit, vconv1(old_rest)).value, new_rest.to(inunit, vconv1(old_rest)).value],0) p_new = sp2.wcs_world2pix([old_rest.to(outunit, vconv2(new_rest)).value, new_rest.to(outunit, vconv2(new_rest)).value],0) assert_allclose(p_old, p_new, rtol=1e-3) assert_allclose(p_old, p_new, rtol=1e-3) # from http://classic.sdss.org/dr5/products/spectra/vacwavelength.html # these aren't accurate enough for my liking, but I can't find a better one readily air_vac = { 'H-beta':(4861.363, 4862.721)*u.AA, '[O III]':(4958.911, 4960.295)*u.AA, '[O III]':(5006.843, 5008.239)*u.AA, '[N II]':(6548.05, 6549.86)*u.AA, 'H-alpha':(6562.801, 6564.614)*u.AA, '[N II]':(6583.45, 6585.27)*u.AA, '[S II]':(6716.44, 6718.29)*u.AA, '[S II]':(6730.82, 6732.68)*u.AA, } @pytest.mark.parametrize(('air','vac'), air_vac.values()) def test_air_to_vac(air, vac): # This is the accuracy provided by the line list we have. # I'm not sure if the formula are incorrect or if the reference wavelengths # are, but this is an accuracy of only 6 km/s, which is *very bad* for # astrophysical applications. 
assert np.abs((air_to_vac(air)- vac)) < 0.15*u.AA assert np.abs((vac_to_air(vac)- air)) < 0.15*u.AA assert np.abs((air_to_vac(air)- vac)/vac) < 2e-5 assert np.abs((vac_to_air(vac)- air)/air) < 2e-5 # round tripping assert np.abs((vac_to_air(air_to_vac(air))-air))/air < 1e-8 assert np.abs((air_to_vac(vac_to_air(vac))-vac))/vac < 1e-8 def test_byhand_awav2vel(): # AWAV CRVAL3A = (6560*u.AA).to(u.m).value CDELT3A = (1.0*u.AA).to(u.m).value CUNIT3A = 'm' CRPIX3A = 1.0 # restwav MUST be vacuum restwl = air_to_vac(6562.81*u.AA) RESTWAV = restwl.to(u.m).value CRVAL3V = (CRVAL3A*u.m).to(u.m/u.s, u.doppler_optical(restwl)).value CDELT3V = (CDELT3A*u.m*air_to_vac_deriv(CRVAL3A*u.m)/restwl) * constants.c CUNIT3V = 'm/s' mywcs = wcs.WCS(naxis=1) mywcs.wcs.ctype[0] = 'AWAV' mywcs.wcs.crval[0] = CRVAL3A mywcs.wcs.crpix[0] = CRPIX3A mywcs.wcs.cunit[0] = CUNIT3A mywcs.wcs.cdelt[0] = CDELT3A mywcs.wcs.restwav = RESTWAV mywcs.wcs.set() newwcs = convert_spectral_axis(mywcs, u.km/u.s, determine_ctype_from_vconv(mywcs.wcs.ctype[0], u.km/u.s, 'optical')) newwcs.wcs.set() assert newwcs.wcs.cunit[0] == 'm / s' np.testing.assert_almost_equal(newwcs.wcs.crval, air_to_vac(CRVAL3A*u.m).to(u.m/u.s, u.doppler_optical(restwl)).value) # Check that the cdelts match the expected cdelt, 1 angstrom / rest # wavelength (vac) np.testing.assert_almost_equal(newwcs.wcs.cdelt, CDELT3V.to(u.m/u.s).value) # Check that the reference wavelength is 2.81 angstroms up np.testing.assert_almost_equal(newwcs.wcs_pix2world((2.81,), 0), 0.0, decimal=3) # Go through a full-on sanity check: vline = 100*u.km/u.s wave_line_vac = vline.to(u.AA, u.doppler_optical(restwl)) wave_line_air = vac_to_air(wave_line_vac) pix_line_input = mywcs.wcs_world2pix((wave_line_air.to(u.m).value,), 0) pix_line_output = newwcs.wcs_world2pix((vline.to(u.m/u.s).value,), 0) np.testing.assert_almost_equal(pix_line_output, pix_line_input, decimal=4) def test_byhand_awav2wav(): # AWAV CRVAL3A = (6560*u.AA).to(u.m).value CDELT3A = (1.0*u.AA).to(u.m).value CUNIT3A = 'm' CRPIX3A = 1.0 mywcs = wcs.WCS(naxis=1) mywcs.wcs.ctype[0] = 'AWAV' mywcs.wcs.crval[0] = CRVAL3A mywcs.wcs.crpix[0] = CRPIX3A mywcs.wcs.cunit[0] = CUNIT3A mywcs.wcs.cdelt[0] = CDELT3A mywcs.wcs.set() newwcs = convert_spectral_axis(mywcs, u.AA, 'WAVE') newwcs.wcs.set() np.testing.assert_almost_equal(newwcs.wcs_pix2world((0,),0), air_to_vac(mywcs.wcs_pix2world((0,),0)*u.m).value) np.testing.assert_almost_equal(newwcs.wcs_pix2world((10,),0), air_to_vac(mywcs.wcs_pix2world((10,),0)*u.m).value) # At least one of the components MUST change assert not (mywcs.wcs.crval[0] == newwcs.wcs.crval[0] and mywcs.wcs.crpix[0] == newwcs.wcs.crpix[0]) class test_nir_sinfoni_base(object): def setup_method(self, method): CD3_3 = 0.000245000002905726 # CD rotation matrix CTYPE3 = 'WAVE ' # wavelength axis in microns CRPIX3 = 1109. 
# Reference pixel in z CRVAL3 = 2.20000004768372 # central wavelength CDELT3 = 0.000245000002905726 # microns per pixel CUNIT3 = 'um ' # spectral unit SPECSYS = 'TOPOCENT' # Coordinate reference frame self.rest_wavelength = 2.1218*u.um self.mywcs = wcs.WCS(naxis=1) self.mywcs.wcs.ctype[0] = CTYPE3 self.mywcs.wcs.crval[0] = CRVAL3 self.mywcs.wcs.crpix[0] = CRPIX3 self.mywcs.wcs.cunit[0] = CUNIT3 self.mywcs.wcs.cdelt[0] = CDELT3 self.mywcs.wcs.cd = [[CD3_3]] self.mywcs.wcs.specsys = SPECSYS self.mywcs.wcs.set() self.wavelengths = np.array([[2.12160005e-06, 2.12184505e-06, 2.12209005e-06]]) np.testing.assert_almost_equal(self.mywcs.wcs_pix2world([788,789,790], 0), self.wavelengths) def test_nir_sinfoni_example_optical(self): mywcs = self.mywcs.copy() velocities_opt = ((self.wavelengths*u.m-self.rest_wavelength)/(self.wavelengths*u.m) * constants.c).to(u.km/u.s) newwcs_opt = convert_spectral_axis(mywcs, u.km/u.s, 'VOPT', rest_value=self.rest_wavelength) assert newwcs_opt.wcs.cunit[0] == u.km/u.s newwcs_opt.wcs.set() worldpix_opt = newwcs_opt.wcs_pix2world([788,789,790], 0) assert newwcs_opt.wcs.cunit[0] == u.m/u.s np.testing.assert_almost_equal(worldpix_opt, velocities_opt.to(newwcs_opt.wcs.cunit[0]).value) def test_nir_sinfoni_example_radio(self): mywcs = self.mywcs.copy() velocities_rad = ((self.wavelengths*u.m-self.rest_wavelength)/(self.rest_wavelength) * constants.c).to(u.km/u.s) newwcs_rad = convert_spectral_axis(mywcs, u.km/u.s, 'VRAD', rest_value=self.rest_wavelength) assert newwcs_rad.wcs.cunit[0] == u.km/u.s newwcs_rad.wcs.set() worldpix_rad = newwcs_rad.wcs_pix2world([788,789,790], 0) assert newwcs_rad.wcs.cunit[0] == u.m/u.s np.testing.assert_almost_equal(worldpix_rad, velocities_rad.to(newwcs_rad.wcs.cunit[0]).value) def test_equivalencies(): """ Testing spectral equivalencies """ # range in "RADIO" with "100 * u.GHz" as rest frequancy range = u.Quantity([-318 * u.km / u.s, -320 * u.km / u.s]) # range in freq r1 = range.to("GHz", equivalencies=u.doppler_radio(100 * u.GHz)) # round conversion for "doppler_z" r2 = r1.to("km/s", equivalencies=doppler_z(100 * u.GHz)) r3 = r2.to("GHz", equivalencies=doppler_z(100*u.GHz)) assert_quantity_allclose(r1, r3) # round conversion for "doppler_beta" r2 = r1.to("km/s", equivalencies=doppler_beta(100 * u.GHz)) r3 = r2.to("GHz", equivalencies=doppler_beta(100 * u.GHz)) assert_quantity_allclose(r1, r3) # round conversion for "doppler_gamma" r2 = r1.to("km/s", equivalencies=doppler_gamma(100 * u.GHz)) r3 = r2.to("GHz", equivalencies=doppler_gamma(100 * u.GHz)) assert_quantity_allclose(r1, r3)
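The conversions exercised by the test file above all reduce to astropy's doppler equivalencies. Below is a minimal, standalone sketch of those equivalencies; the rest frequency is the HI line value used in the tests, while the sample frequency is an illustrative assumption, not a value taken from the record.

# Sketch: frequency -> velocity under the three conventions the tests cover.
from astropy import units as u

restfreq = 1.420405752e9 * u.Hz   # HI rest frequency (as in the tests)
freq = 1.378471216e9 * u.Hz       # illustrative sample frequency (assumption)

print(freq.to(u.km / u.s, u.doppler_radio(restfreq)))         # VRAD convention
print(freq.to(u.km / u.s, u.doppler_optical(restfreq)))       # VOPT convention
print(freq.to(u.km / u.s, u.doppler_relativistic(restfreq)))  # VELO convention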
e-koch/spectral-cube
spectral_cube/tests/test_spectral_axis.py
spectral_cube/tests/setup_package.py
"""Provide the MessageableMixin class.""" from ....const import API_PATH class MessageableMixin(object): """Interface for classes that can be messaged.""" def message(self, subject, message, from_subreddit=None): """ Send a message to a redditor or a subreddit's moderators (mod mail). :param subject: The subject of the message. :param message: The message content. :param from_subreddit: A Subreddit instance or string to send the message from. When provided, messages are sent from the subreddit rather than from the authenticated user. Note that the authenticated user must be a moderator of the subreddit and have mail permissions. For example, to send a private message to ``/u/spez``, try: .. code:: python reddit.redditor('spez').message('TEST', 'test message from PRAW') To send a message to ``u/spez`` from the moderators of ``r/test`` try: .. code:: python reddit.redditor('spez').message('TEST', 'test message from r/test', from_subreddit='test') To send a message to the moderators of ``/r/test``, try: .. code:: python reddit.subreddit('test').message('TEST', 'test PM from PRAW') """ data = { "subject": subject, "text": message, "to": "{}{}".format( getattr(self.__class__, "MESSAGE_PREFIX", ""), self ), } if from_subreddit: data["from_sr"] = str(from_subreddit) self._reddit.post(API_PATH["compose"], data=data)
import pickle

import pytest

from praw.models import Redditor

from ... import UnitTest


class TestRedditor(UnitTest):
    def test_equality(self):
        redditor1 = Redditor(self.reddit, _data={"name": "dummy1", "n": 1})
        redditor2 = Redditor(self.reddit, _data={"name": "Dummy1", "n": 2})
        redditor3 = Redditor(self.reddit, _data={"name": "dummy3", "n": 2})
        assert redditor1 == redditor1
        assert redditor2 == redditor2
        assert redditor3 == redditor3
        assert redditor1 == redditor2
        assert redditor2 != redditor3
        assert redditor1 != redditor3
        assert "dummy1" == redditor1
        assert redditor2 == "dummy1"

    def test_construct_failure(self):
        message = "Either `name` or `_data` must be provided."
        with pytest.raises(TypeError) as excinfo:
            Redditor(self.reddit)
        assert str(excinfo.value) == message

        with pytest.raises(TypeError) as excinfo:
            Redditor(self.reddit, "dummy", {"id": "dummy"})
        assert str(excinfo.value) == message

        with pytest.raises(AssertionError):
            Redditor(self.reddit, _data=[{"name": "dummy"}])

        with pytest.raises(AssertionError):
            Redditor(self.reddit, _data={"notname": "dummy"})

    def test_fullname(self):
        redditor = Redditor(self.reddit, _data={"name": "name", "id": "dummy"})
        assert redditor.fullname == "t2_dummy"

    def test_guild__min(self):
        with pytest.raises(TypeError) as excinfo:
            Redditor(self.reddit, name="RedditorName").gild(0)
        assert str(excinfo.value) == "months must be between 1 and 36"

    def test_guild__max(self):
        with pytest.raises(TypeError) as excinfo:
            Redditor(self.reddit, name="RedditorName").gild(37)
        assert str(excinfo.value) == "months must be between 1 and 36"

    def test_hash(self):
        redditor1 = Redditor(self.reddit, _data={"name": "dummy1", "n": 1})
        redditor2 = Redditor(self.reddit, _data={"name": "Dummy1", "n": 2})
        redditor3 = Redditor(self.reddit, _data={"name": "dummy3", "n": 2})
        assert hash(redditor1) == hash(redditor1)
        assert hash(redditor2) == hash(redditor2)
        assert hash(redditor3) == hash(redditor3)
        assert hash(redditor1) == hash(redditor2)
        assert hash(redditor2) != hash(redditor3)
        assert hash(redditor1) != hash(redditor3)

    def test_pickle(self):
        redditor = Redditor(self.reddit, _data={"name": "name", "id": "dummy"})
        for level in range(pickle.HIGHEST_PROTOCOL + 1):
            other = pickle.loads(pickle.dumps(redditor, protocol=level))
            assert redditor == other

    def test_repr(self):
        redditor = Redditor(self.reddit, name="RedditorName")
        assert repr(redditor) == "Redditor(name='RedditorName')"

    def test_str(self):
        redditor = Redditor(self.reddit, _data={"name": "name", "id": "dummy"})
        assert str(redditor) == "name"


class TestRedditorListings(UnitTest):
    def test__params_not_modified_in_mixed_listing(self):
        params = {"dummy": "value"}
        redditor = Redditor(self.reddit, name="spez")
        for listing in ["controversial", "hot", "new", "top"]:
            generator = getattr(redditor, listing)(params=params)
            assert params == {"dummy": "value"}
            assert listing == generator.params["sort"]
            assert "value" == generator.params["dummy"]
leviroth/praw
tests/unit/models/reddit/test_redditor.py
praw/models/reddit/mixins/messageable.py
from warnings import catch_warnings

import numpy as np

from pandas.core.dtypes import generic as gt

import pandas as pd
import pandas._testing as tm


class TestABCClasses:
    tuples = [[1, 2, 2], ["red", "blue", "red"]]
    multi_index = pd.MultiIndex.from_arrays(tuples, names=("number", "color"))
    datetime_index = pd.to_datetime(["2000/1/1", "2010/1/1"])
    timedelta_index = pd.to_timedelta(np.arange(5), unit="s")
    period_index = pd.period_range("2000/1/1", "2010/1/1/", freq="M")
    categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
    categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
    df = pd.DataFrame({"names": ["a", "b", "c"]}, index=multi_index)
    sparse_array = pd.arrays.SparseArray(np.random.randn(10))
    datetime_array = pd.core.arrays.DatetimeArray(datetime_index)
    timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index)

    def test_abc_types(self):
        assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndex)
        assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
        assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
        assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
        assert isinstance(self.multi_index, gt.ABCMultiIndex)
        assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
        assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
        assert isinstance(self.period_index, gt.ABCPeriodIndex)
        assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
        assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndexClass)
        assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
        assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
        assert isinstance(self.df, gt.ABCDataFrame)
        assert isinstance(self.sparse_array, gt.ABCExtensionArray)
        assert isinstance(self.categorical, gt.ABCCategorical)
        assert isinstance(self.datetime_array, gt.ABCDatetimeArray)
        assert not isinstance(self.datetime_index, gt.ABCDatetimeArray)
        assert isinstance(self.timedelta_array, gt.ABCTimedeltaArray)
        assert not isinstance(self.timedelta_index, gt.ABCTimedeltaArray)


def test_setattr_warnings():
    # GH7175 - GOTCHA: You can't use dot notation to add a column...
    d = {
        "one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
        "two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
    }
    df = pd.DataFrame(d)

    with catch_warnings(record=True) as w:
        # successfully add new column
        # this should not raise a warning
        df["three"] = df.two + 1
        assert len(w) == 0
        assert df.three.sum() > df.two.sum()

    with catch_warnings(record=True) as w:
        # successfully modify column in place
        # this should not raise a warning
        df.one += 1
        assert len(w) == 0
        assert df.one.iloc[0] == 2

    with catch_warnings(record=True) as w:
        # successfully add an attribute to a series
        # this should not raise a warning
        df.two.not_an_index = [1, 2]
        assert len(w) == 0

    with tm.assert_produces_warning(UserWarning):
        # warn when setting column to nonexistent name
        df.four = df.two + 2
        assert df.four.sum() > df.two.sum()
import pytest

from pandas import Index, MultiIndex, Series
import pandas._testing as tm


class TestSeriesRenameAxis:
    def test_rename_axis_mapper(self):
        # GH 19978
        mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
        ser = Series(list(range(len(mi))), index=mi)

        result = ser.rename_axis(index={"ll": "foo"})
        assert result.index.names == ["foo", "nn"]

        result = ser.rename_axis(index=str.upper, axis=0)
        assert result.index.names == ["LL", "NN"]

        result = ser.rename_axis(index=["foo", "goo"])
        assert result.index.names == ["foo", "goo"]

        with pytest.raises(TypeError, match="unexpected"):
            ser.rename_axis(columns="wrong")

    def test_rename_axis_inplace(self, datetime_series):
        # GH 15704
        expected = datetime_series.rename_axis("foo")
        result = datetime_series
        no_return = result.rename_axis("foo", inplace=True)

        assert no_return is None
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
    def test_rename_axis_none(self, kwargs):
        # GH 25034
        index = Index(list("abc"), name="foo")
        ser = Series([1, 2, 3], index=index)

        result = ser.rename_axis(**kwargs)
        expected_index = index.rename(None) if kwargs else index
        expected = Series([1, 2, 3], index=expected_index)
        tm.assert_series_equal(result, expected)
TomAugspurger/pandas
pandas/tests/series/methods/test_rename_axis.py
pandas/tests/dtypes/test_generic.py
import pandas as pd
import pandas._testing as tm


class TestUnaryOps:
    def test_invert(self):
        a = pd.array([True, False, None], dtype="boolean")
        expected = pd.array([False, True, None], dtype="boolean")
        tm.assert_extension_array_equal(~a, expected)

        expected = pd.Series(expected, index=["a", "b", "c"], name="name")
        result = ~pd.Series(a, index=["a", "b", "c"], name="name")
        tm.assert_series_equal(result, expected)

        df = pd.DataFrame({"A": a, "B": [True, False, False]}, index=["a", "b", "c"])
        result = ~df
        expected = pd.DataFrame(
            {"A": expected, "B": [False, True, True]}, index=["a", "b", "c"]
        )
        tm.assert_frame_equal(result, expected)
import pytest from pandas import Index, MultiIndex, Series import pandas._testing as tm class TestSeriesRenameAxis: def test_rename_axis_mapper(self): # GH 19978 mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"]) ser = Series(list(range(len(mi))), index=mi) result = ser.rename_axis(index={"ll": "foo"}) assert result.index.names == ["foo", "nn"] result = ser.rename_axis(index=str.upper, axis=0) assert result.index.names == ["LL", "NN"] result = ser.rename_axis(index=["foo", "goo"]) assert result.index.names == ["foo", "goo"] with pytest.raises(TypeError, match="unexpected"): ser.rename_axis(columns="wrong") def test_rename_axis_inplace(self, datetime_series): # GH 15704 expected = datetime_series.rename_axis("foo") result = datetime_series no_return = result.rename_axis("foo", inplace=True) assert no_return is None tm.assert_series_equal(result, expected) @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}]) def test_rename_axis_none(self, kwargs): # GH 25034 index = Index(list("abc"), name="foo") ser = Series([1, 2, 3], index=index) result = ser.rename_axis(**kwargs) expected_index = index.rename(None) if kwargs else index expected = Series([1, 2, 3], index=expected_index) tm.assert_series_equal(result, expected)
TomAugspurger/pandas
pandas/tests/series/methods/test_rename_axis.py
pandas/tests/arrays/boolean/test_ops.py
from contextlib import contextmanager from pandas.plotting._core import _get_plot_backend def table(ax, data, rowLabels=None, colLabels=None, **kwargs): """ Helper function to convert DataFrame and Series to matplotlib.table. Parameters ---------- ax : Matplotlib axes object data : DataFrame or Series Data for table contents. **kwargs Keyword arguments to be passed to matplotlib.table.table. If `rowLabels` or `colLabels` is not specified, data index or column name will be used. Returns ------- matplotlib table object """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.table( ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs ) def register(): """ Register pandas formatters and converters with matplotlib. This function modifies the global ``matplotlib.units.registry`` dictionary. pandas adds custom converters for * pd.Timestamp * pd.Period * np.datetime64 * datetime.datetime * datetime.date * datetime.time See Also -------- deregister_matplotlib_converters : Remove pandas formatters and converters. """ plot_backend = _get_plot_backend("matplotlib") plot_backend.register() def deregister(): """ Remove pandas formatters and converters. Removes the custom converters added by :func:`register`. This attempts to set the state of the registry back to the state before pandas registered its own units. Converters for pandas' own types like Timestamp and Period are removed completely. Converters for types pandas overwrites, like ``datetime.datetime``, are restored to their original value. See Also -------- register_matplotlib_converters : Register pandas formatters and converters with matplotlib. """ plot_backend = _get_plot_backend("matplotlib") plot_backend.deregister() def scatter_matrix( frame, alpha=0.5, figsize=None, ax=None, grid=False, diagonal="hist", marker=".", density_kwds=None, hist_kwds=None, range_padding=0.05, **kwargs, ): """ Draw a matrix of scatter plots. Parameters ---------- frame : DataFrame alpha : float, optional Amount of transparency applied. figsize : (float,float), optional A tuple (width, height) in inches. ax : Matplotlib axis object, optional grid : bool, optional Setting this to True will show the grid. diagonal : {'hist', 'kde'} Pick between 'kde' and 'hist' for either Kernel Density Estimation or Histogram plot in the diagonal. marker : str, optional Matplotlib marker type, default '.'. density_kwds : keywords Keyword arguments to be passed to kernel density estimate plot. hist_kwds : keywords Keyword arguments to be passed to hist function. range_padding : float, default 0.05 Relative extension of axis range in x and y with respect to (x_max - x_min) or (y_max - y_min). **kwargs Keyword arguments to be passed to scatter function. Returns ------- numpy.ndarray A matrix of scatter plots. Examples -------- .. plot:: :context: close-figs >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D']) >>> pd.plotting.scatter_matrix(df, alpha=0.2) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.scatter_matrix( frame=frame, alpha=alpha, figsize=figsize, ax=ax, grid=grid, diagonal=diagonal, marker=marker, density_kwds=density_kwds, hist_kwds=hist_kwds, range_padding=range_padding, **kwargs, ) def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): """ Plot a multidimensional dataset in 2D. Each Series in the DataFrame is represented as a evenly distributed slice on a circle. Each data point is rendered in the circle according to the value on each Series. 
Highly correlated `Series` in the `DataFrame` are placed closer on the unit circle. RadViz allow to project a N-dimensional data set into a 2D space where the influence of each dimension can be interpreted as a balance between the influence of all dimensions. More info available at the `original article <https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_ describing RadViz. Parameters ---------- frame : `DataFrame` pandas object holding the data. class_column : str Column name containing the name of the data point category. ax : :class:`matplotlib.axes.Axes`, optional A plot instance to which to add the information. color : list[str] or tuple[str], optional Assign a color to each category. Example: ['blue', 'green']. colormap : str or :class:`matplotlib.colors.Colormap`, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. **kwds Options to pass to matplotlib scatter plotting method. Returns ------- class:`matplotlib.axes.Axes` See Also -------- plotting.andrews_curves : Plot clustering visualization. Examples -------- .. plot:: :context: close-figs >>> df = pd.DataFrame( ... { ... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6, 6.7, 4.6], ... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2, 3.3, 3.6], ... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4, 5.7, 1.0], ... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2, 2.1, 0.2], ... 'Category': [ ... 'virginica', ... 'virginica', ... 'setosa', ... 'virginica', ... 'virginica', ... 'versicolor', ... 'versicolor', ... 'setosa', ... 'virginica', ... 'setosa' ... ] ... } ... ) >>> pd.plotting.radviz(df, 'Category') """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.radviz( frame=frame, class_column=class_column, ax=ax, color=color, colormap=colormap, **kwds, ) def andrews_curves( frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwargs ): """ Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ... Where x coefficients correspond to the values of each dimension and t is linearly spaced between -pi and +pi. Each row of frame then corresponds to a single curve. Parameters ---------- frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0). class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional Colors to use for the different classes. colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. **kwargs Options to pass to matplotlib plotting method. Returns ------- class:`matplotlip.axis.Axes` Examples -------- .. plot:: :context: close-figs >>> df = pd.read_csv( ... 'https://raw.github.com/pandas-dev/' ... 'pandas/master/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.andrews_curves(df, 'Name') """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.andrews_curves( frame=frame, class_column=class_column, ax=ax, samples=samples, color=color, colormap=colormap, **kwargs, ) def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): """ Bootstrap plot on mean, median and mid-range statistics. 
The bootstrap plot is used to estimate the uncertainty of a statistic by relaying on random sampling with replacement [1]_. This function will generate bootstrapping plots for mean, median and mid-range statistics for the given number of samples of the given size. .. [1] "Bootstrapping (statistics)" in \ https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 Parameters ---------- series : pandas.Series pandas Series from where to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters. size : int, default 50 Number of data points to consider during each sampling. It must be greater or equal than the length of the `series`. samples : int, default 500 Number of times the bootstrap procedure is performed. **kwds Options to pass to matplotlib plotting method. Returns ------- matplotlib.figure.Figure Matplotlib figure. See Also -------- DataFrame.plot : Basic plotting for DataFrame objects. Series.plot : Basic plotting for Series objects. Examples -------- This example draws a basic bootstap plot for a Series. .. plot:: :context: close-figs >>> s = pd.Series(np.random.uniform(size=100)) >>> pd.plotting.bootstrap_plot(s) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.bootstrap_plot( series=series, fig=fig, size=size, samples=samples, **kwds ) def parallel_coordinates( frame, class_column, cols=None, ax=None, color=None, use_columns=False, xticks=None, colormap=None, axvlines=True, axvlines_kwds=None, sort_labels=False, **kwargs, ): """ Parallel coordinates plotting. Parameters ---------- frame : DataFrame class_column : str Column name containing class names. cols : list, optional A list of column names to use. ax : matplotlib.axis, optional Matplotlib axis object. color : list or tuple, optional Colors to use for the different classes. use_columns : bool, optional If true, columns will be used as xticks. xticks : list or tuple, optional A list of values to use for xticks. colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional If true, vertical lines will be added at each xtick. axvlines_kwds : keywords, optional Options to be passed to axvline method for vertical lines. sort_labels : bool, default False Sort class_column labels, useful when assigning colors. **kwargs Options to pass to matplotlib plotting method. Returns ------- class:`matplotlib.axis.Axes` Examples -------- .. plot:: :context: close-figs >>> df = pd.read_csv( ... 'https://raw.github.com/pandas-dev/' ... 'pandas/master/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.parallel_coordinates( ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') ... ) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.parallel_coordinates( frame=frame, class_column=class_column, cols=cols, ax=ax, color=color, use_columns=use_columns, xticks=xticks, colormap=colormap, axvlines=axvlines, axvlines_kwds=axvlines_kwds, sort_labels=sort_labels, **kwargs, ) def lag_plot(series, lag=1, ax=None, **kwds): """ Lag plot for time series. Parameters ---------- series : Time series lag : lag of the scatter plot, default 1 ax : Matplotlib axis object, optional **kwds Matplotlib scatter method keyword arguments. Returns ------- class:`matplotlib.axis.Axes` Examples -------- Lag plots are most commonly used to look for patterns in time series data. Given the following time series .. 
plot:: :context: close-figs >>> np.random.seed(5) >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50)) >>> s = pd.Series(x) >>> s.plot() A lag plot with ``lag=1`` returns .. plot:: :context: close-figs >>> pd.plotting.lag_plot(s, lag=1) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds) def autocorrelation_plot(series, ax=None, **kwargs): """ Autocorrelation plot for time series. Parameters ---------- series : Time series ax : Matplotlib axis object, optional **kwargs Options to pass to matplotlib plotting method. Returns ------- class:`matplotlib.axis.Axes` Examples -------- The horizontal lines in the plot correspond to 95% and 99% confidence bands. The dashed line is 99% confidence band. .. plot:: :context: close-figs >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) >>> pd.plotting.autocorrelation_plot(s) """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs) class _Options(dict): """ Stores pandas plotting options. Allows for parameter aliasing so you can just use parameter names that are the same as the plot function parameters, but is stored in a canonical format that makes it easy to breakdown into groups later. """ # alias so the names are same as plotting method parameter names _ALIASES = {"x_compat": "xaxis.compat"} _DEFAULT_KEYS = ["xaxis.compat"] def __init__(self, deprecated=False): self._deprecated = deprecated super().__setitem__("xaxis.compat", False) def __getitem__(self, key): key = self._get_canonical_key(key) if key not in self: raise ValueError(f"{key} is not a valid pandas plotting option") return super().__getitem__(key) def __setitem__(self, key, value): key = self._get_canonical_key(key) return super().__setitem__(key, value) def __delitem__(self, key): key = self._get_canonical_key(key) if key in self._DEFAULT_KEYS: raise ValueError(f"Cannot remove default parameter {key}") return super().__delitem__(key) def __contains__(self, key) -> bool: key = self._get_canonical_key(key) return super().__contains__(key) def reset(self): """ Reset the option store to its initial state Returns ------- None """ self.__init__() def _get_canonical_key(self, key): return self._ALIASES.get(key, key) @contextmanager def use(self, key, value): """ Temporarily set a parameter value using the with statement. Aliasing allowed. """ old_value = self[key] try: self[key] = value yield self finally: self[key] = old_value plot_params = _Options()
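The `_Options` store defined at the end of the file above is exposed publicly as `pandas.plotting.plot_params`. A minimal sketch of the key aliasing and the `use` context manager follows; the printed values assume the default configuration and are illustrative rather than taken from the record.

import pandas as pd

# "x_compat" is an alias that resolves to the canonical "xaxis.compat" key
print(pd.plotting.plot_params["x_compat"])        # False by default

with pd.plotting.plot_params.use("x_compat", True):
    print(pd.plotting.plot_params["x_compat"])    # True inside the block

print(pd.plotting.plot_params["x_compat"])        # restored on exit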
import pytest from pandas import Index, MultiIndex, Series import pandas._testing as tm class TestSeriesRenameAxis: def test_rename_axis_mapper(self): # GH 19978 mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"]) ser = Series(list(range(len(mi))), index=mi) result = ser.rename_axis(index={"ll": "foo"}) assert result.index.names == ["foo", "nn"] result = ser.rename_axis(index=str.upper, axis=0) assert result.index.names == ["LL", "NN"] result = ser.rename_axis(index=["foo", "goo"]) assert result.index.names == ["foo", "goo"] with pytest.raises(TypeError, match="unexpected"): ser.rename_axis(columns="wrong") def test_rename_axis_inplace(self, datetime_series): # GH 15704 expected = datetime_series.rename_axis("foo") result = datetime_series no_return = result.rename_axis("foo", inplace=True) assert no_return is None tm.assert_series_equal(result, expected) @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}]) def test_rename_axis_none(self, kwargs): # GH 25034 index = Index(list("abc"), name="foo") ser = Series([1, 2, 3], index=index) result = ser.rename_axis(**kwargs) expected_index = index.rename(None) if kwargs else index expected = Series([1, 2, 3], index=expected_index) tm.assert_series_equal(result, expected)
TomAugspurger/pandas
pandas/tests/series/methods/test_rename_axis.py
pandas/plotting/_misc.py
""" Helper functions to generate range-like data for DatetimeArray (and possibly TimedeltaArray/PeriodArray) """ from typing import Union import numpy as np from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp from pandas.tseries.offsets import DateOffset def generate_regular_range( start: Union[Timestamp, Timedelta], end: Union[Timestamp, Timedelta], periods: int, freq: DateOffset, ): """ Generate a range of dates or timestamps with the spans between dates described by the given `freq` DateOffset. Parameters ---------- start : Timedelta, Timestamp or None First point of produced date range. end : Timedelta, Timestamp or None Last point of produced date range. periods : int Number of periods in produced date range. freq : Tick Describes space between dates in produced date range. Returns ------- ndarray[np.int64] Representing nanoseconds. """ start = start.value if start is not None else None end = end.value if end is not None else None stride = freq.nanos if periods is None: b = start # cannot just use e = Timestamp(end) + 1 because arange breaks when # stride is too large, see GH10887 e = b + (end - b) // stride * stride + stride // 2 + 1 elif start is not None: b = start e = _generate_range_overflow_safe(b, periods, stride, side="start") elif end is not None: e = end + stride b = _generate_range_overflow_safe(e, periods, stride, side="end") else: raise ValueError( "at least 'start' or 'end' should be specified if a 'period' is given." ) with np.errstate(over="raise"): # If the range is sufficiently large, np.arange may overflow # and incorrectly return an empty array if not caught. try: values = np.arange(b, e, stride, dtype=np.int64) except FloatingPointError: xdr = [b] while xdr[-1] != e: xdr.append(xdr[-1] + stride) values = np.array(xdr[:-1], dtype=np.int64) return values def _generate_range_overflow_safe( endpoint: int, periods: int, stride: int, side: str = "start" ) -> int: """ Calculate the second endpoint for passing to np.arange, checking to avoid an integer overflow. Catch OverflowError and re-raise as OutOfBoundsDatetime. Parameters ---------- endpoint : int nanosecond timestamp of the known endpoint of the desired range periods : int number of periods in the desired range stride : int nanoseconds between periods in the desired range side : {'start', 'end'} which end of the range `endpoint` refers to Returns ------- other_end : int Raises ------ OutOfBoundsDatetime """ # GH#14187 raise instead of incorrectly wrapping around assert side in ["start", "end"] i64max = np.uint64(np.iinfo(np.int64).max) msg = f"Cannot generate range with {side}={endpoint} and periods={periods}" with np.errstate(over="raise"): # if periods * strides cannot be multiplied within the *uint64* bounds, # we cannot salvage the operation by recursing, so raise try: addend = np.uint64(periods) * np.uint64(np.abs(stride)) except FloatingPointError as err: raise OutOfBoundsDatetime(msg) from err if np.abs(addend) <= i64max: # relatively easy case without casting concerns return _generate_range_overflow_safe_signed(endpoint, periods, stride, side) elif (endpoint > 0 and side == "start" and stride > 0) or ( endpoint < 0 and side == "end" and stride > 0 ): # no chance of not-overflowing raise OutOfBoundsDatetime(msg) elif side == "end" and endpoint > i64max and endpoint - stride <= i64max: # in _generate_regular_range we added `stride` thereby overflowing # the bounds. Adjust to fix this. 
return _generate_range_overflow_safe( endpoint - stride, periods - 1, stride, side ) # split into smaller pieces mid_periods = periods // 2 remaining = periods - mid_periods assert 0 < remaining < periods, (remaining, periods, endpoint, stride) midpoint = _generate_range_overflow_safe(endpoint, mid_periods, stride, side) return _generate_range_overflow_safe(midpoint, remaining, stride, side) def _generate_range_overflow_safe_signed( endpoint: int, periods: int, stride: int, side: str ) -> int: """ A special case for _generate_range_overflow_safe where `periods * stride` can be calculated without overflowing int64 bounds. """ assert side in ["start", "end"] if side == "end": stride *= -1 with np.errstate(over="raise"): addend = np.int64(periods) * np.int64(stride) try: # easy case with no overflows return np.int64(endpoint) + addend except (FloatingPointError, OverflowError): # with endpoint negative and addend positive we risk # FloatingPointError; with reversed signed we risk OverflowError pass # if stride and endpoint had opposite signs, then endpoint + addend # should never overflow. so they must have the same signs assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0) if stride > 0: # watch out for very special case in which we just slightly # exceed implementation bounds, but when passing the result to # np.arange will get a result slightly within the bounds result = np.uint64(endpoint) + np.uint64(addend) i64max = np.uint64(np.iinfo(np.int64).max) assert result > i64max if result <= i64max + np.uint64(stride): return result raise OutOfBoundsDatetime( f"Cannot generate range with {side}={endpoint} and periods={periods}" )
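These helpers sit behind the public `pd.date_range` for tick frequencies. The sketch below is a hedged illustration of the overflow-safe endpoint check: the dates are chosen only because they sit near the int64 nanosecond limit (`Timestamp.max` is 2262-04-11), and the exact error message may differ between pandas versions.

import pandas as pd
from pandas.errors import OutOfBoundsDatetime

# A normal, in-bounds range
print(pd.date_range("2000-01-01", periods=3, freq="D"))

try:
    # Five daily periods from here would pass Timestamp.max, so the
    # overflow-safe endpoint computation raises instead of wrapping around.
    pd.date_range("2262-04-10", periods=5, freq="D")
except OutOfBoundsDatetime as err:
    print("out of bounds:", err)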
import pytest from pandas import Index, MultiIndex, Series import pandas._testing as tm class TestSeriesRenameAxis: def test_rename_axis_mapper(self): # GH 19978 mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"]) ser = Series(list(range(len(mi))), index=mi) result = ser.rename_axis(index={"ll": "foo"}) assert result.index.names == ["foo", "nn"] result = ser.rename_axis(index=str.upper, axis=0) assert result.index.names == ["LL", "NN"] result = ser.rename_axis(index=["foo", "goo"]) assert result.index.names == ["foo", "goo"] with pytest.raises(TypeError, match="unexpected"): ser.rename_axis(columns="wrong") def test_rename_axis_inplace(self, datetime_series): # GH 15704 expected = datetime_series.rename_axis("foo") result = datetime_series no_return = result.rename_axis("foo", inplace=True) assert no_return is None tm.assert_series_equal(result, expected) @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}]) def test_rename_axis_none(self, kwargs): # GH 25034 index = Index(list("abc"), name="foo") ser = Series([1, 2, 3], index=index) result = ser.rename_axis(**kwargs) expected_index = index.rename(None) if kwargs else index expected = Series([1, 2, 3], index=expected_index) tm.assert_series_equal(result, expected)
TomAugspurger/pandas
pandas/tests/series/methods/test_rename_axis.py
pandas/core/arrays/_ranges.py
""" NTP sources - remote clock info from ``ntpq`` and ``chronyc`` ============================================================= The parsers here provide information about the time sources used by ``ntpd`` and ``chronyd``. These are gathered from the output of the ``ntpq -pn`` and ``chronyc sources`` commands respectively. There is also a parser for parsing the output of ``ntpq -c 'rv 0 leap'`` command to give leap second status. Parsers in this module are: ChronycSources - command ``/usr/bin/chronyc sources`` ----------------------------------------------------- NtpqLeap - command ``/usr/sbin/ntpq -c 'rv 0 leap'`` ---------------------------------------------------- NtpqPn - command ``/usr/sbin/ntpq -pn`` --------------------------------------- """ import re from .. import parser, CommandParser from insights.core.dr import SkipComponent from insights.specs import Specs @parser(Specs.chronyc_sources) class ChronycSources(CommandParser): """ Chronyc Sources parser Parses the list of NTP time sources in use by ``chronyd``. So far only the source IP address and the mode and the state flags are retrieved. Sample input:: 210 Number of sources = 6 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^- 10.20.30.40 2 9 377 95 -1345us[-1345us] +/- 87ms ^- 10.56.72.8 2 10 377 949 -3449us[-3483us] +/- 120ms ^* 10.64.108.95 2 10 377 371 -91us[ -128us] +/- 30ms ^- 10.8.205.17 2 8 377 27 +7161us[+7161us] +/- 52ms Examples: >>> sources = shared[ChronycSources].data >>> len(sources) 4 >>> sources[0]['source'] '10.20.30.40' >>> sources[0]['mode'] '^' >>> sources[0]['state'] '-' """ def parse_content(self, content): """ Get source, mode and state for chrony """ self.data = [] for row in content[3:]: if row.strip(): values = row.split(" ", 2) self.data.append({"source": values[1], "mode": values[0][0], "state": values[0][1]}) @parser(Specs.ntpq_leap) class NtpqLeap(CommandParser): """ Converts the output of ``ntpq -c 'rv 0 leap'`` into a dictionary in the ``data`` property, and sets the ``leap`` property to the value of the 'leap' key if found. Sample input:: leap=00 Examples: >>> print shared[NtpqLeap].leap # same data '00' """ def parse_content(self, content): if "Connection refused" in content[0]: raise SkipComponent("NTP service is down and connection refused") self.data = {} for line in content: m = re.search(r'leap=(\d*)', line) if m: self.data["leap"] = m.group(1) @property def leap(self): return self.data.get('leap') @parser(Specs.ntpq_pn) class NtpqPn(CommandParser): """ Get source and flag for each NTP time source from the output of ``/usr/sbin/ntpq -pn``. Currently, this only captures the source IP address and the 'flag' character in the first column at this stage. Therefore it will need to be extended should you wish to determine the stratum, polling rate or other properties of the source. Sample input:: remote refid st t when poll reach delay offset jitter ============================================================================== +10.20.30.40 192.231.203.132 3 u 638 1024 377 0.242 2.461 1.886 *2001:388:608c:8 .GPS. 
1 u 371 1024 377 29.323 1.939 1.312 -2001:44b8:1::1 216.218.254.202 2 u 396 1024 377 37.869 -3.340 6.458 +150.203.1.10 202.6.131.118 2 u 509 1024 377 20.135 0.800 3.260 Examples: >>> sources = shared[NtpqPn].data >>> len(sources) 4 >>> sources[0] {'flag': '*', 'source', '10.20.30.40'} """ def parse_content(self, content): if "Connection refused" in content[0]: raise SkipComponent("NTP service is down and connection refused") self.data = [] for row in content[2:]: if row.strip(): values = row.split(" ", 2) if row.startswith(" "): self.data.append({"source": values[1], "flag": " "}) else: self.data.append({"source": values[0][1:], "flag": values[0][0]})
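A standalone sketch of the flag/source split that `NtpqPn.parse_content` applies to each data row; the sample row is taken from the docstring above, with column spacing approximated.

row = "*2001:388:608c:8 .GPS.            1 u  371 1024  377   29.323    1.939   1.312"
values = row.split(" ", 2)
# The first character of the first field is the peer flag; the rest is the source.
entry = {"source": values[0][1:], "flag": values[0][0]}
print(entry)  # {'source': '2001:388:608c:8', 'flag': '*'}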
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED)) def test_doc_test(): dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT)) dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT)) env = { 'container': dic, 'image': dii, } failed, total = doctest.testmod(podman_inspect, globs=env) assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/parsers/ntp_sources.py
from insights.parsers.virt_what import VirtWhat from insights.tests import context_wrap errors = ["virt-what: virt-what-cpuid-helper program not found in $PATH"] VW_OUT1 = """ kvm """.strip() # baremetal returns blank VW_OUT2 = """ """.strip() # occasionally we have more than 1 line of output VW_OUT3 = """ xen xen-dom0 aws """.strip() def test_kvm(): v1 = VirtWhat(context_wrap(VW_OUT1)) assert v1.is_virtual is True assert v1.generic == "kvm" assert v1.specifics == [] assert v1.errors == [] def test_bare_metal(): v2 = VirtWhat(context_wrap(VW_OUT2)) assert v2.is_physical is True assert v2.generic == "baremetal" assert v2.specifics == [] assert v2.errors == [] def test_xen(): v3 = VirtWhat(context_wrap(VW_OUT3)) assert v3.is_virtual is True assert v3.generic == "xen" assert "xen-dom0" in v3 assert "xen" in v3 assert "aws" in v3 assert v3.errors == [] def test_error_handling(): v = VirtWhat(context_wrap(errors[0])) assert v.generic == '' assert v.specifics == [] assert v.is_virtual is None assert v.is_physical is None assert v.errors == errors
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED)) def test_doc_test(): dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT)) dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT)) env = { 'container': dic, 'image': dii, } failed, total = doctest.testmod(podman_inspect, globs=env) assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/parsers/tests/test_virt_what.py
""" RsyslogConf - file ``/etc/rsyslog.conf`` ======================================== The rsyslog configuration files can include statements with two different line based formats along with snippets of 'RainerScript' that can span multiple lines. See http://www.rsyslog.com/doc/master/configuration/basic_structure.html#statement-types Due to high parsing complexity, this parser presents a simple line-based view of the file that meets the needs of the current rules. """ from .. import Parser, parser, get_active_lines from insights.specs import Specs from insights.core.filters import add_filter add_filter(Specs.rsyslog_conf, ["{", "}", "(", ")"]) @parser(Specs.rsyslog_conf) class RsyslogConf(Parser, list): """ Parses `/etc/rsyslog.conf` content. Skips lines that begin with hash ("#") or are only whitespace. Attributes: data (list): List of lines in the file that don't start with '#' and aren't whitespace. Example: >>> type(rsysconf) <class 'insights.parsers.rsyslog_conf.RsyslogConf'> >>> len(rsysconf) 13 >>> rsysconf[2] 'authpriv.* /var/log/secure' """ def __init__(self, *args, **kwargs): super(RsyslogConf, self).__init__(*args, **kwargs) def parse_content(self, content): data = [] brace_flag = False parenthesis_flag = False parenthesis_string = "" brace_string = "" for line in get_active_lines(content): l_strip = line.strip() # Combine multi lines in brace into one line if brace_flag: brace_string = brace_string + " " + l_strip if "}" in l_strip: data.append(brace_string) brace_string = "" brace_flag = False continue else: if "{" in l_strip: if "}" in l_strip: data.append(l_strip) else: brace_flag = True brace_string = l_strip continue # Combine multi lines in parenthesis and not in brace into one line if parenthesis_flag: parenthesis_string = parenthesis_string + " " + l_strip if ")" in l_strip: data.append(parenthesis_string) parenthesis_string = "" parenthesis_flag = False continue else: if "(" in l_strip: if ")" in l_strip: data.append(l_strip) else: parenthesis_flag = True parenthesis_string = l_strip continue else: data.append(l_strip) self.extend(data)
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED)) def test_doc_test(): dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT)) dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT)) env = { 'container': dic, 'image': dii, } failed, total = doctest.testmod(podman_inspect, globs=env) assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/parsers/rsyslog_conf.py
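A minimal usage sketch for the rsyslog_conf.py parser shown in the row above. The docstring there notes that RainerScript statements can span multiple lines, and parse_content folds brace- and parenthesis-delimited spans into single entries; this sketch illustrates that folding. SAMPLE_RSYSLOG_CONF is a hypothetical config snippet written for illustration, not taken from the dataset.

from insights.parsers.rsyslog_conf import RsyslogConf
from insights.tests import context_wrap

# Hypothetical sample: the module(...) statement and the if-block each span
# several lines in the raw file.
SAMPLE_RSYSLOG_CONF = """
# Log authpriv messages to a dedicated file
authpriv.*   /var/log/secure
module(load="imtcp"
       MaxSessions="500")
if $programname == 'prog' then {
    action(type="omfile" file="/var/log/prog.log")
}
"""

rsys = RsyslogConf(context_wrap(SAMPLE_RSYSLOG_CONF))

# Comment and blank lines are dropped, and each multi-line statement is folded
# into one entry, so this sample yields three logical lines.
print(len(rsys))   # 3
print(rsys[1])     # module(load="imtcp" MaxSessions="500")
print(rsys[2])     # if $programname == 'prog' then { action(type="omfile" file="/var/log/prog.log") }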
import doctest from datetime import datetime from insights.parsers import ovirt_engine_log from insights.tests import context_wrap SERVER_LOG = """ 2018-01-17 01:46:15,022+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-1) WFLYSRV0027: Starting deployment of "restapi.war" (runtime-name: "restapi.war") 2018-01-17 01:46:15,022+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-5) WFLYSRV0027: Starting deployment of "rhev.ear" (runtime-name: "rhev.ear") 2018-01-17 01:46:15,022+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-6) WFLYSRV0027: Starting deployment of "apidoc.war" (runtime-name: "apidoc.war") 2018-01-17 01:46:15,022+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-4) WFLYSRV0027: Starting deployment of "engine.ear" (runtime-name: "engine.ear") 2018-01-17 01:46:15,022+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-3) WFLYSRV0027: Starting deployment of "ovirt-web-ui.war" (runtime-name: "ovirt-web-ui.war") 2018-01-17 01:46:15,035+05 INFO [org.jboss.as.remoting] (MSC service thread 1-8) WFLYRMT0001: Listening on 127.0.0.1:8707 2018-01-17 01:46:15,064+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-6) WFLYSRV0207: Starting subdeployment (runtime-name: "rhev.war") 2018-01-17 01:46:15,532+05 INFO [org.wildfly.security] (MSC service thread 1-3) ELY00001: WildFly Elytron version 1.1.7.Final-redhat-1 2018-01-17 01:46:15,823+05 INFO [org.wildfly.extension.undertow] (ServerService Thread Pool -- 47) WFLYUT0021: Registered web context: '/ovirt-engine/web-ui' for server 'default-server' 2018-01-17 01:46:15,823+05 INFO [org.wildfly.extension.undertow] (ServerService Thread Pool -- 45) WFLYUT0021: Registered web context: '/ovirt-engine/rhev' for server 'default-server' 2018-01-17 01:46:15,823+05 INFO [org.wildfly.extension.undertow] (ServerService Thread Pool -- 43) WFLYUT0021: Registered web context: '/ovirt-engine/apidoc' for server 'default-server' 2018-01-17 01:46:15,834+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-5) WFLYSRV0207: Starting subdeployment (runtime-name: "bll.jar") 2018-01-17 01:46:15,834+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-3) WFLYSRV0207: Starting subdeployment (runtime-name: "userportal.war") 2018-01-17 01:46:15,834+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-5) WFLYSRV0207: Starting subdeployment (runtime-name: "enginesso.war") 2018-01-17 01:46:15,834+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-4) WFLYSRV0207: Starting subdeployment (runtime-name: "welcome.war") 2018-01-17 01:46:15,834+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-8) WFLYSRV0207: Starting subdeployment (runtime-name: "docs.war") 2018-01-17 01:46:15,834+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-7) WFLYSRV0207: Starting subdeployment (runtime-name: "root.war") 2018-01-17 01:46:15,834+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-6) WFLYSRV0207: Starting subdeployment (runtime-name: "services.war") 2018-01-17 01:46:16,834+05 INFO [org.jboss.as.server.deployment] (MSC service thread 1-1) WFLYSRV0207: Starting subdeployment (runtime-name: "webadmin.war") 2018-01-17 01:46:17,739+05 WARN [org.jboss.as.dependency.unsupported] (MSC service thread 1-7) WFLYSRV0019: Deployment "deployment.engine.ear" is using an unsupported module ("org.dom4j") which may be changed or removed in future versions without notice. 
""".strip() UI_LOG = """ 2018-01-24 05:31:26,243+05 ERROR [org.ovirt.engine.ui.frontend.server.gwt.OvirtRemoteLoggingService] (default task-134) [] Permutation name: C068E8B2E40A504D3054A1BDCF2A72BB 2018-01-24 05:32:26,243+05 ERROR [org.ovirt.engine.ui.frontend.server.gwt.OvirtRemoteLoggingService] (default task-134) [] Uncaught exception: com.google.gwt.core.client.JavaScriptException: (TypeError) """.strip() # We cannot test this at the moment as LogFileOutput cannot read continuation multi line. Please see open issue#1256 CONSOLE_LOG = """ 2018-08-01 09:15:14 Full thread dump OpenJDK 64-Bit Server VM (25.181-b13 mixed mode): "ServerService Thread Pool -- 61" #118 prio=5 os_prio=0 tid=0x0000000007f1d000 nid=0x68c waiting on condition [0x00007f716d6bc000] java.lang.Thread.State: TIMED_WAITING (parking) at sun.misc.Unsafe.park(Native Method) - parking to wait for <0x00000006c7fc5480> (a java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809) at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) at org.jboss.threads.JBossThread.run(JBossThread.java:320) "ServerService Thread Pool -- 60" #117 prio=5 os_prio=0 tid=0x000000000604c800 nid=0x689 waiting on condition [0x00007f716d7bd000] java.lang.Thread.State: WAITING (parking) at sun.misc.Unsafe.park(Native Method) - parking to wait for <0x00000006c7fc5480> (a java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject) at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1088) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809) at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) at org.jboss.threads.JBossThread.run(JBossThread.java:320) "xnio-file-watcher[Watcher for /usr/share/ovirt-engine/branding/rhv-2.brand/applications/rhv.ear/rhv.war/]-0" #98 daemon prio=5 os_prio=0 tid=0x00000000064a9000 nid=0x66d waiting on condition [0x00007f716e4a2000] java.lang.Thread.State: WAITING (parking) at sun.misc.Unsafe.park(Native Method) - parking to wait for <0x00000006c8bf3c28> (a java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject) at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039) at java.util.concurrent.LinkedBlockingDeque.takeFirst(LinkedBlockingDeque.java:492) at 
java.util.concurrent.LinkedBlockingDeque.take(LinkedBlockingDeque.java:680) at sun.nio.fs.AbstractWatchService.take(AbstractWatchService.java:118) at org.xnio.nio.WatchServiceFileSystemWatcher.run(WatchServiceFileSystemWatcher.java:86) at java.lang.Thread.run(Thread.java:748) "Thread-61" #97 daemon prio=5 os_prio=0 tid=0x00000000064a8800 nid=0x66b runnable [0x00007f716e5a3000] java.lang.Thread.State: RUNNABLE at sun.nio.fs.LinuxWatchService.poll(Native Method) at sun.nio.fs.LinuxWatchService.access$600(LinuxWatchService.java:47) at sun.nio.fs.LinuxWatchService$Poller.run(LinuxWatchService.java:314) at java.lang.Thread.run(Thread.java:748) "xnio-file-watcher[Watcher for /usr/share/ovirt-web-ui/ovirt-web-ui.war/]-0" #96 daemon prio=5 os_prio=0 tid=0x000000000639c800 nid=0x66a waiting on condition [0x00007f716e6a4000] java.lang.Thread.State: WAITING (parking) at sun.misc.Unsafe.park(Native Method) - parking to wait for <0x00000006c8bc5850> (a java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject) at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039) at java.util.concurrent.LinkedBlockingDeque.takeFirst(LinkedBlockingDeque.java:492) at java.util.concurrent.LinkedBlockingDeque.take(LinkedBlockingDeque.java:680) at sun.nio.fs.AbstractWatchService.take(AbstractWatchService.java:118) at org.xnio.nio.WatchServiceFileSystemWatcher.run(WatchServiceFileSystemWatcher.java:86) at java.lang.Thread.run(Thread.java:748) "Thread-62" #95 daemon prio=5 os_prio=0 tid=0x0000000005933800 nid=0x669 runnable [0x00007f716e7a5000] java.lang.Thread.State: RUNNABLE at sun.nio.fs.LinuxWatchService.poll(Native Method) at sun.nio.fs.LinuxWatchService.access$600(LinuxWatchService.java:47) at sun.nio.fs.LinuxWatchService$Poller.run(LinuxWatchService.java:314) at java.lang.Thread.run(Thread.java:748) """.strip() ENGINE_LOG = """ 2018-08-06 04:06:33,229+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'engine' is using 0 threads out of 500, 8 threads waiting for tasks and 0 tasks in queue. 2018-08-06 04:06:33,229+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'engineScheduled' is using 0 threads out of 100, 100 threads waiting for tasks. 2018-08-06 04:06:33,229+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'engineThreadMonitoring' is using 1 threads out of 1, 0 threads waiting for tasks. 2018-08-06 04:06:33,229+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'hostUpdatesChecker' is using 0 threads out of 5, 5 threads waiting for tasks. 2018-08-06 04:16:33,231+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'commandCoordinator' is using 0 threads out of 10, 2 threads waiting for tasks. 2018-08-06 04:16:33,231+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'default' is using 0 threads out of 1, 5 threads waiting for tasks. 
2018-08-06 04:16:33,231+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'engine' is using 0 threads out of 500, 8 threads waiting for tasks and 0 tasks in queue. 2018-08-06 04:16:33,231+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'engineScheduled' is using 0 threads out of 100, 100 threads waiting for tasks. 2018-08-06 04:16:33,231+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'engineThreadMonitoring' is using 1 threads out of 1, 0 threads waiting for tasks. 2018-08-06 04:16:33,231+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'hostUpdatesChecker' is using 0 threads out of 5, 5 threads waiting for tasks. 2018-08-06 04:26:33,233+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'commandCoordinator' is using 0 threads out of 10, 2 threads waiting for tasks. 2018-08-06 04:26:33,233+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'default' is using 0 threads out of 1, 5 threads waiting for tasks. 2018-08-06 04:26:33,233+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'engine' is using 0 threads out of 500, 8 threads waiting for tasks and 0 tasks in queue. 2018-08-22 00:16:14,357+05 INFO [org.ovirt.engine.core.vdsbroker.vdsbroker.HotUnplugLeaseVDSCommand] (default task-133) [e3bc976c-bc3e-4b41-807f-3a518169ad18] START, HotUnplugLeaseVDSCommand(HostName = example.com, LeaseVDSParameters:{hostId='bfa308ab-5add-4ad7-8f1c-389cb8dcf703', vmId='789489a3-be62-40e4-b13e-beb34ba5ff93'}), log id: 7a634963 """.strip() BOOT_LOG = """ 03:46:17,790 INFO [org.jboss.modules] JBoss Modules version 1.6.4.Final-redhat-1 03:46:18,067 INFO [org.jboss.msc] JBoss MSC version 1.2.7.SP1-redhat-1 03:46:18,181 INFO [org.jboss.as] WFLYSRV0049: JBoss EAP 7.1.3.GA (WildFly Core 3.0.16.Final-redhat-1) starting 03:46:19,126 INFO [org.jboss.as.controller.management-deprecated] WFLYCTL0028: Attribute 'security-realm' in the resource at address '/core-service=management/management-interface=native-interface' is deprecated, and may be removed in future version. See the attribute description in the output of the read-resource-description operation to learn more about the deprecation. 03:46:19,128 INFO [org.jboss.as.controller.management-deprecated] WFLYCTL0028: Attribute 'security-realm' in the resource at address '/core-service=management/management-interface=http-interface' is deprecated, and may be removed in future version. See the attribute description in the output of the read-resource-description operation to learn more about the deprecation. 03:46:19,208 INFO [org.jboss.as.server.deployment.scanner] WFLYDS0004: Found restapi.war in deployment directory. To trigger deployment create a file called restapi.war.dodeploy 03:46:19,208 INFO [org.jboss.as.server.deployment.scanner] WFLYDS0004: Found engine.ear in deployment directory. 
To trigger deployment create a file called engine.ear.dodeploy 03:46:19,208 INFO [org.jboss.as.server.deployment.scanner] WFLYDS0004: Found ovirt-web-ui.war in deployment directory. To trigger deployment create a file called ovirt-web-ui.war.dodeploy 03:46:19,208 INFO [org.jboss.as.server.deployment.scanner] WFLYDS0004: Found apidoc.war in deployment directory. To trigger deployment create a file called apidoc.war.dodeploy 03:46:19,208 INFO [org.jboss.as.server.deployment.scanner] WFLYDS0004: Found rhv.ear in deployment directory. To trigger deployment create a file called rhv.ear.dodeploy 03:46:19,238 INFO [org.jboss.as.server] WFLYSRV0039: Creating http management service using socket-binding (management) 03:46:19,242 INFO [org.xnio] XNIO version 3.5.5.Final-redhat-1 03:46:19,250 INFO [org.xnio.nio] XNIO NIO Implementation Version 3.5.5.Final-redhat-1 """.strip() def test_server_log(): server_log = ovirt_engine_log.ServerLog(context_wrap(SERVER_LOG)) assert 'is using an unsupported module' in server_log assert len(list(server_log.get_after(datetime(2018, 1, 17, 1, 46, 16, 0)))) == 2 matched_line = '2018-01-17 01:46:17,739+05 WARN [org.jboss.as.dependency.unsupported] (MSC service thread 1-7) WFLYSRV0019: Deployment "deployment.engine.ear" is using an unsupported module ("org.dom4j") which may be changed or removed in future versions without notice.' assert server_log.get('WARN')[-1].get('raw_message') == matched_line sec_lines = server_log.get('org.wildfly.security') assert len(sec_lines) == 1 assert sec_lines[0]['level'] == 'INFO' def test_ui_log(): ui_log = ovirt_engine_log.UILog(context_wrap(UI_LOG)) assert 'Permutation name' in ui_log assert len(list(ui_log.get_after(datetime(2018, 1, 24, 5, 31, 26, 0)))) == 2 exception_lines = ui_log.get('Uncaught exception') assert len(exception_lines) == 1 assert exception_lines[0].get('procname') == 'org.ovirt.engine.ui.frontend.server.gwt.OvirtRemoteLoggingService' assert exception_lines[0].get('level') == 'ERROR' def test_engine_log(): engine_log = ovirt_engine_log.EngineLog(context_wrap(ENGINE_LOG)) assert "Thread pool 'engine'" in engine_log assert len(list(engine_log.get_after(datetime(2018, 8, 6, 4, 16, 33, 0)))) == 10 matched_line = "2018-08-06 04:16:33,231+05 INFO [org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService] (EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool 'hostUpdatesChecker' is using 0 threads out of 5, 5 threads waiting for tasks." 
assert engine_log.get('hostUpdatesChecker')[-1].get('raw_message') == matched_line assert engine_log.get('vdsbroker')[-1].get('procname') == 'org.ovirt.engine.core.vdsbroker.vdsbroker.HotUnplugLeaseVDSCommand' def test_boot_log(): boot_log = ovirt_engine_log.BootLog(context_wrap(BOOT_LOG)) assert "Creating http management service using socket-binding" in boot_log xnio_lines = boot_log.get('xnio.nio') assert len(xnio_lines) == 1 assert xnio_lines[0].get('procname') == 'org.xnio.nio' assert xnio_lines[0].get('level') == 'INFO' assert xnio_lines[0].get('message') == 'XNIO NIO Implementation Version 3.5.5.Final-redhat-1' log_line = '2018-01-17 INFO [org.jboss.as.server.deployment] (MSC service thread 1-1) WFLYSRV0027: Starting deployment of "restapi.war" (runtime-name: "restapi.war")' boot_log = ovirt_engine_log.BootLog(context_wrap(log_line)) assert "restapi" in boot_log assert boot_log.get('restapi')[0]['raw_message'] == log_line def test_documentation(): failed_count, tests = doctest.testmod( ovirt_engine_log, globs={'server_log': ovirt_engine_log.ServerLog(context_wrap(SERVER_LOG)), 'boot_log': ovirt_engine_log.BootLog(context_wrap(BOOT_LOG)), 'engine_log': ovirt_engine_log.EngineLog(context_wrap(ENGINE_LOG)), 'ui_log': ovirt_engine_log.UILog(context_wrap(UI_LOG))} ) assert failed_count == 0
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
        podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED))


def test_doc_test():
    dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT))
    dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT))
    env = {
        'container': dic,
        'image': dii,
    }
    failed, total = doctest.testmod(podman_inspect, globs=env)
    assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/parsers/tests/test_ovirt_engine_log.py
"""
Configuration File Permissions parsers
======================================

Parsers included in this module are:

SshdConfigPerms - command ``/bin/ls -l /etc/ssh/sshd_config``
-------------------------------------------------------------

Grub1ConfigPerms - command ``/bin/ls -l /boot/grub/grub.conf``
--------------------------------------------------------------

Grub2ConfigPerms - command ``/bin/ls -l /boot/grub2/grub.cfg``
--------------------------------------------------------------
"""
from insights.core import CommandParser
from insights.core.plugins import parser
from insights.specs import Specs
from insights.util.file_permissions import FilePermissions


class FilePermissionsParser(CommandParser, FilePermissions):
    """
    Base class for ``SshdConfigPerms``, ``Grub1ConfigPerms`` and ``Grub2ConfigPerms`` classes.

    Attributes:
        line (string): the line from the command output
    """

    def __init__(self, context):
        self.line = ""
        CommandParser.__init__(self, context)
        FilePermissions.__init__(self, self.line)

    def parse_content(self, content):
        non_empty_lines = [line for line in content if line]  # get rid of blank lines
        self.line = non_empty_lines[0]


@parser(Specs.sshd_config_perms)
class SshdConfigPerms(FilePermissionsParser):
    """
    Class for parsing ``/bin/ls -l /etc/ssh/sshd_config`` command.

    Sample output of this command is::

        -rw-------. 1 root root 4179 Dec 1 2014 /etc/ssh/sshd_config

    Examples:
        >>> type(sshd_perms)
        <class 'insights.parsers.config_file_perms.SshdConfigPerms'>
        >>> sshd_perms.line
        '-rw-------. 1 root root 4179 Dec 1 2014 /etc/ssh/sshd_config'
    """

    def __init__(self, context):
        super(SshdConfigPerms, self).__init__(context)


@parser(Specs.grub1_config_perms)
class Grub1ConfigPerms(FilePermissionsParser):
    """
    Class for parsing ``/bin/ls -l /boot/grub/grub.conf`` command.

    Sample output of this command is::

        -rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub/grub.conf

    Examples:
        >>> type(grub1_perms)
        <class 'insights.parsers.config_file_perms.Grub1ConfigPerms'>
        >>> grub1_perms.line
        '-rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub/grub.conf'
    """

    def __init__(self, context):
        super(Grub1ConfigPerms, self).__init__(context)


@parser(Specs.grub_config_perms)
class Grub2ConfigPerms(FilePermissionsParser):
    """
    Class for parsing ``/bin/ls -l /boot/grub2/grub.cfg`` command.

    Sample output of this command is::

        -rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub2/grub.cfg

    Examples:
        >>> type(grub2_perms)
        <class 'insights.parsers.config_file_perms.Grub2ConfigPerms'>
        >>> grub2_perms.line
        '-rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub2/grub.cfg'
    """

    def __init__(self, context):
        super(Grub2ConfigPerms, self).__init__(context)
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED)) def test_doc_test(): dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT)) dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT)) env = { 'container': dic, 'image': dii, } failed, total = doctest.testmod(podman_inspect, globs=env) assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/parsers/config_file_perms.py
"""
PciRportTargetDiskPath
======================

Module for parsing the output of command ``find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f``.
"""
from insights.parsers import ParseException, SkipException
from insights import parser, CommandParser
from insights.specs import Specs


@parser(Specs.pci_rport_target_disk_paths)
class PciRportTargetDiskPaths(CommandParser):
    """
    Class for parsing ``find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f`` command output.

    Typical output of command ``find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f`` with
    the filter of 'block' looks like::

        /sys/devices/pci0000:00/0000:00:01.0/0000:04:00.6/host1/rport-1:0-1/target1:0:0/1:0:0:0/block/sdb/stat
        /sys/devices/pci0000:00/0000:00:01.0/0000:04:00.7/host2/rport-2:0-2/target2:0:0/2:0:0:0/block/sdc/stat
        /sys/devices/pci0000:00/0000:00:02.2/0000:02:00.0/host0/target0:1:0/0:1:0:0/block/sda/stat

    The Original data parsed looks like::

        [
            {
                'target': 'target1:0:0',
                'devnode': 'sdb',
                'host_channel_id_lun': '1:0:0:0',
                'pci_id': '0000:04:00.6',
                'host': 'host1',
                'rport': 'rport-1:0-1'
            },
            {
                'target': 'target2:0:0',
                'devnode': 'sdc',
                'host_channel_id_lun': '2:0:0:0',
                'pci_id': '0000:04:00.7',
                'host': 'host2',
                'rport': 'rport-2:0-2'
            },
            {
                'target': 'target0:1:0',
                'devnode': 'sda',
                'host_channel_id_lun': '0:1:0:0',
                'pci_id': '0000:02:00.0',
                'host': 'host0',
            }
        ]

    Examples:
        >>> type(pd)
        <class 'insights.parsers.pci_rport_target_disk_paths.PciRportTargetDiskPaths'>
        >>> pd.pci_id
        ['0000:02:00.0', '0000:04:00.6', '0000:04:00.7']
        >>> pd.host
        ['host0', 'host1', 'host2']
        >>> pd.target
        ['target0:1:0', 'target1:0:0', 'target2:0:0']
        >>> pd.host_channel_id_lun
        ['0:1:0:0', '1:0:0:0', '2:0:0:0']
        >>> pd.devnode
        ['sda', 'sdb', 'sdc']

    Raises:
        ParseException: Input content is not available to parse
        SkipException: Input content is empty

    Attributes:
        path_list (list): the result parsed
    """

    @property
    def pci_id(self):
        """
        The all pci_id(s) from parsed content.

        Returns:
            list: pci id
        """
        return sorted(self.__pci_id_attributes)

    @property
    def devnode(self):
        """
        The all devicenode(s) from parsed content.

        Returns:
            list: device nodes
        """
        return sorted(self.__devnode_attributes)

    @property
    def host(self):
        """
        The all host(s) from parsed content.

        Returns:
            list: hosts
        """
        return sorted(self.__host_attributes)

    @property
    def rport(self):
        """
        The all rport(s) from parsed content.

        Returns:
            list: rports
        """
        return sorted(self.__rport_attributes)

    @property
    def target(self):
        """
        The all target(s) from parsed content.

        Returns:
            list: targets
        """
        return sorted(self.__target_attributes)

    @property
    def host_channel_id_lun(self):
        """
        The all host_channel_id_lun(s) from parsed content

        Returns:
            list: host_channel_id_lun
        """
        return sorted(self.__host_channel_id_lun_attributes)

    def parse_content(self, content):
        EMPTY = "Input content is empty"
        BADWD = "No useful data parsed in line: '{0}'"

        KEY_MAP = [
            # key, required chars, relative index
            {'key': 'host', 'chars': 'host', 'idx': 0},
            {'key': 'pci_id', 'chars': 'pci', 'idx': 2},
            {'key': 'rport', 'chars': 'rport-', 'idx': 0},
            {'key': 'target', 'chars': 'target', 'idx': 0},
            {'key': 'devnode', 'chars': 'block', 'idx': 1},
            {'key': 'host_channel_id_lun', 'chars': 'block', 'idx': -1},
        ]

        if not content:
            raise SkipException(EMPTY)

        pci = []
        self.__host_attributes = set()
        self.__rport_attributes = set()
        self.__target_attributes = set()
        self.__pci_id_attributes = set()
        self.__devnode_attributes = set()
        self.__host_channel_id_lun_attributes = set()

        for line in content:
            line_sp = list(filter(None, line.strip().split('/')))
            temp_pci = {}
            for i, l in enumerate(line_sp):
                for km in KEY_MAP:
                    if l.startswith(km['chars']):
                        temp_pci[km['key']] = line_sp[i + km['idx']]
            len_of_tp = len(temp_pci)
            if len_of_tp == 6 or (len_of_tp == 5 and 'rport' not in temp_pci):
                pci.append(temp_pci)
                self.__host_attributes.add(temp_pci['host'])
                if temp_pci.get('rport'):
                    self.__rport_attributes.add(temp_pci['rport'])
                self.__target_attributes.add(temp_pci['target'])
                self.__pci_id_attributes.add(temp_pci['pci_id'])
                self.__devnode_attributes.add(temp_pci['devnode'])
                self.__host_channel_id_lun_attributes.add(temp_pci['host_channel_id_lun'])
            else:
                raise ParseException(BADWD.format(line))

        self.path_list = pci
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED)) def test_doc_test(): dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT)) dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT)) env = { 'container': dic, 'image': dii, } failed, total = doctest.testmod(podman_inspect, globs=env) assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/parsers/pci_rport_target_disk_paths.py
from insights.core import JSONParser
from insights.core.plugins import parser
from insights.specs import Specs


@parser(Specs.tags)
class Tags(JSONParser):
    """ Class for parsing the content of ``tags.json``."""
    pass
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED)) def test_doc_test(): dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT)) dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT)) env = { 'container': dic, 'image': dii, } failed, total = doctest.testmod(podman_inspect, globs=env) assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/parsers/tags.py
""" InitProcessCgroup - File ``/proc/1/cgroup`` =========================================== This parser reads the content of ``/proc/1/cgroup``. This file shows the cgroup detail of init process. The format of the content is like key-value. We can also use this info to check if the archive is from container or host. """ from .. import parser, CommandParser, LegacyItemAccess from insights.specs import Specs @parser(Specs.init_process_cgroup) class InitProcessCgroup(CommandParser, LegacyItemAccess): """ Class ``InitProcessCgroup`` parses the content of the ``/proc/1/cgroup``. Attributes: is_container (bool): It is used to check if a archive is from host or container. Return True if the archive is from container. A small sample of the content of this file looks like:: 11:hugetlb:/ 10:memory:/ 9:devices:/ 8:pids:/ 7:perf_event:/ 6:net_prio,net_cls:/ 5:blkio:/ 4:freezer:/ 3:cpuacct,cpu:/ 2:cpuset:/ 1:name=systemd:/ Examples: >>> type(cgroupinfo) <class 'insights.parsers.init_process_cgroup.InitProcessCgroup'> >>> cgroupinfo["memory"] ["10", "/"] >>> cgroupinfo.is_container False """ def parse_content(self, content): self.data = {} self.is_container = False for line in content: values = line.split(":") self.data[values[1]] = [values[0], values[2]] if "system.slice/docker-" in values[2] or 'machine.slice/libpod-' in values[2]: self.is_container = True
insights/parsers/init_process_cgroup.py
from insights.parsers.neutron_ovs_agent_log import NeutronOVSAgentLog
from insights.tests import context_wrap
from datetime import datetime

LOG = """
2016-11-09 14:39:25.348 3153 WARNING oslo_config.cfg [-] Option "rabbit_password" from group "oslo_messaging_rabbit" is deprecated for removal. Its value may be silently ignored in the future.
2016-11-09 14:39:25.348 3153 WARNING oslo_config.cfg [-] Option "rabbit_userid" from group "oslo_messaging_rabbit" is deprecated for removal. Its value may be silently ignored in the future.
2016-11-09 14:39:25.352 3153 INFO ryu.base.app_manager [-] loading app neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native.ovs_ryuapp
2016-11-09 14:39:27.171 3153 INFO ryu.base.app_manager [-] loading app ryu.app.ofctl.service
2016-11-09 14:39:27.190 3153 INFO ryu.base.app_manager [-] loading app ryu.controller.ofp_handler
"""


def test_neutron_ovs_agent_log():
    log = NeutronOVSAgentLog(context_wrap(LOG))
    assert len(log.get("WARNING")) == 2
    assert len(list(log.get_after(datetime(2016, 11, 9, 14, 39, 26)))) == 2
insights/parsers/tests/test_neutron_ovs_agent_log.py
from .. import XMLParser, parser
from insights.specs import Specs


@parser(Specs.rhn_entitlement_cert_xml)
class RHNCertConf(XMLParser):
    """Class to parse the xml files ``rhn-entitlement-cert.xml*``.

    Attributes:
        data (dict): A dictionary like::

            {
                'product': 'RHN-SATELLITE-001',
                'satellite-version': '5.2',
                'signature': '-----BEGIN PGP SIGNATURE-----....',
                'channel-families': {
                    'rhel-cluster': {'quantity': '10'},
                    'sam-rhel-server-6': {'quantity': '102', 'flex': '0'},
                    ...
                }
                ...
            }

    There may be several ``rhn-entitlement-cert.xml*`` files on the host; use
    the ``file_name`` attribute to check which file the settings were actually
    read from.  E.g.::

        rhn_certs = shared[rhn_cert]
        for cert in rhn_certs:
            if cert.file_name == 'rhn_entitlement_cert.xml':
                cf = cert.get('channel-families')
                ...

    Sample input::

        <?xml version="1.0" encoding="UTF-8"?>
        <rhn-cert version="0.1">
          <rhn-cert-field name="product">RHN-SATELLITE-001</rhn-cert-field>
          <rhn-cert-field name="owner">Clay's Precious Satellite</rhn-cert-field>
          <rhn-cert-field name="issued">2005-01-11 00:00:00</rhn-cert-field>
          <rhn-cert-field name="expires">2005-03-11 00:00:00</rhn-cert-field>
          <rhn-cert-field name="slots">30</rhn-cert-field>
          <rhn-cert-field name="provisioning-slots">30</rhn-cert-field>
          <rhn-cert-field name="nonlinux-slots">30</rhn-cert-field>
          <rhn-cert-field name="channel-families" quantity="10" family="rhel-cluster"/>
          <rhn-cert-field name="channel-families" quantity="30" family="rhel-ws-extras"/>
          <rhn-cert-field name="channel-families" quantity="10" family="rhel-gfs"/>
          <rhn-cert-field name="channel-families" quantity="10" family="rhel-es-extras"/>
          <rhn-cert-field name="channel-families" quantity="40" family="rhel-as"/>
          <rhn-cert-field name="channel-families" quantity="30" family="rhn-tools"/>
          <rhn-cert-field name="channel-families" quantity="102" flex="0" family="sam-rhel-server-6"/>
          <rhn-cert-field name="channel-families" quantity="102" flex="51" family="cf-tools-5-beta"/>
          <rhn-cert-field name="satellite-version">5.2</rhn-cert-field>
          <rhn-cert-field name="generation">2</rhn-cert-field>
          <rhn-cert-signature>
        -----BEGIN PGP SIGNATURE-----
        Version: Crypt::OpenPGP 1.03
        iQBGBAARAwAGBQJCAG7yAAoJEJ5yna8GlHkysOkAn07qmlUrkGKs7/5yb8H/nboG
        mhHkAJ9wdmqOeKfcBa3IUDL53oNMEBP/dg==
        =0Kv7
        -----END PGP SIGNATURE-----
        </rhn-cert-signature>
        </rhn-cert>
    """

    def parse_dom(self):
        rhn_cert = {}  # an empty xml file yields an empty dict
        channel_families = {}
        for field in self.dom.findall(".//rhn-cert-field"):
            family = field.get('family')
            if family:
                # Keep every attribute of the channel-families field except
                # 'name' and 'family' itself, e.g. 'quantity' and 'flex'.
                channel_families[family] = dict(
                    (k, v) for k, v in field.items()
                    if k not in ('name', 'family'))
            elif field.text:
                rhn_cert[field.get('name')] = field.text
        # Store all channel families together
        rhn_cert['channel-families'] = channel_families
        signature = self.dom.findall(".//rhn-cert-signature")
        rhn_cert['signature'] = signature[0].text
        return rhn_cert
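A minimal usage sketch of the parser above; the trimmed-down sample certificate, variable names, and expected values here are illustrative assumptions rather than content from the source.

from insights.parsers.rhn_entitlement_cert_xml import RHNCertConf
from insights.tests import context_wrap

# Hypothetical, shortened certificate used only to show the resulting data layout.
SAMPLE_CERT = """
<rhn-cert version="0.1">
  <rhn-cert-field name="product">RHN-SATELLITE-001</rhn-cert-field>
  <rhn-cert-field name="satellite-version">5.2</rhn-cert-field>
  <rhn-cert-field name="channel-families" quantity="10" family="rhel-cluster"/>
  <rhn-cert-signature>-----BEGIN PGP SIGNATURE-----</rhn-cert-signature>
</rhn-cert>
""".strip()

cert = RHNCertConf(context_wrap(SAMPLE_CERT))
# Plain fields are stored by name; channel families are grouped under one key.
assert cert.get("product") == "RHN-SATELLITE-001"
assert cert.get("channel-families")["rhel-cluster"] == {"quantity": "10"}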
insights/parsers/rhn_entitlement_cert_xml.py
from insights.client.auto_config import set_auto_configuration
from mock.mock import Mock, patch


@patch("insights.client.auto_config.InsightsConnection")
def test_sat_branch_info_called(connection):
    '''
    When is_satellite is True we are connecting through a Satellite,
    so get_branch_info should be called.
    '''
    config = Mock(base_url=None, upload_url=None, legacy_upload=False)
    set_auto_configuration(config, 'test.com:443/redhat_access', 'some_cert', None, True, False)
    connection.return_value.get_branch_info.assert_called_once()


@patch("insights.client.auto_config.InsightsConnection")
def test_rhsm_branch_info_not_called(connection):
    '''
    When is_satellite is False we are registering directly against RHSM,
    so get_branch_info should not be called.
    '''
    config = Mock(base_url=None, upload_url=None, legacy_upload=False)
    set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False, False)
    connection.return_value.get_branch_info.assert_not_called()
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED)) def test_doc_test(): dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT)) dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT)) env = { 'container': dic, 'image': dii, } failed, total = doctest.testmod(podman_inspect, globs=env) assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/tests/client/auto_config/test_branch_info_call.py
import requests
import json

from insights.client.connection import InsightsConnection
from mock.mock import MagicMock, Mock, patch


@patch("insights.client.connection.generate_machine_id", return_value='xxxxxx')
@patch("insights.client.connection.InsightsConnection._init_session")
@patch("insights.client.connection.InsightsConnection.get_proxies")
def test_registration_check_ok_reg(get_proxies, _init_session, _):
    '''
    Request completed OK, registered
        Returns True
    '''
    config = Mock(legacy_upload=True, base_url='example.com')
    conn = InsightsConnection(config)
    res = requests.Response()
    res._content = json.dumps({'unregistered_at': None})
    res.status_code = 200
    conn.session.get = MagicMock(return_value=res)
    assert conn.api_registration_check()


@patch("insights.client.connection.generate_machine_id", return_value='xxxxxx')
@patch("insights.client.connection.InsightsConnection._init_session")
@patch("insights.client.connection.InsightsConnection.get_proxies")
def test_registration_check_ok_reg_then_unreg(get_proxies, _init_session, _):
    '''
    Request completed OK, was once registered but has been unregistered
        Returns the date it was unregistered
    '''
    config = Mock(legacy_upload=True, base_url='example.com')
    conn = InsightsConnection(config)
    res = requests.Response()
    res._content = json.dumps({'unregistered_at': '2019-04-10'})
    res.status_code = 200
    conn.session.get = MagicMock(return_value=res)
    assert conn.api_registration_check() == '2019-04-10'


@patch("insights.client.connection.generate_machine_id", return_value='xxxxxx')
@patch("insights.client.connection.InsightsConnection._init_session")
@patch("insights.client.connection.InsightsConnection.get_proxies")
def test_registration_check_ok_unreg(get_proxies, _init_session, _):
    '''
    Request completed OK, has never been registered
        Returns None
    '''
    config = Mock(legacy_upload=True, base_url='example.com')
    conn = InsightsConnection(config)
    res = requests.Response()
    res._content = json.dumps({})
    res.status_code = 404
    conn.session.get = MagicMock(return_value=res)
    assert conn.api_registration_check() is None


@patch("insights.client.connection.generate_machine_id", return_value='xxxxxx')
@patch("insights.client.connection.InsightsConnection._init_session")
@patch("insights.client.connection.InsightsConnection.get_proxies")
def test_registration_check_bad_res(get_proxies, _init_session, _):
    '''
    Can't parse response
        Returns False
    '''
    config = Mock(legacy_upload=True, base_url='example.com')
    conn = InsightsConnection(config)
    res = requests.Response()
    res._content = 'zSDFasfghsRGH'
    res.status_code = 500
    conn.session.get = MagicMock(return_value=res)
    assert conn.api_registration_check() is False


@patch("insights.client.connection.generate_machine_id", return_value='xxxxxx')
@patch("insights.client.connection.InsightsConnection._init_session")
@patch("insights.client.connection.InsightsConnection.get_proxies")
@patch("insights.client.connection.InsightsConnection.test_connection")
def test_registration_check_conn_error(test_connection, get_proxies, _init_session, _):
    '''
    Can't connect, run connection test
        Returns False
    '''
    config = Mock(legacy_upload=True, base_url='example.com')
    conn = InsightsConnection(config)
    conn.session.get = MagicMock()
    conn.session.get.side_effect = requests.ConnectionError()
    assert conn.api_registration_check() is False
    test_connection.assert_called_once()
import pytest import doctest from insights.parsers import podman_inspect, SkipException from insights.tests import context_wrap PODMAN_CONTAINER_INSPECT = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", "Args": [ "--single-child", "--", "kolla_start" ], "State": { "OciVersion": "1.0.1-dev", "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 6606, "ExitCode": 0, "Error": "", "StartedAt": "2019-09-06T19:16:08.066138727Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504", "ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Rootfs": "", "ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf", "HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname", "HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts", "StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata", "LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log", "Name": "gnocchi_metricd", "RestartCount": 0, "Driver": "overlay", "MountLabel": "system_u:object_r:container_file_t:s0:c514,c813", "ProcessLabel": "system_u:system_r:container_t:s0:c514,c813", "AppArmorProfile": "", "EffectiveCaps": null, "BoundingCaps": [ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE" ], "ExecIDs": [], "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged", "UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff", "WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work" } }, "Mounts": [ { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/etc/pki/ca-trust/source/anchors", "type": "bind", "source": "/etc/pki/ca-trust/source/anchors", "options": [ "ro", "rbind", "rprivate" ] }, { "destination": "/var/lib/kolla/config_files/src-ceph", "type": "bind", "source": "/etc/ceph", "options": [ "ro", "rbind", 
"rprivate" ] } ], "Dependencies": [], "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": [], "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" }, "ExitCommand": [ "/usr/bin/podman", "--root", "/var/lib/containers/storage", "--runroot", "/var/run/containers/storage", "--log-level", "error", "--cgroup-manager", "systemd", "--tmpdir", "/var/run/libpod", "--storage-driver", "overlay", "container", "cleanup", "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" ], "Namespace": "", "IsInfra": false, "HostConfig": { "ContainerIDFile": "", "LogConfig": null, "NetworkMode": "host", "PortBindings": null, "AutoRemove": false, "CapAdd": [], "CapDrop": [], "DNS": [], "DNSOptions": [], "DNSSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "Cgroup": "host", "OomScoreAdj": 0, "PidMode": "", "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [], "UTSMode": "", "UsernsMode": "", "ShmSize": 65536000, "Runtime": "runc", "ConsoleSize": null, "CpuShares": null, "Memory": 0, "NanoCpus": 0, "CgroupParent": "", "BlkioWeight": null, "BlkioWeightDevice": null, "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": null, "CpuQuota": null, "CpuRealtimePeriod": null, "CpuRealtimeRuntime": null, "CpuSetCpus": "", "CpuSetMems": "", "Devices": null, "DiskQuota": 0, "KernelMemory": null, "MemoryReservation": null, "MemorySwap": null, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": [], "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "Tmpfs": [] }, "Config": { "Hostname": "controller-0", "Domainname": "", "User": { "uid": 0, "gid": 0 }, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "dumb-init", "--single-child", "--", "kolla_start" ], "Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1", "Volumes": null, "WorkingDir": "/", "Entrypoint": "dumb-init --single-child --", "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T20:42:03.096048", "com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-gnocchi-metricd-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "config_id": "tripleo_step5", "container_name": "gnocchi_metricd", "description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "io.openshift.expose-services": "", 
"io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "managed_by": "paunch", "name": "rhosp15/openstack-gnocchi-metricd", "release": "58", "summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58", "vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "StopSignal": 15 } } ] """.splitlines() PODMAN_IMAGE_INSPECT = """ [ { "Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca", "Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "RepoTags": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1", "192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest" ], "RepoDigests": [ "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df", "192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" ], "Parent": "", "Comment": "", "Created": "2019-08-19T19:39:31.939714Z", "Config": { "User": "rabbitmq", "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Entrypoint": [ "dumb-init", "--single-child", "--" ], "Cmd": [ "kolla_start" ], "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "StopSignal": "SIGTERM" }, "Version": "1.13.1", "Author": "", "Architecture": "amd64", "Os": "linux", "Size": 542316943, "VirtualSize": 542316943, "GraphDriver": { "Name": "overlay", "Data": { "LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff", "MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged", "UpperDir": 
"/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff", "WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work" } }, "RootFS": { "Type": "layers", "Layers": [ "sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3", "sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898", "sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce", "sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f" ] }, "Labels": { "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "batch": "20190819.1", "build-date": "2019-08-19T19:38:18.798307", "com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com", "com.redhat.component": "openstack-rabbitmq-container", "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements", "description": "Red Hat OpenStack Platform 15.0 rabbitmq", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq", "io.openshift.expose-services": "", "io.openshift.tags": "rhosp osp openstack osp-15.0", "maintainer": "Red Hat, Inc.", "name": "rhosp15/openstack-rabbitmq", "release": "64", "summary": "Red Hat OpenStack Platform 15.0 rabbitmq", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64", "vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "15.0" }, "Annotations": {}, "ManifestType": "application/vnd.docker.distribution.manifest.v2+json", "User": "rabbitmq", "History": [ { "created": "2019-07-15T05:10:57.589513378Z", "comment": "Imported from -" }, { "created": "2019-07-15T05:11:04.220661Z" }, { "created": "2019-08-19T19:24:22.99993Z" }, { "created": "2019-08-19T19:39:31.939714Z" } ] } ] """.splitlines() PODMAN_CONTAINER_INSPECT_TRUNCATED = """ [ { "ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda", "Created": "2019-08-21T10:38:34.753548542Z", "Path": "dumb-init", """ def test_podman_object_container_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT)) assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda" assert result.get('NetworkSettings').get('HairpinMode') is False assert result.get('Config').get('Env') == [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=controller-0", "container=oci", "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS", "TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "KOLLA_DISTRO_PYTHON_VERSION=3.6", "KOLLA_BASE_DISTRO=rhel", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ] assert result.get('GraphDriver').get('Name') == 'overlay' def test_podman_object_image_inspect(): result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT)) assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca" assert result.get('Size') == 542316943 assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df" def test_podman_container_inspect_truncated_input(): with pytest.raises(SkipException): 
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED)) def test_doc_test(): dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT)) dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT)) env = { 'container': dic, 'image': dii, } failed, total = doctest.testmod(podman_inspect, globs=env) assert failed == 0
RedHatInsights/insights-core
insights/parsers/tests/test_podman_inspect.py
insights/tests/client/connection/test_LEGACY_reg_check.py
# Authors: # Thierry Bordaz <tbordaz@redhat.com> # # Copyright (C) 2019 Red Hat # see file 'COPYING' for use and warranty information # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging from ipalib import Registry, errors from ipalib import Updater from ipapython.dn import DN logger = logging.getLogger(__name__) register = Registry() @register() class update_unhashed_password(Updater): """ DS """ def __remove_update(self, update, key, value): statement = dict(action='remove', attr=key, value=value) update.setdefault('updates', []).append(statement) def __add_update(self, update, key, value): statement = dict(action='add', attr=key, value=value) update.setdefault('updates', []).append(statement) def execute(self, **options): logger.debug("Upgrading unhashed password configuration") ldap = self.api.Backend.ldap2 base_config = DN(('cn', 'config')) try: entry = ldap.get_entry(base_config, ['nsslapd-unhashed-pw-switch']) except errors.NotFound: logger.error("Unhashed password configuration not found") return False, [] config_dn = entry.dn toggle = entry.single_value.get("nsslapd-unhashed-pw-switch") if toggle.lower() not in ['off', 'on', 'nolog']: logger.error("Unhashed password had invalid value '%s'", toggle) # Check if it exists winsync agreements searchfilter = '(objectclass=nsDSWindowsReplicationAgreement)' try: winsync_agmts, _truncated = ldap.find_entries( base_dn=base_config, filter=searchfilter, attrs_list=[] ) except errors.NotFound: logger.debug("Unhashed password this is not a winsync deployment") winsync_agmts = [] update = { 'dn': config_dn, 'updates': [], } if len(winsync_agmts) > 0: # We are running in a winsync environment # Log a warning that changelog will contain sensitive data try: # Check if the new per-backend changelog exists... 
cldb = ldap.get_entry( DN(('cn', 'changelog'), ('cn', 'userRoot'), ('cn', 'ldbm database'), ('cn', 'plugins'), ('cn', 'config'))) # We have a backend changelog so get the db dir in this case db_entry = ldap.get_entry( DN(('cn', 'userRoot'), ('cn', 'ldbm database'), ('cn', 'plugins'), ('cn', 'config')), ['nsslapd-directory']) cldb = db_entry.single_value.get("nsslapd-directory") logger.warning("This server is configured for winsync, " "the changelog files under %s " "may contain clear text passwords.\n" "Please ensure that these files can be accessed" " only by trusted accounts.\n", cldb) except errors.NotFound: # Did not find backend changelog, check the global changelog try: cldb_e = ldap.get_entry( DN(('cn', 'changelog5'), ('cn', 'config')), ['nsslapd-changelogdir']) cldb = cldb_e.single_value.get("nsslapd-changelogdir") logger.warning("This server is configured for winsync, " "the changelog files under %s " "may contain clear text passwords.\n" "Please ensure that these files can be " "accessed only by trusted accounts.\n", cldb) except errors.NotFound: logger.warning("This server is configured for winsync, " "the changelog files may contain " "clear text passwords.\n" "Please ensure that these files can be " "accessed only by trusted accounts.\n") if toggle.lower() == 'on': # The current DS configuration already logs the # unhashed password updates = [] else: self.__remove_update(update, 'nsslapd-unhashed-pw-switch', toggle) self.__add_update(update, 'nsslapd-unhashed-pw-switch', 'on') updates = [update] else: if toggle.lower() == 'nolog': updates = [] else: self.__remove_update(update, 'nsslapd-unhashed-pw-switch', toggle) self.__add_update(update, 'nsslapd-unhashed-pw-switch', 'nolog') updates = [update] return False, updates
#
# Copyright (C) 2016  FreeIPA Contributors see COPYING for license
#

"""
Test suite for creating principals via kadmin.local and modifying their keys
"""

import os
import pytest
import tempfile

from ipalib import api
from ipaserver.install import installutils


@pytest.fixture
def keytab():
    fd, keytab_path = tempfile.mkstemp(suffix='.keytab')
    os.close(fd)
    try:
        yield keytab_path
    finally:
        try:
            os.remove(keytab_path)
        except OSError:
            pass


@pytest.fixture()
def service_in_kerberos_subtree(request):
    princ = u'svc1/{0.host}@{0.realm}'.format(api.env)
    installutils.kadmin_addprinc(princ)

    def fin():
        try:
            installutils.kadmin(
                'delprinc -force {}'.format(princ))
        except Exception:
            pass

    request.addfinalizer(fin)
    return princ


@pytest.fixture()
def service_in_service_subtree(request):
    princ = u'svc2/{0.host}@{0.realm}'.format(api.env)
    rpcclient = api.Backend.rpcclient
    was_connected = rpcclient.isconnected()
    if not was_connected:
        rpcclient.connect()
    api.Command.service_add(princ)

    def fin():
        try:
            api.Command.service_del(princ)
        except Exception:
            pass
        try:
            if not was_connected:
                rpcclient.disconnect()
        except Exception:
            pass

    request.addfinalizer(fin)
    return princ


@pytest.fixture(params=["service_in_kerberos_subtree",
                        "service_in_service_subtree"])
def service(request):
    return request.getfixturevalue(request.param)


@pytest.mark.skipif(
    os.getuid() != 0, reason="kadmin.local is accessible only to root")
class TestKadmin:
    def assert_success(self, command, *args):
        """
        Since kadmin.local returns 0 also when internal errors occur,
        we have to catch the command's stderr and check that it is empty
        """
        result = command(*args)
        assert not result.error_output

    def test_create_keytab(self, service, keytab):
        """
        tests that ktadd command works for both types of services
        """
        self.assert_success(
            installutils.create_keytab,
            keytab,
            service)

    def test_change_key(self, service, keytab):
        """
        tests that both types of service can have passwords changed
        using kadmin
        """
        self.assert_success(
            installutils.create_keytab,
            keytab,
            service)
        self.assert_success(
            installutils.kadmin,
            'change_password -randkey {}'.format(service))

    def test_append_key(self, service, keytab):
        """
        Tests that we can create a new keytab for both service types
        and then append new keys to it
        """
        self.assert_success(
            installutils.create_keytab,
            keytab,
            service)
        self.assert_success(
            installutils.create_keytab,
            keytab,
            service)

    def test_getprincs(self):
        """
        tests that kadmin.local getprincs command returns a list of principals
        """
        self.assert_success(installutils.kadmin, 'getprincs')
encukou/freeipa
ipatests/test_ipaserver/test_kadmin.py
ipaserver/install/plugins/update_unhashed_password.py
# -*- coding: utf-8 -*-
u"""synergia simulation data operations

:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
import sirepo.sim_data


class SimData(sirepo.sim_data.SimDataBase):

    @classmethod
    def fixup_old_data(cls, data):
        dm = data.models
        cls._init_models(
            dm,
            (
                'beamEvolutionAnimation',
                'bunch',
                'bunchAnimation',
                'bunchTwiss',
                'simulationSettings',
                'turnComparisonAnimation',
                'twissReport',
                'twissReport2',
            ),
        )
        if 'bunchReport' in dm:
            del dm['bunchReport']
            for i in range(1, 5):
                m = dm['bunchReport{}'.format(i)] = PKDict()
                cls.update_model_defaults(m, 'bunchReport')
                if i == 1:
                    m.y = 'xp'
                elif i == 2:
                    m.x = 'y'
                    m.y = 'yp'
                elif i == 4:
                    m.x = 'z'
                    m.y = 'zp'
        cls._organize_example(data)

    @classmethod
    def _compute_model(cls, analysis_model, *args, **kwargs):
        if 'bunchReport' in analysis_model:
            return 'bunchReport'
        # twissReport2 and twissReport are compute_models
        return super(SimData, cls)._compute_model(analysis_model, *args, **kwargs)

    @classmethod
    def _compute_job_fields(cls, data, r, compute_model):
        res = ['beamlines', 'elements']
        if 'bunchReport' in r:
            res += ['bunch', 'simulation.visualizationBeamlineId']
        elif r == 'twissReport':
            res += ['simulation.activeBeamlineId']
        elif 'twissReport' in r:
            res += ['simulation.visualizationBeamlineId']
        return res

    @classmethod
    def _lib_file_basenames(cls, data):
        res = []
        b = data.models.bunch
        if b.distribution == 'file':
            res.append(cls.lib_file_name_with_model_field('bunch', 'particleFile', b.particleFile))
        return res
# -*- coding: utf-8 -*-
u"""Test sirepo.uri_router

:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest

pytest.importorskip('srwl_bl')


def test_not_found():
    from pykern.pkdebug import pkdp
    from pykern.pkunit import pkeq
    from sirepo import srunit

    fc = srunit.flask_client()
    for uri in ('/some random uri', '/srw/wrong-param', '/export-archive'):
        resp = fc.get(uri)
        pkeq(404, resp.status_code)


def test_uri_for_api():
    from sirepo import srunit

    def t():
        from pykern.pkdebug import pkdp
        from pykern.pkunit import pkeq, pkexcept, pkre
        from sirepo import uri_router
        import re

        fc = srunit.flask_client()
        uri = uri_router.uri_for_api('homePage', params={'path_info': None})
        pkre('http://[^/]+/en$', uri)
        uri = uri_router.uri_for_api(
            'homePage',
            params={'path_info': 'terms.html'},
            external=False,
        )
        pkeq('/en/terms.html', uri)
        with pkexcept(KeyError):
            uri_router.uri_for_api('notAnApi')
        with pkexcept('missing parameter'):
            uri_router.uri_for_api('exportArchive', {'simulation_type': 'srw'})

    srunit.test_in_request(t)
mrakitin/sirepo
tests/uri_router_test.py
sirepo/sim_data/synergia.py
# flake8: noqa __docformat__ = "restructuredtext" # Let users know if they're missing any of our hard dependencies hard_dependencies = ("numpy", "pytz", "dateutil") missing_dependencies = [] for dependency in hard_dependencies: try: __import__(dependency) except ImportError as e: missing_dependencies.append(f"{dependency}: {e}") if missing_dependencies: raise ImportError( "Unable to import required dependencies:\n" + "\n".join(missing_dependencies) ) del hard_dependencies, dependency, missing_dependencies # numpy compat from pandas.compat import ( np_version_under1p18 as _np_version_under1p18, is_numpy_dev as _is_numpy_dev, ) try: from pandas._libs import hashtable as _hashtable, lib as _lib, tslib as _tslib except ImportError as e: # pragma: no cover # hack but overkill to use re module = str(e).replace("cannot import name ", "") raise ImportError( f"C extension: {module} not built. If you want to import " "pandas from the source directory, you may need to run " "'python setup.py build_ext --force' to build the C extensions first." ) from e from pandas._config import ( get_option, set_option, reset_option, describe_option, option_context, options, ) # let init-time option registration happen import pandas.core.config_init from pandas.core.api import ( # dtype Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype, Float32Dtype, Float64Dtype, CategoricalDtype, PeriodDtype, IntervalDtype, DatetimeTZDtype, StringDtype, BooleanDtype, # missing NA, isna, isnull, notna, notnull, # indexes Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, MultiIndex, IntervalIndex, TimedeltaIndex, DatetimeIndex, PeriodIndex, IndexSlice, # tseries NaT, Period, period_range, Timedelta, timedelta_range, Timestamp, date_range, bdate_range, Interval, interval_range, DateOffset, # conversion to_numeric, to_datetime, to_timedelta, # misc Flags, Grouper, factorize, unique, value_counts, NamedAgg, array, Categorical, set_eng_float_format, Series, DataFrame, ) from pandas.core.arrays.sparse import SparseDtype from pandas.tseries.api import infer_freq from pandas.tseries import offsets from pandas.core.computation.api import eval from pandas.core.reshape.api import ( concat, lreshape, melt, wide_to_long, merge, merge_asof, merge_ordered, crosstab, pivot, pivot_table, get_dummies, cut, qcut, ) import pandas.api from pandas.util._print_versions import show_versions from pandas.io.api import ( # excel ExcelFile, ExcelWriter, read_excel, # parsers read_csv, read_fwf, read_table, # pickle read_pickle, to_pickle, # pytables HDFStore, read_hdf, # sql read_sql, read_sql_query, read_sql_table, # misc read_clipboard, read_parquet, read_orc, read_feather, read_gbq, read_html, read_xml, read_json, read_stata, read_sas, read_spss, ) from pandas.io.json import _json_normalize as json_normalize from pandas.util._tester import test import pandas.testing import pandas.arrays # use the closest tagged version if possible from pandas._version import get_versions v = get_versions() __version__ = v.get("closest-tag", v["version"]) __git_version__ = v.get("full-revisionid") del get_versions, v # GH 27101 def __getattr__(name): import warnings if name == "datetime": warnings.warn( "The pandas.datetime class is deprecated " "and will be removed from pandas in a future version. 
" "Import from datetime module instead.", FutureWarning, stacklevel=2, ) from datetime import datetime as dt return dt elif name == "np": warnings.warn( "The pandas.np module is deprecated " "and will be removed from pandas in a future version. " "Import numpy directly instead", FutureWarning, stacklevel=2, ) import numpy as np return np elif name in {"SparseSeries", "SparseDataFrame"}: warnings.warn( f"The {name} class is removed from pandas. Accessing it from " "the top-level namespace will also be removed in the next version", FutureWarning, stacklevel=2, ) return type(name, (), {}) elif name == "SparseArray": warnings.warn( "The pandas.SparseArray class is deprecated " "and will be removed from pandas in a future version. " "Use pandas.arrays.SparseArray instead.", FutureWarning, stacklevel=2, ) from pandas.core.arrays.sparse import SparseArray as _SparseArray return _SparseArray raise AttributeError(f"module 'pandas' has no attribute '{name}'") # module level doc-string __doc__ = """ pandas - a powerful data analysis and manipulation library for Python ===================================================================== **pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**. It is already well on its way toward this goal. Main Features ------------- Here are just a few of the things that pandas does well: - Easy handling of missing data in floating point as well as non-floating point data. - Size mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects - Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in computations. - Powerful, flexible group by functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data. - Make it easy to convert ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects. - Intelligent label-based slicing, fancy indexing, and subsetting of large data sets. - Intuitive merging and joining data sets. - Flexible reshaping and pivoting of data sets. - Hierarchical labeling of axes (possible to have multiple labels per tick). - Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving/loading data from the ultrafast HDF5 format. - Time series-specific functionality: date range generation and frequency conversion, moving window statistics, date shifting and lagging. """
import numpy as np import pytest import pandas as pd from pandas import ( Categorical, DataFrame, Series, Timestamp, date_range, ) import pandas._testing as tm class TestDataFrameDescribe: def test_describe_bool_in_mixed_frame(self): df = DataFrame( { "string_data": ["a", "b", "c", "d", "e"], "bool_data": [True, True, False, False, False], "int_data": [10, 20, 30, 40, 50], } ) # Integer data are included in .describe() output, # Boolean and string data are not. result = df.describe() expected = DataFrame( {"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]}, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) tm.assert_frame_equal(result, expected) # Top value is a boolean value that is False result = df.describe(include=["bool"]) expected = DataFrame( {"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"] ) tm.assert_frame_equal(result, expected) def test_describe_empty_object(self): # GH#27183 df = DataFrame({"A": [None, None]}, dtype=object) result = df.describe() expected = DataFrame( {"A": [0, 0, np.nan, np.nan]}, dtype=object, index=["count", "unique", "top", "freq"], ) tm.assert_frame_equal(result, expected) result = df.iloc[:0].describe() tm.assert_frame_equal(result, expected) def test_describe_bool_frame(self): # GH#13891 df = DataFrame( { "bool_data_1": [False, False, True, True], "bool_data_2": [False, True, True, True], } ) result = df.describe() expected = DataFrame( {"bool_data_1": [4, 2, False, 2], "bool_data_2": [4, 2, True, 3]}, index=["count", "unique", "top", "freq"], ) tm.assert_frame_equal(result, expected) df = DataFrame( { "bool_data": [False, False, True, True, False], "int_data": [0, 1, 2, 3, 4], } ) result = df.describe() expected = DataFrame( {"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]}, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) tm.assert_frame_equal(result, expected) df = DataFrame( {"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]} ) result = df.describe() expected = DataFrame( {"bool_data": [4, 2, False, 2], "str_data": [4, 3, "a", 2]}, index=["count", "unique", "top", "freq"], ) tm.assert_frame_equal(result, expected) def test_describe_categorical(self): df = DataFrame({"value": np.random.randint(0, 10000, 100)}) labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)] cat_labels = Categorical(labels, labels) df = df.sort_values(by=["value"], ascending=True) df["value_group"] = pd.cut( df.value, range(0, 10500, 500), right=False, labels=cat_labels ) cat = df # Categoricals should not show up together with numerical columns result = cat.describe() assert len(result.columns) == 1 # In a frame, describe() for the cat should be the same as for string # arrays (count, unique, top, freq) cat = Categorical( ["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True ) s = Series(cat) result = s.describe() expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"]) tm.assert_series_equal(result, expected) cat = Series(Categorical(["a", "b", "c", "c"])) df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]}) result = df3.describe() tm.assert_numpy_array_equal(result["cat"].values, result["s"].values) def test_describe_empty_categorical_column(self): # GH#26397 # Ensure the index of an empty categorical DataFrame column # also contains (count, unique, top, freq) df = DataFrame({"empty_col": Categorical([])}) result = df.describe() expected = DataFrame( {"empty_col": [0, 0, np.nan, np.nan]}, index=["count", "unique", "top", "freq"], 
dtype="object", ) tm.assert_frame_equal(result, expected) # ensure NaN, not None assert np.isnan(result.iloc[2, 0]) assert np.isnan(result.iloc[3, 0]) def test_describe_categorical_columns(self): # GH#11558 columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX") df = DataFrame( { "int1": [10, 20, 30, 40, 50], "int2": [10, 20, 30, 40, 50], "obj": ["A", 0, None, "X", 1], }, columns=columns, ) result = df.describe() exp_columns = pd.CategoricalIndex( ["int1", "int2"], categories=["int1", "int2", "obj"], ordered=True, name="XXX", ) expected = DataFrame( { "int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50], "int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50], }, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], columns=exp_columns, ) tm.assert_frame_equal(result, expected) tm.assert_categorical_equal(result.columns.values, expected.columns.values) def test_describe_datetime_columns(self): columns = pd.DatetimeIndex( ["2011-01-01", "2011-02-01", "2011-03-01"], freq="MS", tz="US/Eastern", name="XXX", ) df = DataFrame( { 0: [10, 20, 30, 40, 50], 1: [10, 20, 30, 40, 50], 2: ["A", 0, None, "X", 1], } ) df.columns = columns result = df.describe() exp_columns = pd.DatetimeIndex( ["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX" ) expected = DataFrame( { 0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50], 1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50], }, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) expected.columns = exp_columns tm.assert_frame_equal(result, expected) assert result.columns.freq == "MS" assert result.columns.tz == expected.columns.tz def test_describe_timedelta_values(self): # GH#6145 t1 = pd.timedelta_range("1 days", freq="D", periods=5) t2 = pd.timedelta_range("1 hours", freq="H", periods=5) df = DataFrame({"t1": t1, "t2": t2}) expected = DataFrame( { "t1": [ 5, pd.Timedelta("3 days"), df.iloc[:, 0].std(), pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days"), pd.Timedelta("4 days"), pd.Timedelta("5 days"), ], "t2": [ 5, pd.Timedelta("3 hours"), df.iloc[:, 1].std(), pd.Timedelta("1 hours"), pd.Timedelta("2 hours"), pd.Timedelta("3 hours"), pd.Timedelta("4 hours"), pd.Timedelta("5 hours"), ], }, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) result = df.describe() tm.assert_frame_equal(result, expected) exp_repr = ( " t1 t2\n" "count 5 5\n" "mean 3 days 00:00:00 0 days 03:00:00\n" "std 1 days 13:56:50.394919273 0 days 01:34:52.099788303\n" "min 1 days 00:00:00 0 days 01:00:00\n" "25% 2 days 00:00:00 0 days 02:00:00\n" "50% 3 days 00:00:00 0 days 03:00:00\n" "75% 4 days 00:00:00 0 days 04:00:00\n" "max 5 days 00:00:00 0 days 05:00:00" ) assert repr(result) == exp_repr def test_describe_tz_values(self, tz_naive_fixture): # GH#21332 tz = tz_naive_fixture s1 = Series(range(5)) start = Timestamp(2018, 1, 1) end = Timestamp(2018, 1, 5) s2 = Series(date_range(start, end, tz=tz)) df = DataFrame({"s1": s1, "s2": s2}) expected = DataFrame( { "s1": [5, 2, 0, 1, 2, 3, 4, 1.581139], "s2": [ 5, Timestamp(2018, 1, 3).tz_localize(tz), start.tz_localize(tz), s2[1], s2[2], s2[3], end.tz_localize(tz), np.nan, ], }, index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], ) result = df.describe(include="all", datetime_is_numeric=True) tm.assert_frame_equal(result, expected) def test_datetime_is_numeric_includes_datetime(self): df = DataFrame({"a": date_range("2012", periods=3), "b": [1, 2, 3]}) result = df.describe(datetime_is_numeric=True) expected = DataFrame( { 
"a": [ 3, Timestamp("2012-01-02"), Timestamp("2012-01-01"), Timestamp("2012-01-01T12:00:00"), Timestamp("2012-01-02"), Timestamp("2012-01-02T12:00:00"), Timestamp("2012-01-03"), np.nan, ], "b": [3, 2, 1, 1.5, 2, 2.5, 3, 1], }, index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], ) tm.assert_frame_equal(result, expected) def test_describe_tz_values2(self): tz = "CET" s1 = Series(range(5)) start = Timestamp(2018, 1, 1) end = Timestamp(2018, 1, 5) s2 = Series(date_range(start, end, tz=tz)) df = DataFrame({"s1": s1, "s2": s2}) s1_ = s1.describe() s2_ = Series( [ 5, 5, s2.value_counts().index[0], 1, start.tz_localize(tz), end.tz_localize(tz), ], index=["count", "unique", "top", "freq", "first", "last"], ) idx = [ "count", "unique", "top", "freq", "first", "last", "mean", "std", "min", "25%", "50%", "75%", "max", ] expected = pd.concat([s1_, s2_], axis=1, keys=["s1", "s2"]).loc[idx] with tm.assert_produces_warning(FutureWarning): result = df.describe(include="all") tm.assert_frame_equal(result, expected) def test_describe_percentiles_integer_idx(self): # GH#26660 df = DataFrame({"x": [1]}) pct = np.linspace(0, 1, 10 + 1) result = df.describe(percentiles=pct) expected = DataFrame( {"x": [1.0, 1.0, np.NaN, 1.0, *[1.0 for _ in pct], 1.0]}, index=[ "count", "mean", "std", "min", "0%", "10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%", "max", ], ) tm.assert_frame_equal(result, expected) def test_describe_does_not_raise_error_for_dictlike_elements(self): # GH#32409 df = DataFrame([{"test": {"a": "1"}}, {"test": {"a": "2"}}]) expected = DataFrame( {"test": [2, 2, {"a": "1"}, 1]}, index=["count", "unique", "top", "freq"] ) result = df.describe() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("exclude", ["x", "y", ["x", "y"], ["x", "z"]]) def test_describe_when_include_all_exclude_not_allowed(self, exclude): """ When include is 'all', then setting exclude != None is not allowed. """ df = DataFrame({"x": [1], "y": [2], "z": [3]}) msg = "exclude must be None when include is 'all'" with pytest.raises(ValueError, match=msg): df.describe(include="all", exclude=exclude) def test_describe_with_duplicate_columns(self): df = DataFrame( [[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["bar", "a", "a"], dtype="float64", ) result = df.describe() ser = df.iloc[:, 0].describe() expected = pd.concat([ser, ser, ser], keys=df.columns, axis=1) tm.assert_frame_equal(result, expected)
datapythonista/pandas
pandas/tests/frame/methods/test_describe.py
pandas/__init__.py
import numpy as np

import pandas as pd
from pandas import (
    Categorical,
    DataFrame,
    Index,
    Series,
    Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray


class TestGetNumericData:
    def test_get_numeric_data_preserve_dtype(self):
        # get the numeric data
        obj = DataFrame({"A": [1, "2", 3.0]})
        result = obj._get_numeric_data()
        expected = DataFrame(index=[0, 1, 2], dtype=object)
        tm.assert_frame_equal(result, expected)

    def test_get_numeric_data(self):
        datetime64name = np.dtype("M8[ns]").name
        objectname = np.dtype(np.object_).name

        df = DataFrame(
            {"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
            index=np.arange(10),
        )
        result = df.dtypes
        expected = Series(
            [
                np.dtype("float64"),
                np.dtype("int64"),
                np.dtype(objectname),
                np.dtype(datetime64name),
            ],
            index=["a", "b", "c", "f"],
        )
        tm.assert_series_equal(result, expected)

        df = DataFrame(
            {
                "a": 1.0,
                "b": 2,
                "c": "foo",
                "d": np.array([1.0] * 10, dtype="float32"),
                "e": np.array([1] * 10, dtype="int32"),
                "f": np.array([1] * 10, dtype="int16"),
                "g": Timestamp("20010102"),
            },
            index=np.arange(10),
        )

        result = df._get_numeric_data()
        expected = df.loc[:, ["a", "b", "d", "e", "f"]]
        tm.assert_frame_equal(result, expected)

        only_obj = df.loc[:, ["c", "g"]]
        result = only_obj._get_numeric_data()
        expected = df.loc[:, []]
        tm.assert_frame_equal(result, expected)

        df = DataFrame.from_dict(
            {"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]}
        )
        result = df._get_numeric_data()
        expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
        tm.assert_frame_equal(result, expected)

        df = result.copy()
        result = df._get_numeric_data()
        expected = df
        tm.assert_frame_equal(result, expected)

    def test_get_numeric_data_mixed_dtype(self):
        # numeric and object columns
        df = DataFrame(
            {
                "a": [1, 2, 3],
                "b": [True, False, True],
                "c": ["foo", "bar", "baz"],
                "d": [None, None, None],
                "e": [3.14, 0.577, 2.773],
            }
        )
        result = df._get_numeric_data()
        tm.assert_index_equal(result.columns, Index(["a", "b", "e"]))

    def test_get_numeric_data_extension_dtype(self):
        # GH#22290
        df = DataFrame(
            {
                "A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
                "B": Categorical(list("abcabc")),
                "C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
                "D": IntervalArray.from_breaks(range(7)),
            }
        )
        result = df._get_numeric_data()
        expected = df.loc[:, ["A", "C"]]
        tm.assert_frame_equal(result, expected)
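DataFrame._get_numeric_data() exercised above is internal API; user code usually reaches the same selection through the public select_dtypes method. A minimal sketch under that assumption (illustrative, not part of the test file above):

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0], "b": [1, 2], "c": ["x", "y"]})

# Public equivalent of the numeric-column selection tested above.
numeric = df.select_dtypes(include=np.number)
print(list(numeric.columns))  # ['a', 'b']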
datapythonista/pandas
pandas/tests/frame/methods/test_describe.py
pandas/tests/frame/methods/test_get_numeric_data.py
import warnings

import pytest

import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base.base import BaseExtensionTests


class BaseReduceTests(BaseExtensionTests):
    """
    Reduction specific tests. Generally these only make sense for
    numeric/boolean operations.
    """

    def check_reduce(self, s, op_name, skipna):
        result = getattr(s, op_name)(skipna=skipna)
        expected = getattr(s.astype("float64"), op_name)(skipna=skipna)
        tm.assert_almost_equal(result, expected)


class BaseNoReduceTests(BaseReduceTests):
    """we don't define any reductions"""

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
        op_name = all_numeric_reductions
        s = pd.Series(data)

        msg = (
            "[Cc]annot perform|Categorical is not ordered for operation|"
            "'Categorical' does not implement reduction|"
        )
        with pytest.raises(TypeError, match=msg):
            getattr(s, op_name)(skipna=skipna)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
        op_name = all_boolean_reductions
        s = pd.Series(data)

        msg = (
            "[Cc]annot perform|Categorical is not ordered for operation|"
            "'Categorical' does not implement reduction|"
        )
        with pytest.raises(TypeError, match=msg):
            getattr(s, op_name)(skipna=skipna)


class BaseNumericReduceTests(BaseReduceTests):
    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series(self, data, all_numeric_reductions, skipna):
        op_name = all_numeric_reductions
        s = pd.Series(data)

        # min/max with empty produce numpy warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            self.check_reduce(s, op_name, skipna)


class BaseBooleanReduceTests(BaseReduceTests):
    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series(self, data, all_boolean_reductions, skipna):
        op_name = all_boolean_reductions
        s = pd.Series(data)
        self.check_reduce(s, op_name, skipna)
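These Base*ReduceTests classes are mixed into a concrete ExtensionArray test module, which supplies the data, all_numeric_reductions, all_boolean_reductions, and skipna fixtures through its conftest.py. A hypothetical usage sketch (the class name is illustrative, not taken from the file above):

import pandas._testing as tm
from pandas.tests.extension import base


class TestNumericReduce(base.BaseNumericReduceTests):
    # check_reduce can be overridden when the default float64 cast is not a
    # fair reference implementation for the ExtensionArray under test.
    def check_reduce(self, s, op_name, skipna):
        result = getattr(s, op_name)(skipna=skipna)
        expected = getattr(s.astype("float64"), op_name)(skipna=skipna)
        tm.assert_almost_equal(result, expected)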
datapythonista/pandas
pandas/tests/frame/methods/test_describe.py
pandas/tests/extension/base/reduce.py
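For context on the behaviour these reduction tests protect (an illustrative example, not from the files above): reductions on pandas' nullable extension dtypes skip missing values by default and propagate NA when asked not to.

import pandas as pd

s = pd.Series([1, 2, None], dtype="Int64")
print(s.sum())              # 3: NA is skipped by default
print(s.sum(skipna=False))  # <NA>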
from __future__ import annotations from contextlib import suppress from typing import ( TYPE_CHECKING, Any, Hashable, Sequence, ) import warnings import numpy as np from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas.errors import ( AbstractMethodError, InvalidIndexError, ) from pandas.util._decorators import doc from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, isna, ) import pandas.core.common as com from pandas.core.construction import array as pd_array from pandas.core.indexers import ( check_array_indexer, is_empty_indexer, is_exact_shape_match, is_list_like_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) if TYPE_CHECKING: from pandas import ( DataFrame, Series, ) # "null slice" _NS = slice(None, None) # the public IndexSlicerMaker class _IndexSlice: """ Create an object to more easily perform multi-index slicing. See Also -------- MultiIndex.remove_unused_levels : New MultiIndex with no unused levels. Notes ----- See :ref:`Defined Levels <advanced.shown_levels>` for further info on slicing a MultiIndex. Examples -------- >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']]) >>> columns = ['foo', 'bar'] >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))), ... index=midx, columns=columns) Using the default slice command: >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 Using the IndexSlice class for a more intuitive command: >>> idx = pd.IndexSlice >>> dfmi.loc[idx[:, 'B0':'B1'], :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 """ def __getitem__(self, arg): return arg IndexSlice = _IndexSlice() class IndexingError(Exception): pass class IndexingMixin: """ Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series. """ @property def iloc(self) -> _iLocIndexer: """ Purely integer-location based indexing for selection by position. ``.iloc[]`` is primarily integer position based (from ``0`` to ``length-1`` of the axis), but may also be used with a boolean array. Allowed inputs are: - An integer, e.g. ``5``. - A list or array of integers, e.g. ``[4, 3, 0]``. - A slice object with ints, e.g. ``1:7``. - A boolean array. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above). This is useful in method chains, when you don't have a reference to the calling object, but would like to base your selection on some value. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds indexing (this conforms with python/numpy *slice* semantics). See more at :ref:`Selection by Position <indexing.integer>`. See Also -------- DataFrame.iat : Fast integer location scalar accessor. DataFrame.loc : Purely label-location based indexer for selection by label. Series.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4}, ... {'a': 100, 'b': 200, 'c': 300, 'd': 400}, ... 
{'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }] >>> df = pd.DataFrame(mydict) >>> df a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 **Indexing just the rows** With a scalar integer. >>> type(df.iloc[0]) <class 'pandas.core.series.Series'> >>> df.iloc[0] a 1 b 2 c 3 d 4 Name: 0, dtype: int64 With a list of integers. >>> df.iloc[[0]] a b c d 0 1 2 3 4 >>> type(df.iloc[[0]]) <class 'pandas.core.frame.DataFrame'> >>> df.iloc[[0, 1]] a b c d 0 1 2 3 4 1 100 200 300 400 With a `slice` object. >>> df.iloc[:3] a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 With a boolean mask the same length as the index. >>> df.iloc[[True, False, True]] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 With a callable, useful in method chains. The `x` passed to the ``lambda`` is the DataFrame being sliced. This selects the rows whose index label even. >>> df.iloc[lambda x: x.index % 2 == 0] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 **Indexing both axes** You can mix the indexer types for the index and columns. Use ``:`` to select the entire axis. With scalar integers. >>> df.iloc[0, 1] 2 With lists of integers. >>> df.iloc[[0, 2], [1, 3]] b d 0 2 4 2 2000 4000 With `slice` objects. >>> df.iloc[1:3, 0:3] a b c 1 100 200 300 2 1000 2000 3000 With a boolean array whose length matches the columns. >>> df.iloc[:, [True, False, True, False]] a c 0 1 3 1 100 300 2 1000 3000 With a callable function that expects the Series or DataFrame. >>> df.iloc[:, lambda df: [0, 2]] a c 0 1 3 1 100 300 2 1000 3000 """ return _iLocIndexer("iloc", self) @property def loc(self) -> _LocIndexer: """ Access a group of rows and columns by label(s) or a boolean array. ``.loc[]`` is primarily label based, but may also be used with a boolean array. Allowed inputs are: - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index, and **never** as an integer position along the index). - A list or array of labels, e.g. ``['a', 'b', 'c']``. - A slice object with labels, e.g. ``'a':'f'``. .. warning:: Note that contrary to usual python slices, **both** the start and the stop are included - A boolean array of the same length as the axis being sliced, e.g. ``[True, False, True]``. - An alignable boolean Series. The index of the key will be aligned before masking. - An alignable Index. The Index of the returned selection will be the input. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above) See more at :ref:`Selection by Label <indexing.label>`. Raises ------ KeyError If any items are not found. IndexingError If an indexed key is passed and its index is unalignable to the frame index. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.iloc : Access group of rows and columns by integer position(s). DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the Series/DataFrame. Series.loc : Access group of values using labels. Examples -------- **Getting values** >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 Single label. Note this returns the row as a Series. >>> df.loc['viper'] max_speed 4 shield 5 Name: viper, dtype: int64 List of labels. Note using ``[[]]`` returns a DataFrame. 
>>> df.loc[['viper', 'sidewinder']] max_speed shield viper 4 5 sidewinder 7 8 Single label for row and column >>> df.loc['cobra', 'shield'] 2 Slice with labels for row and single label for column. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc['cobra':'viper', 'max_speed'] cobra 1 viper 4 Name: max_speed, dtype: int64 Boolean list with the same length as the row axis >>> df.loc[[False, False, True]] max_speed shield sidewinder 7 8 Alignable boolean Series: >>> df.loc[pd.Series([False, True, False], ... index=['viper', 'sidewinder', 'cobra'])] max_speed shield sidewinder 7 8 Index (same behavior as ``df.reindex``) >>> df.loc[pd.Index(["cobra", "viper"], name="foo")] max_speed shield foo cobra 1 2 viper 4 5 Conditional that returns a boolean Series >>> df.loc[df['shield'] > 6] max_speed shield sidewinder 7 8 Conditional that returns a boolean Series with column labels specified >>> df.loc[df['shield'] > 6, ['max_speed']] max_speed sidewinder 7 Callable that returns a boolean Series >>> df.loc[lambda df: df['shield'] == 8] max_speed shield sidewinder 7 8 **Setting values** Set value for all items matching the list of labels >>> df.loc[['viper', 'sidewinder'], ['shield']] = 50 >>> df max_speed shield cobra 1 2 viper 4 50 sidewinder 7 50 Set value for an entire row >>> df.loc['cobra'] = 10 >>> df max_speed shield cobra 10 10 viper 4 50 sidewinder 7 50 Set value for an entire column >>> df.loc[:, 'max_speed'] = 30 >>> df max_speed shield cobra 30 10 viper 30 50 sidewinder 30 50 Set value for rows matching callable condition >>> df.loc[df['shield'] > 35] = 0 >>> df max_speed shield cobra 30 10 viper 0 0 sidewinder 0 0 **Getting values on a DataFrame with an index that has integer labels** Another example using integers for the index >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=[7, 8, 9], columns=['max_speed', 'shield']) >>> df max_speed shield 7 1 2 8 4 5 9 7 8 Slice with integer labels for rows. As mentioned above, note that both the start and stop of the slice are included. >>> df.loc[7:9] max_speed shield 7 1 2 8 4 5 9 7 8 **Getting values with a MultiIndex** A number of examples using a DataFrame with a MultiIndex >>> tuples = [ ... ('cobra', 'mark i'), ('cobra', 'mark ii'), ... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'), ... ('viper', 'mark ii'), ('viper', 'mark iii') ... ] >>> index = pd.MultiIndex.from_tuples(tuples) >>> values = [[12, 2], [0, 4], [10, 20], ... [1, 4], [7, 1], [16, 36]] >>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index) >>> df max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 mark iii 16 36 Single label. Note this returns a DataFrame with a single index. >>> df.loc['cobra'] max_speed shield mark i 12 2 mark ii 0 4 Single index tuple. Note this returns a Series. >>> df.loc[('cobra', 'mark ii')] max_speed 0 shield 4 Name: (cobra, mark ii), dtype: int64 Single label for row and column. Similar to passing in a tuple, this returns a Series. >>> df.loc['cobra', 'mark i'] max_speed 12 shield 2 Name: (cobra, mark i), dtype: int64 Single tuple. Note using ``[[]]`` returns a DataFrame. 
>>> df.loc[[('cobra', 'mark ii')]] max_speed shield cobra mark ii 0 4 Single tuple for the index with a single label for the column >>> df.loc[('cobra', 'mark i'), 'shield'] 2 Slice from index tuple to single label >>> df.loc[('cobra', 'mark i'):'viper'] max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 mark iii 16 36 Slice from index tuple to index tuple >>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')] max_speed shield cobra mark i 12 2 mark ii 0 4 sidewinder mark i 10 20 mark ii 1 4 viper mark ii 7 1 """ return _LocIndexer("loc", self) @property def at(self) -> _AtIndexer: """ Access a single value for a row/column label pair. Similar to ``loc``, in that both provide label-based lookups. Use ``at`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ KeyError If 'label' does not exist in DataFrame. See Also -------- DataFrame.iat : Access a single value for a row/column pair by integer position. DataFrame.loc : Access a group of rows and columns by label(s). Series.at : Access a single value using a label. Examples -------- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... index=[4, 5, 6], columns=['A', 'B', 'C']) >>> df A B C 4 0 2 3 5 0 4 1 6 10 20 30 Get value at specified row/column pair >>> df.at[4, 'B'] 2 Set value at specified row/column pair >>> df.at[4, 'B'] = 10 >>> df.at[4, 'B'] 10 Get value within a Series >>> df.loc[5].at['B'] 4 """ return _AtIndexer("at", self) @property def iat(self) -> _iAtIndexer: """ Access a single value for a row/column pair by integer position. Similar to ``iloc``, in that both provide integer-based lookups. Use ``iat`` if you only need to get or set a single value in a DataFrame or Series. Raises ------ IndexError When integer position is out of bounds. See Also -------- DataFrame.at : Access a single value for a row/column label pair. DataFrame.loc : Access a group of rows and columns by label(s). DataFrame.iloc : Access a group of rows and columns by integer position(s). Examples -------- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], ... columns=['A', 'B', 'C']) >>> df A B C 0 0 2 3 1 0 4 1 2 10 20 30 Get value at specified row/column pair >>> df.iat[1, 2] 1 Set value at specified row/column pair >>> df.iat[1, 2] = 10 >>> df.iat[1, 2] 10 Get value within a series >>> df.loc[0].iat[1] 2 """ return _iAtIndexer("iat", self) class _LocationIndexer(NDFrameIndexerBase): _valid_types: str axis = None def __call__(self, axis=None): # we need to return a copy of ourselves new_self = type(self)(self.name, self.obj) if axis is not None: axis = self.obj._get_axis_number(axis) new_self.axis = axis return new_self def _get_setitem_indexer(self, key): """ Convert a potentially-label-based key into a positional indexer. """ if self.name == "loc": self._ensure_listlike_indexer(key) if self.axis is not None: return self._convert_tuple(key, is_setter=True) ax = self.obj._get_axis(0) if isinstance(ax, MultiIndex) and self.name != "iloc": with suppress(TypeError, KeyError, InvalidIndexError): # TypeError e.g. 
passed a bool return ax.get_loc(key) if isinstance(key, tuple): with suppress(IndexingError): return self._convert_tuple(key, is_setter=True) if isinstance(key, range): return list(key) try: return self._convert_to_indexer(key, axis=0, is_setter=True) except TypeError as e: # invalid indexer type vs 'other' indexing errors if "cannot do" in str(e): raise elif "unhashable type" in str(e): raise raise IndexingError(key) from e def _ensure_listlike_indexer(self, key, axis=None, value=None): """ Ensure that a list-like of column labels are all present by adding them if they do not already exist. Parameters ---------- key : list-like of column labels Target labels. axis : key axis if known """ column_axis = 1 # column only exists in 2-dimensional DataFrame if self.ndim != 2: return if isinstance(key, tuple) and len(key) > 1: # key may be a tuple if we are .loc # if length of key is > 1 set key to column part key = key[column_axis] axis = column_axis if ( axis == column_axis and not isinstance(self.obj.columns, MultiIndex) and is_list_like_indexer(key) and not com.is_bool_indexer(key) and all(is_hashable(k) for k in key) ): # GH#38148 keys = self.obj.columns.union(key, sort=False) self.obj._mgr = self.obj._mgr.reindex_axis( keys, axis=0, consolidate=False, only_slice=True ) def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: key = com.apply_if_callable(key, self.obj) indexer = self._get_setitem_indexer(key) self._has_valid_setitem_indexer(key) iloc = self if self.name == "iloc" else self.obj.iloc iloc._setitem_with_indexer(indexer, value, self.name) def _validate_key(self, key, axis: int): """ Ensure that key is valid for current indexer. Parameters ---------- key : scalar, slice or list-like Key requested. axis : int Dimension on which the indexing is being made. Raises ------ TypeError If the key (or some element of it) has wrong type. IndexError If the key (or some element of it) is out of bounds. KeyError If the key was not found. """ raise AbstractMethodError(self) def _has_valid_tuple(self, key: tuple): """ Check the key for valid keys across my indexer. """ self._validate_key_length(key) for i, k in enumerate(key): try: self._validate_key(k, i) except ValueError as err: raise ValueError( "Location based indexing can only have " f"[{self._valid_types}] types" ) from err def _is_nested_tuple_indexer(self, tup: tuple) -> bool: """ Returns ------- bool """ if any(isinstance(ax, MultiIndex) for ax in self.obj.axes): return any(is_nested_tuple(tup, ax) for ax in self.obj.axes) return False def _convert_tuple(self, key, is_setter: bool = False): keyidx = [] if self.axis is not None: axis = self.obj._get_axis_number(self.axis) for i in range(self.ndim): if i == axis: keyidx.append( self._convert_to_indexer(key, axis=axis, is_setter=is_setter) ) else: keyidx.append(slice(None)) else: self._validate_key_length(key) for i, k in enumerate(key): idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter) keyidx.append(idx) return tuple(keyidx) def _validate_key_length(self, key: Sequence[Any]) -> None: if len(key) > self.ndim: raise IndexingError("Too many indexers") def _getitem_tuple_same_dim(self, tup: tuple): """ Index with indexers that should return an object of the same dimension as self.obj. This is only called after a failed call to _getitem_lowerdim. 
""" retval = self.obj for i, key in enumerate(tup): if com.is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) # We should never have retval.ndim < self.ndim, as that should # be handled by the _getitem_lowerdim call above. assert retval.ndim == self.ndim return retval def _getitem_lowerdim(self, tup: tuple): # we can directly get the axis result since the axis is specified if self.axis is not None: axis = self.obj._get_axis_number(self.axis) return self._getitem_axis(tup, axis=axis) # we may have a nested tuples indexer here if self._is_nested_tuple_indexer(tup): return self._getitem_nested_tuple(tup) # we maybe be using a tuple to represent multiple dimensions here ax0 = self.obj._get_axis(0) # ...but iloc should handle the tuple as simple integer-location # instead of checking it as multiindex representation (GH 13797) if isinstance(ax0, MultiIndex) and self.name != "iloc": with suppress(IndexingError): return self._handle_lowerdim_multi_index_axis0(tup) self._validate_key_length(tup) for i, key in enumerate(tup): if is_label_like(key): # We don't need to check for tuples here because those are # caught by the _is_nested_tuple_indexer check above. section = self._getitem_axis(key, axis=i) # We should never have a scalar section here, because # _getitem_lowerdim is only called after a check for # is_scalar_access, which that would be. if section.ndim == self.ndim: # we're in the middle of slicing through a MultiIndex # revise the key wrt to `section` by inserting an _NS new_key = tup[:i] + (_NS,) + tup[i + 1 :] else: # Note: the section.ndim == self.ndim check above # rules out having DataFrame here, so we dont need to worry # about transposing. new_key = tup[:i] + tup[i + 1 :] if len(new_key) == 1: new_key = new_key[0] # Slices should return views, but calling iloc/loc with a null # slice returns a new object. if com.is_null_slice(new_key): return section # This is an elided recursive call to iloc/loc return getattr(section, self.name)[new_key] raise IndexingError("not applicable") def _getitem_nested_tuple(self, tup: tuple): # we have a nested tuple so have at least 1 multi-index level # we should be able to match up the dimensionality here # we have too many indexers for our dim, but have at least 1 # multi-index dimension, try to see if we have something like # a tuple passed to a series with a multi-index if len(tup) > self.ndim: if self.name != "loc": # This should never be reached, but lets be explicit about it raise ValueError("Too many indices") if isinstance(self.obj, ABCSeries) and any( isinstance(k, tuple) for k in tup ): # GH#35349 Raise if tuple in tuple for series raise ValueError("Too many indices") if self.ndim == 1 or not any(isinstance(x, slice) for x in tup): # GH#10521 Series should reduce MultiIndex dimensions instead of # DataFrame, IndexingError is not raised when slice(None,None,None) # with one row. 
with suppress(IndexingError): return self._handle_lowerdim_multi_index_axis0(tup) # this is a series with a multi-index specified a tuple of # selectors axis = self.axis or 0 return self._getitem_axis(tup, axis=axis) # handle the multi-axis by taking sections and reducing # this is iterative obj = self.obj # GH#41369 Loop in reverse order ensures indexing along columns before rows # which selects only necessary blocks which avoids dtype conversion if possible axis = len(tup) - 1 for key in tup[::-1]: if com.is_null_slice(key): axis -= 1 continue obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) axis -= 1 # if we have a scalar, we are done if is_scalar(obj) or not hasattr(obj, "ndim"): break return obj def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): raise AbstractMethodError(self) def __getitem__(self, key): if type(key) is tuple: key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) if self._is_scalar_access(key): with suppress(KeyError, IndexError, AttributeError): # AttributeError for IntervalTree get_value return self.obj._get_value(*key, takeable=self._takeable) return self._getitem_tuple(key) else: # we by definition only have the 0th axis axis = self.axis or 0 maybe_callable = com.apply_if_callable(key, self.obj) return self._getitem_axis(maybe_callable, axis=axis) def _is_scalar_access(self, key: tuple): raise NotImplementedError() def _getitem_tuple(self, tup: tuple): raise AbstractMethodError(self) def _getitem_axis(self, key, axis: int): raise NotImplementedError() def _has_valid_setitem_indexer(self, indexer) -> bool: raise AbstractMethodError(self) def _getbool_axis(self, key, axis: int): # caller is responsible for ensuring non-None axis labels = self.obj._get_axis(axis) key = check_bool_indexer(labels, key) inds = key.nonzero()[0] return self.obj._take_with_is_copy(inds, axis=axis) @doc(IndexingMixin.loc) class _LocIndexer(_LocationIndexer): _takeable: bool = False _valid_types = ( "labels (MUST BE IN THE INDEX), slices of labels (BOTH " "endpoints included! 
Can be slices of integers if the " "index is integers), listlike of labels, boolean" ) # ------------------------------------------------------------------- # Key Checks @doc(_LocationIndexer._validate_key) def _validate_key(self, key, axis: int): # valid for a collection of labels (we check their presence later) # slice of labels (where start-end in labels) # slice of integers (only if in the labels) # boolean not in slice and with boolean index if isinstance(key, bool) and not is_bool_dtype(self.obj.index): raise KeyError( f"{key}: boolean label can not be used without a boolean index" ) if isinstance(key, slice) and ( isinstance(key.start, bool) or isinstance(key.stop, bool) ): raise TypeError(f"{key}: boolean values can not be used in a slice") def _has_valid_setitem_indexer(self, indexer) -> bool: return True def _is_scalar_access(self, key: tuple) -> bool: """ Returns ------- bool """ # this is a shortcut accessor to both .loc and .iloc # that provide the equivalent access of .at and .iat # a) avoid getting things via sections and (to minimize dtype changes) # b) provide a performant path if len(key) != self.ndim: return False for i, k in enumerate(key): if not is_scalar(k): return False ax = self.obj.axes[i] if isinstance(ax, MultiIndex): return False if isinstance(k, str) and ax._supports_partial_string_indexing: # partial string indexing, df.loc['2000', 'A'] # should not be considered scalar return False if not ax.is_unique: return False return True # ------------------------------------------------------------------- # MultiIndex Handling def _multi_take_opportunity(self, tup: tuple) -> bool: """ Check whether there is the possibility to use ``_multi_take``. Currently the limit is that all axes being indexed, must be indexed with list-likes. Parameters ---------- tup : tuple Tuple of indexers, one per axis. Returns ------- bool Whether the current indexing, can be passed through `_multi_take`. """ if not all(is_list_like_indexer(x) for x in tup): return False # just too complicated return not any(com.is_bool_indexer(x) for x in tup) def _multi_take(self, tup: tuple): """ Create the indexers for the passed tuple of keys, and executes the take operation. This allows the take operation to be executed all at once, rather than once for each dimension. Improving efficiency. Parameters ---------- tup : tuple Tuple of indexers, one per axis. Returns ------- values: same type as the object being indexed """ # GH 836 d = { axis: self._get_listlike_indexer(key, axis) for (key, axis) in zip(tup, self.obj._AXIS_ORDERS) } return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True) # ------------------------------------------------------------------- def _getitem_iterable(self, key, axis: int): """ Index current object with an iterable collection of keys. Parameters ---------- key : iterable Targeted labels. axis : int Dimension on which the indexing is being made. Raises ------ KeyError If no key was found. Will change in the future to raise if not all keys were found. Returns ------- scalar, DataFrame, or Series: indexed value(s). """ # we assume that not com.is_bool_indexer(key), as that is # handled before we get here. 
self._validate_key(key, axis) # A collection of keys keyarr, indexer = self._get_listlike_indexer(key, axis) return self.obj._reindex_with_indexers( {axis: [keyarr, indexer]}, copy=True, allow_dups=True ) def _getitem_tuple(self, tup: tuple): with suppress(IndexingError): return self._getitem_lowerdim(tup) # no multi-index, so validate all of the indexers self._has_valid_tuple(tup) # ugly hack for GH #836 if self._multi_take_opportunity(tup): return self._multi_take(tup) return self._getitem_tuple_same_dim(tup) def _get_label(self, label, axis: int): # GH#5667 this will fail if the label is not present in the axis. return self.obj.xs(label, axis=axis) def _handle_lowerdim_multi_index_axis0(self, tup: tuple): # we have an axis0 multi-index, handle or raise axis = self.axis or 0 try: # fast path for series or for tup devoid of slices return self._get_label(tup, axis=axis) except (TypeError, InvalidIndexError): # slices are unhashable pass except KeyError as ek: # raise KeyError if number of indexers match # else IndexingError will be raised if self.ndim < len(tup) <= self.obj.index.nlevels: raise ek raise IndexingError("No label returned") def _getitem_axis(self, key, axis: int): key = item_from_zerodim(key) if is_iterator(key): key = list(key) labels = self.obj._get_axis(axis) key = labels._get_partial_string_timestamp_match_key(key) if isinstance(key, slice): self._validate_key(key, axis) return self._get_slice_axis(key, axis=axis) elif com.is_bool_indexer(key): return self._getbool_axis(key, axis=axis) elif is_list_like_indexer(key): # an iterable multi-selection if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)): if hasattr(key, "ndim") and key.ndim > 1: raise ValueError("Cannot index with multidimensional key") return self._getitem_iterable(key, axis=axis) # nested tuple slicing if is_nested_tuple(key, labels): locs = labels.get_locs(key) indexer = [slice(None)] * self.ndim indexer[axis] = locs return self.obj.iloc[tuple(indexer)] # fall thru to straight lookup self._validate_key(key, axis) return self._get_label(key, axis=axis) def _get_slice_axis(self, slice_obj: slice, axis: int): """ This is pretty simple as we just have to deal with labels. """ # caller is responsible for ensuring non-None axis obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step) if isinstance(indexer, slice): return self.obj._slice(indexer, axis=axis) else: # DatetimeIndex overrides Index.slice_indexer and may # return a DatetimeIndex instead of a slice object. return self.obj.take(indexer, axis=axis) def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): """ Convert indexing key into something we can use to do actual fancy indexing on a ndarray. Examples ix[:5] -> slice(0, 5) ix[[1,2,3]] -> [1,2,3] ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz) Going by Zen of Python? 'In the face of ambiguity, refuse the temptation to guess.' raise AmbiguousIndexError with integer labels? 
- No, prefer label-based indexing """ labels = self.obj._get_axis(axis) if isinstance(key, slice): return labels._convert_slice_indexer(key, kind="loc") # see if we are positional in nature is_int_index = labels.is_integer() is_int_positional = is_integer(key) and not is_int_index if is_scalar(key) or isinstance(labels, MultiIndex): # Otherwise get_loc will raise InvalidIndexError # if we are a label return me try: return labels.get_loc(key) except LookupError: if isinstance(key, tuple) and isinstance(labels, MultiIndex): if len(key) == labels.nlevels: return {"key": key} raise except InvalidIndexError: # GH35015, using datetime as column indices raises exception if not isinstance(labels, MultiIndex): raise except TypeError: pass except ValueError: if not is_int_positional: raise # a positional if is_int_positional: # if we are setting and its not a valid location # its an insert which fails by definition # always valid return {"key": key} if is_nested_tuple(key, labels): if isinstance(self.obj, ABCSeries) and any( isinstance(k, tuple) for k in key ): # GH#35349 Raise if tuple in tuple for series raise ValueError("Too many indices") return labels.get_locs(key) elif is_list_like_indexer(key): if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) (inds,) = key.nonzero() return inds else: return self._get_listlike_indexer(key, axis)[1] else: try: return labels.get_loc(key) except LookupError: # allow a not found key only if we are a setter if not is_list_like_indexer(key): return {"key": key} raise def _get_listlike_indexer(self, key, axis: int): """ Transform a list-like of keys into a new index and an indexer. Parameters ---------- key : list-like Targeted labels. axis: int Dimension on which the indexing is being made. Raises ------ KeyError If at least one key was requested but none was found. Returns ------- keyarr: Index New index (coinciding with 'key' if the axis is unique). values : array-like Indexer for the return object, -1 denotes keys not found. """ ax = self.obj._get_axis(axis) # Have the index compute an indexer or return None # if it cannot handle: indexer, keyarr = ax._convert_listlike_indexer(key) # We only act on all found values: if indexer is not None and (indexer != -1).all(): # _validate_read_indexer is a no-op if no -1s, so skip return ax[indexer], indexer if ax._index_as_unique: indexer = ax.get_indexer_for(keyarr) keyarr = ax.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr) self._validate_read_indexer(keyarr, indexer, axis) return keyarr, indexer def _validate_read_indexer(self, key, indexer, axis: int): """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis : int Dimension on which the indexing is being made. Raises ------ KeyError If at least one key was requested but none was found. 
""" if len(key) == 0: return # Count missing values: missing_mask = indexer < 0 missing = (missing_mask).sum() if missing: if missing == len(indexer): axis_name = self.obj._get_axis_name(axis) raise KeyError(f"None of [{key}] are in the [{axis_name}]") ax = self.obj._get_axis(axis) not_found = list(set(key) - set(ax)) raise KeyError(f"{not_found} not in index") @doc(IndexingMixin.iloc) class _iLocIndexer(_LocationIndexer): _valid_types = ( "integer, integer slice (START point is INCLUDED, END " "point is EXCLUDED), listlike of integers, boolean array" ) _takeable = True # ------------------------------------------------------------------- # Key Checks def _validate_key(self, key, axis: int): if com.is_bool_indexer(key): if hasattr(key, "index") and isinstance(key.index, Index): if key.index.inferred_type == "integer": raise NotImplementedError( "iLocation based boolean " "indexing on an integer type " "is not available" ) raise ValueError( "iLocation based boolean indexing cannot use " "an indexable as a mask" ) return if isinstance(key, slice): return elif is_integer(key): self._validate_integer(key, axis) elif isinstance(key, tuple): # a tuple should already have been caught by this point # so don't treat a tuple as a valid indexer raise IndexingError("Too many indexers") elif is_list_like_indexer(key): arr = np.array(key) len_axis = len(self.obj._get_axis(axis)) # check that the key has a numeric dtype if not is_numeric_dtype(arr.dtype): raise IndexError(f".iloc requires numeric indexers, got {arr}") # check that the key does not exceed the maximum size of the index if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis): raise IndexError("positional indexers are out-of-bounds") else: raise ValueError(f"Can only index by location with a [{self._valid_types}]") def _has_valid_setitem_indexer(self, indexer) -> bool: """ Validate that a positional indexer cannot enlarge its target will raise if needed, does not modify the indexer externally. Returns ------- bool """ if isinstance(indexer, dict): raise IndexError("iloc cannot enlarge its target object") if isinstance(indexer, ABCDataFrame): warnings.warn( "DataFrame indexer for .iloc is deprecated and will be removed in" "a future version.\n" "consider using .loc with a DataFrame indexer for automatic alignment.", FutureWarning, stacklevel=3, ) if not isinstance(indexer, tuple): indexer = _tuplify(self.ndim, indexer) for ax, i in zip(self.obj.axes, indexer): if isinstance(i, slice): # should check the stop slice? pass elif is_list_like_indexer(i): # should check the elements? pass elif is_integer(i): if i >= len(ax): raise IndexError("iloc cannot enlarge its target object") elif isinstance(i, dict): raise IndexError("iloc cannot enlarge its target object") return True def _is_scalar_access(self, key: tuple) -> bool: """ Returns ------- bool """ # this is a shortcut accessor to both .loc and .iloc # that provide the equivalent access of .at and .iat # a) avoid getting things via sections and (to minimize dtype changes) # b) provide a performant path if len(key) != self.ndim: return False return all(is_integer(k) for k in key) def _validate_integer(self, key: int, axis: int) -> None: """ Check that 'key' is a valid position in the desired axis. Parameters ---------- key : int Requested position. axis : int Desired axis. Raises ------ IndexError If 'key' is not a valid position in axis 'axis'. 
""" len_axis = len(self.obj._get_axis(axis)) if key >= len_axis or key < -len_axis: raise IndexError("single positional indexer is out-of-bounds") # ------------------------------------------------------------------- def _getitem_tuple(self, tup: tuple): self._has_valid_tuple(tup) with suppress(IndexingError): return self._getitem_lowerdim(tup) return self._getitem_tuple_same_dim(tup) def _get_list_axis(self, key, axis: int): """ Return Series values by list or array of integers. Parameters ---------- key : list-like positional indexer axis : int Returns ------- Series object Notes ----- `axis` can only be zero. """ try: return self.obj._take_with_is_copy(key, axis=axis) except IndexError as err: # re-raise with different error message raise IndexError("positional indexers are out-of-bounds") from err def _getitem_axis(self, key, axis: int): if isinstance(key, ABCDataFrame): raise IndexError( "DataFrame indexer is not allowed for .iloc\n" "Consider using .loc for automatic alignment." ) if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) if is_iterator(key): key = list(key) if isinstance(key, list): key = np.asarray(key) if com.is_bool_indexer(key): self._validate_key(key, axis) return self._getbool_axis(key, axis=axis) # a list of integers elif is_list_like_indexer(key): return self._get_list_axis(key, axis=axis) # a single integer else: key = item_from_zerodim(key) if not is_integer(key): raise TypeError("Cannot index by location index with a non-integer key") # validate the location self._validate_integer(key, axis) return self.obj._ixs(key, axis=axis) def _get_slice_axis(self, slice_obj: slice, axis: int): # caller is responsible for ensuring non-None axis obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) labels._validate_positional_slice(slice_obj) return self.obj._slice(slice_obj, axis=axis) def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): """ Much simpler as we only have to deal with our valid types. """ return key def _get_setitem_indexer(self, key): # GH#32257 Fall through to let numpy do validation if is_iterator(key): return list(key) return key # ------------------------------------------------------------------- def _setitem_with_indexer(self, indexer, value, name="iloc"): """ _setitem_with_indexer is for setting values on a Series/DataFrame using positional indexers. If the relevant keys are not present, the Series/DataFrame may be expanded. This method is currently broken when dealing with non-unique Indexes, since it goes from positional indexers back to labels when calling BlockManager methods, see GH#12991, GH#22046, GH#15686. 
""" info_axis = self.obj._info_axis_number # maybe partial set take_split_path = not self.obj._mgr.is_single_block # if there is only one block/type, still have to take split path # unless the block is one-dimensional or it can hold the value if ( not take_split_path and getattr(self.obj._mgr, "blocks", False) and self.ndim > 1 ): # in case of dict, keys are indices val = list(value.values()) if isinstance(value, dict) else value blk = self.obj._mgr.blocks[0] take_split_path = not blk._can_hold_element(val) # if we have any multi-indexes that have non-trivial slices # (not null slices) then we must take the split path, xref # GH 10360, GH 27841 if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): for i, ax in zip(indexer, self.obj.axes): if isinstance(ax, MultiIndex) and not ( is_integer(i) or com.is_null_slice(i) ): take_split_path = True break if isinstance(indexer, tuple): nindexer = [] for i, idx in enumerate(indexer): if isinstance(idx, dict): # reindex the axis to the new value # and set inplace key, _ = convert_missing_indexer(idx) # if this is the items axes, then take the main missing # path first # this correctly sets the dtype and avoids cache issues # essentially this separates out the block that is needed # to possibly be modified if self.ndim > 1 and i == info_axis: # add the new item, and set the value # must have all defined axes if we have a scalar # or a list-like on the non-info axes if we have a # list-like if not len(self.obj): if not is_list_like_indexer(value): raise ValueError( "cannot set a frame with no " "defined index and a scalar" ) self.obj[key] = value return # add a new item with the dtype setup if com.is_null_slice(indexer[0]): # We are setting an entire column self.obj[key] = value else: self.obj[key] = infer_fill_value(value) new_indexer = convert_from_missing_indexer_tuple( indexer, self.obj.axes ) self._setitem_with_indexer(new_indexer, value, name) return # reindex the axis # make sure to clear the cache because we are # just replacing the block manager here # so the object is the same index = self.obj._get_axis(i) labels = index.insert(len(index), key) # We are expanding the Series/DataFrame values to match # the length of thenew index `labels`. GH#40096 ensure # this is valid even if the index has duplicates. taker = np.arange(len(index) + 1, dtype=np.intp) taker[-1] = -1 reindexers = {i: (labels, taker)} new_obj = self.obj._reindex_with_indexers( reindexers, allow_dups=True ) self.obj._mgr = new_obj._mgr self.obj._maybe_update_cacher(clear=True) self.obj._is_copy = None nindexer.append(labels.get_loc(key)) else: nindexer.append(idx) indexer = tuple(nindexer) else: indexer, missing = convert_missing_indexer(indexer) if missing: self._setitem_with_indexer_missing(indexer, value) return # align and set the values if take_split_path: # We have to operate column-wise self._setitem_with_indexer_split_path(indexer, value, name) else: self._setitem_single_block(indexer, value, name) def _setitem_with_indexer_split_path(self, indexer, value, name: str): """ Setitem column-wise. 
""" # Above we only set take_split_path to True for 2D cases assert self.ndim == 2 if not isinstance(indexer, tuple): indexer = _tuplify(self.ndim, indexer) if len(indexer) > self.ndim: raise IndexError("too many indices for array") if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2: raise ValueError(r"Cannot set values with ndim > 2") if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict): from pandas import Series value = self._align_series(indexer, Series(value)) # Ensure we have something we can iterate over info_axis = indexer[1] ilocs = self._ensure_iterable_column_indexer(info_axis) pi = indexer[0] lplane_indexer = length_of_indexer(pi, self.obj.index) # lplane_indexer gives the expected length of obj[indexer[0]] # we need an iterable, with a ndim of at least 1 # eg. don't pass through np.array(0) if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0: if isinstance(value, ABCDataFrame): self._setitem_with_indexer_frame_value(indexer, value, name) elif np.ndim(value) == 2: self._setitem_with_indexer_2d_value(indexer, value) elif len(ilocs) == 1 and lplane_indexer == len(value) and not is_scalar(pi): # We are setting multiple rows in a single column. self._setitem_single_column(ilocs[0], value, pi) elif len(ilocs) == 1 and 0 != lplane_indexer != len(value): # We are trying to set N values into M entries of a single # column, which is invalid for N != M # Exclude zero-len for e.g. boolean masking that is all-false if len(value) == 1 and not is_integer(info_axis): # This is a case like df.iloc[:3, [1]] = [0] # where we treat as df.iloc[:3, 1] = 0 return self._setitem_with_indexer((pi, info_axis[0]), value[0]) raise ValueError( "Must have equal len keys and value " "when setting with an iterable" ) elif lplane_indexer == 0 and len(value) == len(self.obj.index): # We get here in one case via .loc with a all-False mask pass elif len(ilocs) == len(value): # We are setting multiple columns in a single row. for loc, v in zip(ilocs, value): self._setitem_single_column(loc, v, pi) elif len(ilocs) == 1 and com.is_null_slice(pi) and len(self.obj) == 0: # This is a setitem-with-expansion, see # test_loc_setitem_empty_append_expands_rows_mixed_dtype # e.g. 
df = DataFrame(columns=["x", "y"]) # df["x"] = df["x"].astype(np.int64) # df.loc[:, "x"] = [1, 2, 3] self._setitem_single_column(ilocs[0], value, pi) else: raise ValueError( "Must have equal len keys and value " "when setting with an iterable" ) else: # scalar value for loc in ilocs: self._setitem_single_column(loc, value, pi) def _setitem_with_indexer_2d_value(self, indexer, value): # We get here with np.ndim(value) == 2, excluding DataFrame, # which goes through _setitem_with_indexer_frame_value pi = indexer[0] ilocs = self._ensure_iterable_column_indexer(indexer[1]) # GH#7551 Note that this coerces the dtype if we are mixed value = np.array(value, dtype=object) if len(ilocs) != value.shape[1]: raise ValueError( "Must have equal len keys and value when setting with an ndarray" ) for i, loc in enumerate(ilocs): # setting with a list, re-coerces self._setitem_single_column(loc, value[:, i].tolist(), pi) def _setitem_with_indexer_frame_value(self, indexer, value: DataFrame, name: str): ilocs = self._ensure_iterable_column_indexer(indexer[1]) sub_indexer = list(indexer) pi = indexer[0] multiindex_indexer = isinstance(self.obj.columns, MultiIndex) unique_cols = value.columns.is_unique # We do not want to align the value in case of iloc GH#37728 if name == "iloc": for i, loc in enumerate(ilocs): val = value.iloc[:, i] self._setitem_single_column(loc, val, pi) elif not unique_cols and value.columns.equals(self.obj.columns): # We assume we are already aligned, see # test_iloc_setitem_frame_duplicate_columns_multiple_blocks for loc in ilocs: item = self.obj.columns[loc] if item in value: sub_indexer[1] = item val = self._align_series( tuple(sub_indexer), value.iloc[:, loc], multiindex_indexer, ) else: val = np.nan self._setitem_single_column(loc, val, pi) elif not unique_cols: raise ValueError("Setting with non-unique columns is not allowed.") else: for loc in ilocs: item = self.obj.columns[loc] if item in value: sub_indexer[1] = item val = self._align_series( tuple(sub_indexer), value[item], multiindex_indexer ) else: val = np.nan self._setitem_single_column(loc, val, pi) def _setitem_single_column(self, loc: int, value, plane_indexer): """ Parameters ---------- loc : int Indexer for column position plane_indexer : int, slice, listlike[int] The indexer we use for setitem along axis=0. """ pi = plane_indexer ser = self.obj._ixs(loc, axis=1) # perform the equivalent of a setitem on the info axis # as we have a null slice or a slice with full bounds # which means essentially reassign to the columns of a # multi-dim object # GH#6149 (null slice), GH#10408 (full bounds) if com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj)): ser = value elif ( is_array_like(value) and is_exact_shape_match(ser, value) and not is_empty_indexer(pi, value) ): if is_list_like(pi): ser = value[np.argsort(pi)] else: # in case of slice ser = value[pi] else: # set the item, possibly having a dtype change ser = ser.copy() ser._mgr = ser._mgr.setitem(indexer=(pi,), value=value) ser._maybe_update_cacher(clear=True) # reset the sliced object if unique self.obj._iset_item(loc, ser) def _setitem_single_block(self, indexer, value, name: str): """ _setitem_with_indexer for the case when we have a single Block. 
""" from pandas import Series info_axis = self.obj._info_axis_number item_labels = self.obj._get_axis(info_axis) if isinstance(indexer, tuple): # if we are setting on the info axis ONLY # set using those methods to avoid block-splitting # logic here if ( len(indexer) > info_axis and is_integer(indexer[info_axis]) and all( com.is_null_slice(idx) for i, idx in enumerate(indexer) if i != info_axis ) ): selected_item_labels = item_labels[indexer[info_axis]] if len(item_labels.get_indexer_for([selected_item_labels])) == 1: self.obj[selected_item_labels] = value return indexer = maybe_convert_ix(*indexer) if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict): # TODO(EA): ExtensionBlock.setitem this causes issues with # setting for extensionarrays that store dicts. Need to decide # if it's worth supporting that. value = self._align_series(indexer, Series(value)) elif isinstance(value, ABCDataFrame) and name != "iloc": value = self._align_frame(indexer, value) # check for chained assignment self.obj._check_is_chained_assignment_possible() # actually do the set self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value) self.obj._maybe_update_cacher(clear=True) def _setitem_with_indexer_missing(self, indexer, value): """ Insert new row(s) or column(s) into the Series or DataFrame. """ from pandas import Series # reindex the axis to the new value # and set inplace if self.ndim == 1: index = self.obj.index new_index = index.insert(len(index), indexer) # we have a coerced indexer, e.g. a float # that matches in an Int64Index, so # we will not create a duplicate index, rather # index to that element # e.g. 0.0 -> 0 # GH#12246 if index.is_unique: new_indexer = index.get_indexer([new_index[-1]]) if (new_indexer != -1).any(): # We get only here with loc, so can hard code return self._setitem_with_indexer(new_indexer, value, "loc") # this preserves dtype of the value new_values = Series([value])._values if len(self.obj._values): # GH#22717 handle casting compatibility that np.concatenate # does incorrectly new_values = concat_compat([self.obj._values, new_values]) self.obj._mgr = self.obj._constructor( new_values, index=new_index, name=self.obj.name )._mgr self.obj._maybe_update_cacher(clear=True) elif self.ndim == 2: if not len(self.obj.columns): # no columns and scalar raise ValueError("cannot set a frame with no defined columns") if isinstance(value, ABCSeries): # append a Series value = value.reindex(index=self.obj.columns, copy=True) value.name = indexer elif isinstance(value, dict): value = Series( value, index=self.obj.columns, name=indexer, dtype=object ) else: # a list-list if is_list_like_indexer(value): # must have conforming columns if len(value) != len(self.obj.columns): raise ValueError("cannot set a row with mismatched columns") value = Series(value, index=self.obj.columns, name=indexer) self.obj._mgr = self.obj.append(value)._mgr self.obj._maybe_update_cacher(clear=True) def _ensure_iterable_column_indexer(self, column_indexer): """ Ensure that our column indexer is something that can be iterated over. 
""" if is_integer(column_indexer): ilocs = [column_indexer] elif isinstance(column_indexer, slice): ilocs = np.arange(len(self.obj.columns))[column_indexer] elif isinstance(column_indexer, np.ndarray) and is_bool_dtype( column_indexer.dtype ): ilocs = np.arange(len(column_indexer))[column_indexer] else: ilocs = column_indexer return ilocs def _align_series(self, indexer, ser: Series, multiindex_indexer: bool = False): """ Parameters ---------- indexer : tuple, slice, scalar Indexer used to get the locations that will be set to `ser`. ser : pd.Series Values to assign to the locations specified by `indexer`. multiindex_indexer : bool, optional Defaults to False. Should be set to True if `indexer` was from a `pd.MultiIndex`, to avoid unnecessary broadcasting. Returns ------- `np.array` of `ser` broadcast to the appropriate shape for assignment to the locations selected by `indexer` """ if isinstance(indexer, (slice, np.ndarray, list, Index)): indexer = (indexer,) if isinstance(indexer, tuple): # flatten np.ndarray indexers def ravel(i): return i.ravel() if isinstance(i, np.ndarray) else i indexer = tuple(map(ravel, indexer)) aligners = [not com.is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) single_aligner = sum_aligners == 1 is_frame = self.ndim == 2 obj = self.obj # are we a single alignable value on a non-primary # dim (e.g. panel: 1,2, or frame: 0) ? # hence need to align to a single axis dimension # rather that find all valid dims # frame if is_frame: single_aligner = single_aligner and aligners[0] # we have a frame, with multiple indexers on both axes; and a # series, so need to broadcast (see GH5206) if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer): ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values # single indexer if len(indexer) > 1 and not multiindex_indexer: len_indexer = len(indexer[1]) ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T return ser for i, idx in enumerate(indexer): ax = obj.axes[i] # multiple aligners (or null slices) if is_sequence(idx) or isinstance(idx, slice): if single_aligner and com.is_null_slice(idx): continue new_ix = ax[idx] if not is_list_like_indexer(new_ix): new_ix = Index([new_ix]) else: new_ix = Index(new_ix) if ser.index.equals(new_ix) or not len(new_ix): return ser._values.copy() return ser.reindex(new_ix)._values # 2 dims elif single_aligner: # reindex along index ax = self.obj.axes[1] if ser.index.equals(ax) or not len(ax): return ser._values.copy() return ser.reindex(ax)._values elif is_integer(indexer) and self.ndim == 1: if is_object_dtype(self.obj): return ser ax = self.obj._get_axis(0) if ser.index.equals(ax): return ser._values.copy() return ser.reindex(ax)._values[indexer] elif is_integer(indexer): ax = self.obj._get_axis(1) if ser.index.equals(ax): return ser._values.copy() return ser.reindex(ax)._values raise ValueError("Incompatible indexer with Series") def _align_frame(self, indexer, df: DataFrame): is_frame = self.ndim == 2 if isinstance(indexer, tuple): idx, cols = None, None sindexers = [] for i, ix in enumerate(indexer): ax = self.obj.axes[i] if is_sequence(ix) or isinstance(ix, slice): if isinstance(ix, np.ndarray): ix = ix.ravel() if idx is None: idx = ax[ix] elif cols is None: cols = ax[ix] else: break else: sindexers.append(i) if idx is not None and cols is not None: if df.index.equals(idx) and df.columns.equals(cols): val = df.copy()._values else: val = df.reindex(idx, columns=cols)._values return val elif (isinstance(indexer, slice) or 
is_list_like_indexer(indexer)) and is_frame: ax = self.obj.index[indexer] if df.index.equals(ax): val = df.copy()._values else: # we have a multi-index and are trying to align # with a particular, level GH3738 if ( isinstance(ax, MultiIndex) and isinstance(df.index, MultiIndex) and ax.nlevels != df.index.nlevels ): raise TypeError( "cannot align on a multi-index with out " "specifying the join levels" ) val = df.reindex(index=ax)._values return val raise ValueError("Incompatible indexer with DataFrame") class _ScalarAccessIndexer(NDFrameIndexerBase): """ Access scalars quickly. """ def _convert_key(self, key, is_setter: bool = False): raise AbstractMethodError(self) def __getitem__(self, key): if not isinstance(key, tuple): # we could have a convertible item here (e.g. Timestamp) if not is_list_like_indexer(key): key = (key,) else: raise ValueError("Invalid call for scalar access (getting)!") key = self._convert_key(key) return self.obj._get_value(*key, takeable=self._takeable) def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: # scalar callable may return tuple key = com.apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = _tuplify(self.ndim, key) key = list(self._convert_key(key, is_setter=True)) if len(key) != self.ndim: raise ValueError("Not enough indexers for scalar access (setting)!") self.obj._set_value(*key, value=value, takeable=self._takeable) @doc(IndexingMixin.at) class _AtIndexer(_ScalarAccessIndexer): _takeable = False def _convert_key(self, key, is_setter: bool = False): """ Require they keys to be the same type as the index. (so we don't fallback) """ # GH 26989 # For series, unpacking key needs to result in the label. # This is already the case for len(key) == 1; e.g. (1,) if self.ndim == 1 and len(key) > 1: key = (key,) # allow arbitrary setting if is_setter: return list(key) return key @property def _axes_are_unique(self) -> bool: # Only relevant for self.ndim == 2 assert self.ndim == 2 return self.obj.index.is_unique and self.obj.columns.is_unique def __getitem__(self, key): if self.ndim == 2 and not self._axes_are_unique: # GH#33041 fall back to .loc if not isinstance(key, tuple) or not all(is_scalar(x) for x in key): raise ValueError("Invalid call for scalar access (getting)!") return self.obj.loc[key] return super().__getitem__(key) def __setitem__(self, key, value): if self.ndim == 2 and not self._axes_are_unique: # GH#33041 fall back to .loc if not isinstance(key, tuple) or not all(is_scalar(x) for x in key): raise ValueError("Invalid call for scalar access (setting)!") self.obj.loc[key] = value return return super().__setitem__(key, value) @doc(IndexingMixin.iat) class _iAtIndexer(_ScalarAccessIndexer): _takeable = True def _convert_key(self, key, is_setter: bool = False): """ Require integer args. (and convert to label arguments) """ for i in key: if not is_integer(i): raise ValueError("iAt based indexing can only have integer indexers") return key def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]: """ Given an indexer for the first dimension, create an equivalent tuple for indexing over all dimensions. Parameters ---------- ndim : int loc : object Returns ------- tuple """ _tup: list[Hashable | slice] _tup = [slice(None, None) for _ in range(ndim)] _tup[0] = loc return tuple(_tup) def convert_to_index_sliceable(obj: DataFrame, key): """ If we are index sliceable, then return my slicer, otherwise return None. 
""" idx = obj.index if isinstance(key, slice): return idx._convert_slice_indexer(key, kind="getitem") elif isinstance(key, str): # we are an actual column if key in obj.columns: return None # We might have a datetimelike string that we can translate to a # slice here via partial string indexing if idx._supports_partial_string_indexing: try: res = idx._get_string_slice(str(key)) warnings.warn( "Indexing a DataFrame with a datetimelike index using a single " "string to slice the rows, like `frame[string]`, is deprecated " "and will be removed in a future version. Use `frame.loc[string]` " "instead.", FutureWarning, stacklevel=3, ) return res except (KeyError, ValueError, NotImplementedError): return None return None def check_bool_indexer(index: Index, key) -> np.ndarray: """ Check if key is a valid boolean indexer for an object with such index and perform reindexing or conversion if needed. This function assumes that is_bool_indexer(key) == True. Parameters ---------- index : Index Index of the object on which the indexing is done. key : list-like Boolean indexer to check. Returns ------- np.array Resulting key. Raises ------ IndexError If the key does not have the same length as index. IndexingError If the index of the key is unalignable to index. """ result = key if isinstance(key, ABCSeries) and not key.index.equals(index): result = result.reindex(index) mask = isna(result._values) if mask.any(): raise IndexingError( "Unalignable boolean Series provided as " "indexer (index of the boolean Series and of " "the indexed object do not match)." ) return result.astype(bool)._values if is_object_dtype(key): # key might be object-dtype bool, check_array_indexer needs bool array result = np.asarray(result, dtype=bool) elif not is_array_like(result): # GH 33924 # key may contain nan elements, check_array_indexer needs bool array result = pd_array(result, dtype=bool) return check_array_indexer(index, result) def convert_missing_indexer(indexer): """ Reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted """ if isinstance(indexer, dict): # a missing key (but not a tuple indexer) indexer = indexer["key"] if isinstance(indexer, bool): raise KeyError("cannot use a single bool to index into setitem") return indexer, True return indexer, False def convert_from_missing_indexer_tuple(indexer, axes): """ Create a filtered indexer that doesn't have any missing indexers. """ def get_indexer(_i, _idx): return axes[_i].get_loc(_idx["key"]) if isinstance(_idx, dict) else _idx return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)) def maybe_convert_ix(*args): """ We likely want to take the cross-product. """ for arg in args: if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)): return args return np.ix_(*args) def is_nested_tuple(tup, labels) -> bool: """ Returns ------- bool """ # check for a compatible nested tuple and multiindexes among the axes if not isinstance(tup, tuple): return False for k in tup: if is_list_like(k) or isinstance(k, slice): return isinstance(labels, MultiIndex) return False def is_label_like(key) -> bool: """ Returns ------- bool """ # select a label or row return not isinstance(key, slice) and not is_list_like_indexer(key) def need_slice(obj: slice) -> bool: """ Returns ------- bool """ return ( obj.start is not None or obj.stop is not None or (obj.step is not None and obj.step != 1) )
import numpy as np import pytest import pandas as pd from pandas import ( Categorical, DataFrame, Series, Timestamp, date_range, ) import pandas._testing as tm class TestDataFrameDescribe: def test_describe_bool_in_mixed_frame(self): df = DataFrame( { "string_data": ["a", "b", "c", "d", "e"], "bool_data": [True, True, False, False, False], "int_data": [10, 20, 30, 40, 50], } ) # Integer data are included in .describe() output, # Boolean and string data are not. result = df.describe() expected = DataFrame( {"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]}, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) tm.assert_frame_equal(result, expected) # Top value is a boolean value that is False result = df.describe(include=["bool"]) expected = DataFrame( {"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"] ) tm.assert_frame_equal(result, expected) def test_describe_empty_object(self): # GH#27183 df = DataFrame({"A": [None, None]}, dtype=object) result = df.describe() expected = DataFrame( {"A": [0, 0, np.nan, np.nan]}, dtype=object, index=["count", "unique", "top", "freq"], ) tm.assert_frame_equal(result, expected) result = df.iloc[:0].describe() tm.assert_frame_equal(result, expected) def test_describe_bool_frame(self): # GH#13891 df = DataFrame( { "bool_data_1": [False, False, True, True], "bool_data_2": [False, True, True, True], } ) result = df.describe() expected = DataFrame( {"bool_data_1": [4, 2, False, 2], "bool_data_2": [4, 2, True, 3]}, index=["count", "unique", "top", "freq"], ) tm.assert_frame_equal(result, expected) df = DataFrame( { "bool_data": [False, False, True, True, False], "int_data": [0, 1, 2, 3, 4], } ) result = df.describe() expected = DataFrame( {"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]}, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) tm.assert_frame_equal(result, expected) df = DataFrame( {"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]} ) result = df.describe() expected = DataFrame( {"bool_data": [4, 2, False, 2], "str_data": [4, 3, "a", 2]}, index=["count", "unique", "top", "freq"], ) tm.assert_frame_equal(result, expected) def test_describe_categorical(self): df = DataFrame({"value": np.random.randint(0, 10000, 100)}) labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)] cat_labels = Categorical(labels, labels) df = df.sort_values(by=["value"], ascending=True) df["value_group"] = pd.cut( df.value, range(0, 10500, 500), right=False, labels=cat_labels ) cat = df # Categoricals should not show up together with numerical columns result = cat.describe() assert len(result.columns) == 1 # In a frame, describe() for the cat should be the same as for string # arrays (count, unique, top, freq) cat = Categorical( ["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True ) s = Series(cat) result = s.describe() expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"]) tm.assert_series_equal(result, expected) cat = Series(Categorical(["a", "b", "c", "c"])) df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]}) result = df3.describe() tm.assert_numpy_array_equal(result["cat"].values, result["s"].values) def test_describe_empty_categorical_column(self): # GH#26397 # Ensure the index of an empty categorical DataFrame column # also contains (count, unique, top, freq) df = DataFrame({"empty_col": Categorical([])}) result = df.describe() expected = DataFrame( {"empty_col": [0, 0, np.nan, np.nan]}, index=["count", "unique", "top", "freq"], 
dtype="object", ) tm.assert_frame_equal(result, expected) # ensure NaN, not None assert np.isnan(result.iloc[2, 0]) assert np.isnan(result.iloc[3, 0]) def test_describe_categorical_columns(self): # GH#11558 columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX") df = DataFrame( { "int1": [10, 20, 30, 40, 50], "int2": [10, 20, 30, 40, 50], "obj": ["A", 0, None, "X", 1], }, columns=columns, ) result = df.describe() exp_columns = pd.CategoricalIndex( ["int1", "int2"], categories=["int1", "int2", "obj"], ordered=True, name="XXX", ) expected = DataFrame( { "int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50], "int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50], }, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], columns=exp_columns, ) tm.assert_frame_equal(result, expected) tm.assert_categorical_equal(result.columns.values, expected.columns.values) def test_describe_datetime_columns(self): columns = pd.DatetimeIndex( ["2011-01-01", "2011-02-01", "2011-03-01"], freq="MS", tz="US/Eastern", name="XXX", ) df = DataFrame( { 0: [10, 20, 30, 40, 50], 1: [10, 20, 30, 40, 50], 2: ["A", 0, None, "X", 1], } ) df.columns = columns result = df.describe() exp_columns = pd.DatetimeIndex( ["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX" ) expected = DataFrame( { 0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50], 1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50], }, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) expected.columns = exp_columns tm.assert_frame_equal(result, expected) assert result.columns.freq == "MS" assert result.columns.tz == expected.columns.tz def test_describe_timedelta_values(self): # GH#6145 t1 = pd.timedelta_range("1 days", freq="D", periods=5) t2 = pd.timedelta_range("1 hours", freq="H", periods=5) df = DataFrame({"t1": t1, "t2": t2}) expected = DataFrame( { "t1": [ 5, pd.Timedelta("3 days"), df.iloc[:, 0].std(), pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days"), pd.Timedelta("4 days"), pd.Timedelta("5 days"), ], "t2": [ 5, pd.Timedelta("3 hours"), df.iloc[:, 1].std(), pd.Timedelta("1 hours"), pd.Timedelta("2 hours"), pd.Timedelta("3 hours"), pd.Timedelta("4 hours"), pd.Timedelta("5 hours"), ], }, index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) result = df.describe() tm.assert_frame_equal(result, expected) exp_repr = ( " t1 t2\n" "count 5 5\n" "mean 3 days 00:00:00 0 days 03:00:00\n" "std 1 days 13:56:50.394919273 0 days 01:34:52.099788303\n" "min 1 days 00:00:00 0 days 01:00:00\n" "25% 2 days 00:00:00 0 days 02:00:00\n" "50% 3 days 00:00:00 0 days 03:00:00\n" "75% 4 days 00:00:00 0 days 04:00:00\n" "max 5 days 00:00:00 0 days 05:00:00" ) assert repr(result) == exp_repr def test_describe_tz_values(self, tz_naive_fixture): # GH#21332 tz = tz_naive_fixture s1 = Series(range(5)) start = Timestamp(2018, 1, 1) end = Timestamp(2018, 1, 5) s2 = Series(date_range(start, end, tz=tz)) df = DataFrame({"s1": s1, "s2": s2}) expected = DataFrame( { "s1": [5, 2, 0, 1, 2, 3, 4, 1.581139], "s2": [ 5, Timestamp(2018, 1, 3).tz_localize(tz), start.tz_localize(tz), s2[1], s2[2], s2[3], end.tz_localize(tz), np.nan, ], }, index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], ) result = df.describe(include="all", datetime_is_numeric=True) tm.assert_frame_equal(result, expected) def test_datetime_is_numeric_includes_datetime(self): df = DataFrame({"a": date_range("2012", periods=3), "b": [1, 2, 3]}) result = df.describe(datetime_is_numeric=True) expected = DataFrame( { 
"a": [ 3, Timestamp("2012-01-02"), Timestamp("2012-01-01"), Timestamp("2012-01-01T12:00:00"), Timestamp("2012-01-02"), Timestamp("2012-01-02T12:00:00"), Timestamp("2012-01-03"), np.nan, ], "b": [3, 2, 1, 1.5, 2, 2.5, 3, 1], }, index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], ) tm.assert_frame_equal(result, expected) def test_describe_tz_values2(self): tz = "CET" s1 = Series(range(5)) start = Timestamp(2018, 1, 1) end = Timestamp(2018, 1, 5) s2 = Series(date_range(start, end, tz=tz)) df = DataFrame({"s1": s1, "s2": s2}) s1_ = s1.describe() s2_ = Series( [ 5, 5, s2.value_counts().index[0], 1, start.tz_localize(tz), end.tz_localize(tz), ], index=["count", "unique", "top", "freq", "first", "last"], ) idx = [ "count", "unique", "top", "freq", "first", "last", "mean", "std", "min", "25%", "50%", "75%", "max", ] expected = pd.concat([s1_, s2_], axis=1, keys=["s1", "s2"]).loc[idx] with tm.assert_produces_warning(FutureWarning): result = df.describe(include="all") tm.assert_frame_equal(result, expected) def test_describe_percentiles_integer_idx(self): # GH#26660 df = DataFrame({"x": [1]}) pct = np.linspace(0, 1, 10 + 1) result = df.describe(percentiles=pct) expected = DataFrame( {"x": [1.0, 1.0, np.NaN, 1.0, *[1.0 for _ in pct], 1.0]}, index=[ "count", "mean", "std", "min", "0%", "10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%", "max", ], ) tm.assert_frame_equal(result, expected) def test_describe_does_not_raise_error_for_dictlike_elements(self): # GH#32409 df = DataFrame([{"test": {"a": "1"}}, {"test": {"a": "2"}}]) expected = DataFrame( {"test": [2, 2, {"a": "1"}, 1]}, index=["count", "unique", "top", "freq"] ) result = df.describe() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("exclude", ["x", "y", ["x", "y"], ["x", "z"]]) def test_describe_when_include_all_exclude_not_allowed(self, exclude): """ When include is 'all', then setting exclude != None is not allowed. """ df = DataFrame({"x": [1], "y": [2], "z": [3]}) msg = "exclude must be None when include is 'all'" with pytest.raises(ValueError, match=msg): df.describe(include="all", exclude=exclude) def test_describe_with_duplicate_columns(self): df = DataFrame( [[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["bar", "a", "a"], dtype="float64", ) result = df.describe() ser = df.iloc[:, 0].describe() expected = pd.concat([ser, ser, ser], keys=df.columns, axis=1) tm.assert_frame_equal(result, expected)
repo_name: datapythonista/pandas
test_path: pandas/tests/frame/methods/test_describe.py
code_path: pandas/core/indexing.py

from pandas import ( TimedeltaIndex, timedelta_range, ) import pandas._testing as tm class TestTimedeltaIndexDelete: def test_delete(self): idx = timedelta_range(start="1 Days", periods=5, freq="D", name="idx") # preserve freq expected_0 = timedelta_range(start="2 Days", periods=4, freq="D", name="idx") expected_4 = timedelta_range(start="1 Days", periods=4, freq="D", name="idx") # reset freq to None expected_1 = TimedeltaIndex( ["1 day", "3 day", "4 day", "5 day"], freq=None, name="idx" ) cases = { 0: expected_0, -5: expected_0, -1: expected_4, 4: expected_4, 1: expected_1, } for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq with tm.external_error_raised((IndexError, ValueError)): # either depending on numpy version idx.delete(5) def test_delete_slice(self): idx = timedelta_range(start="1 days", periods=10, freq="D", name="idx") # preserve freq expected_0_2 = timedelta_range(start="4 days", periods=7, freq="D", name="idx") expected_7_9 = timedelta_range(start="1 days", periods=7, freq="D", name="idx") # reset freq to None expected_3_5 = TimedeltaIndex( ["1 d", "2 d", "3 d", "7 d", "8 d", "9 d", "10d"], freq=None, name="idx" ) cases = { (0, 1, 2): expected_0_2, (7, 8, 9): expected_7_9, (3, 4, 5): expected_3_5, } for n, expected in cases.items(): result = idx.delete(n) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq result = idx.delete(slice(n[0], n[-1] + 1)) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq def test_delete_doesnt_infer_freq(self): # GH#30655 behavior matches DatetimeIndex tdi = TimedeltaIndex(["1 Day", "2 Days", None, "3 Days", "4 Days"]) result = tdi.delete(2) assert result.freq is None
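The delete tests above assert that removing an endpoint of a regular TimedeltaIndex keeps the inferred freq, while removing an interior element resets it to None; a minimal sketch of that contract with arbitrarily chosen values:

import pandas as pd

tdi = pd.timedelta_range(start="1 Days", periods=5, freq="D", name="idx")

# Dropping the first or last element leaves an evenly spaced index,
# so the frequency is preserved.
print(tdi.delete(0).freq)   # <Day>
print(tdi.delete(-1).freq)  # <Day>

# Dropping an interior element breaks the regular spacing,
# so freq is reset to None.
print(tdi.delete(2).freq)   # None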
repo_name: datapythonista/pandas
test_path: pandas/tests/frame/methods/test_describe.py
code_path: pandas/tests/indexes/timedeltas/test_delete.py
from __future__ import annotations from contextlib import contextmanager import re from typing import ( Sequence, Type, cast, ) import warnings @contextmanager def assert_produces_warning( expected_warning: type[Warning] | bool | None = Warning, filter_level="always", check_stacklevel: bool = True, raise_on_extra_warnings: bool = True, match: str | None = None, ): """ Context manager for running code expected to either raise a specific warning, or not raise any warnings. Verifies that the code raises the expected warning, and that it does not raise any other unexpected warnings. It is basically a wrapper around ``warnings.catch_warnings``. Parameters ---------- expected_warning : {Warning, False, None}, default Warning The type of Exception raised. ``exception.Warning`` is the base class for all warnings. To check that no warning is returned, specify ``False`` or ``None``. filter_level : str or None, default "always" Specifies whether warnings are ignored, displayed, or turned into errors. Valid values are: * "error" - turns matching warnings into exceptions * "ignore" - discard the warning * "always" - always emit a warning * "default" - print the warning the first time it is generated from each location * "module" - print the warning the first time it is generated from each module * "once" - print the warning the first time it is generated check_stacklevel : bool, default True If True, displays the line that called the function containing the warning to show were the function is called. Otherwise, the line that implements the function is displayed. raise_on_extra_warnings : bool, default True Whether extra warnings not of the type `expected_warning` should cause the test to fail. match : str, optional Match warning message. Examples -------- >>> import warnings >>> with assert_produces_warning(): ... warnings.warn(UserWarning()) ... >>> with assert_produces_warning(False): ... warnings.warn(RuntimeWarning()) ... Traceback (most recent call last): ... AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. >>> with assert_produces_warning(UserWarning): ... warnings.warn(RuntimeWarning()) Traceback (most recent call last): ... AssertionError: Did not see expected warning of class 'UserWarning'. ..warn:: This is *not* thread-safe. 
""" __tracebackhide__ = True with warnings.catch_warnings(record=True) as w: warnings.simplefilter(filter_level) yield w if expected_warning: expected_warning = cast(Type[Warning], expected_warning) _assert_caught_expected_warning( caught_warnings=w, expected_warning=expected_warning, match=match, check_stacklevel=check_stacklevel, ) if raise_on_extra_warnings: _assert_caught_no_extra_warnings( caught_warnings=w, expected_warning=expected_warning, ) def _assert_caught_expected_warning( *, caught_warnings: Sequence[warnings.WarningMessage], expected_warning: type[Warning], match: str | None, check_stacklevel: bool, ) -> None: """Assert that there was the expected warning among the caught warnings.""" saw_warning = False matched_message = False for actual_warning in caught_warnings: if issubclass(actual_warning.category, expected_warning): saw_warning = True if check_stacklevel and issubclass( actual_warning.category, (FutureWarning, DeprecationWarning) ): _assert_raised_with_correct_stacklevel(actual_warning) if match is not None and re.search(match, str(actual_warning.message)): matched_message = True if not saw_warning: raise AssertionError( f"Did not see expected warning of class " f"{repr(expected_warning.__name__)}" ) if match and not matched_message: raise AssertionError( f"Did not see warning {repr(expected_warning.__name__)} " f"matching {match}" ) def _assert_caught_no_extra_warnings( *, caught_warnings: Sequence[warnings.WarningMessage], expected_warning: type[Warning] | bool | None, ) -> None: """Assert that no extra warnings apart from the expected ones are caught.""" extra_warnings = [] for actual_warning in caught_warnings: if _is_unexpected_warning(actual_warning, expected_warning): unclosed = "unclosed transport <asyncio.sslproto._SSLProtocolTransport" if actual_warning.category == ResourceWarning and unclosed in str( actual_warning.message ): # FIXME: kludge because pytest.filterwarnings does not # suppress these, xref GH#38630 continue extra_warnings.append( ( actual_warning.category.__name__, actual_warning.message, actual_warning.filename, actual_warning.lineno, ) ) if extra_warnings: raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}") def _is_unexpected_warning( actual_warning: warnings.WarningMessage, expected_warning: type[Warning] | bool | None, ) -> bool: """Check if the actual warning issued is unexpected.""" if actual_warning and not expected_warning: return True expected_warning = cast(Type[Warning], expected_warning) return bool(not issubclass(actual_warning.category, expected_warning)) def _assert_raised_with_correct_stacklevel( actual_warning: warnings.WarningMessage, ) -> None: from inspect import ( getframeinfo, stack, ) caller = getframeinfo(stack()[4][0]) msg = ( "Warning not set with correct stacklevel. " f"File where warning is raised: {actual_warning.filename} != " f"{caller.filename}. Warning message: {actual_warning.message}" ) assert actual_warning.filename == caller.filename, msg
datapythonista/pandas
pandas/tests/frame/methods/test_describe.py
pandas/_testing/_warnings.py
from typing import Optional import numpy as np from pandas._libs import lib from pandas.core.dtypes.cast import maybe_downcast_numeric from pandas.core.dtypes.common import ( ensure_object, is_datetime_or_timedelta_dtype, is_decimal, is_integer_dtype, is_number, is_numeric_dtype, is_scalar, needs_i8_conversion, ) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ) import pandas as pd from pandas.core.arrays.numeric import NumericArray def to_numeric(arg, errors="raise", downcast=None): """ Convert argument to a numeric type. The default return dtype is `float64` or `int64` depending on the data supplied. Use the `downcast` parameter to obtain other dtypes. Please note that precision loss may occur if really large numbers are passed in. Due to the internal limitations of `ndarray`, if numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min) or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are passed in, it is very likely they will be converted to float so that they can stored in an `ndarray`. These warnings apply similarly to `Series` since it internally leverages `ndarray`. Parameters ---------- arg : scalar, list, tuple, 1-d array, or Series Argument to be converted. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaN. - If 'ignore', then invalid parsing will return the input. downcast : {'integer', 'signed', 'unsigned', 'float'}, default None If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules: - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - 'float': smallest float dtype (min.: np.float32) As this behaviour is separate from the core conversion to numeric values, any errors raised during the downcasting will be surfaced regardless of the value of the 'errors' input. In addition, downcasting will only occur if the size of the resulting data's dtype is strictly larger than the dtype it is to be cast to, so if none of the dtypes checked satisfy that specification, no downcasting will be performed on the data. Returns ------- ret Numeric if parsing succeeded. Return type depends on input. Series if Series, otherwise ndarray. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. numpy.ndarray.astype : Cast a numpy array to a specified type. DataFrame.convert_dtypes : Convert dtypes. 
Examples -------- Take separate series and convert to numeric, coercing when told to >>> s = pd.Series(['1.0', '2', -3]) >>> pd.to_numeric(s) 0 1.0 1 2.0 2 -3.0 dtype: float64 >>> pd.to_numeric(s, downcast='float') 0 1.0 1 2.0 2 -3.0 dtype: float32 >>> pd.to_numeric(s, downcast='signed') 0 1 1 2 2 -3 dtype: int8 >>> s = pd.Series(['apple', '1.0', '2', -3]) >>> pd.to_numeric(s, errors='ignore') 0 apple 1 1.0 2 2 3 -3 dtype: object >>> pd.to_numeric(s, errors='coerce') 0 NaN 1 1.0 2 2.0 3 -3.0 dtype: float64 Downcasting of nullable integer and floating dtypes is supported: >>> s = pd.Series([1, 2, 3], dtype="Int64") >>> pd.to_numeric(s, downcast="integer") 0 1 1 2 2 3 dtype: Int8 >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64") >>> pd.to_numeric(s, downcast="float") 0 1.0 1 2.1 2 3.0 dtype: Float32 """ if downcast not in (None, "integer", "signed", "unsigned", "float"): raise ValueError("invalid downcasting method provided") if errors not in ("ignore", "raise", "coerce"): raise ValueError("invalid error value specified") is_series = False is_index = False is_scalars = False if isinstance(arg, ABCSeries): is_series = True values = arg.values elif isinstance(arg, ABCIndex): is_index = True if needs_i8_conversion(arg.dtype): values = arg.asi8 else: values = arg.values elif isinstance(arg, (list, tuple)): values = np.array(arg, dtype="O") elif is_scalar(arg): if is_decimal(arg): return float(arg) if is_number(arg): return arg is_scalars = True values = np.array([arg], dtype="O") elif getattr(arg, "ndim", 1) > 1: raise TypeError("arg must be a list, tuple, 1-d array, or Series") else: values = arg # GH33013: for IntegerArray & FloatingArray extract non-null values for casting # save mask to reconstruct the full array after casting mask: Optional[np.ndarray] = None if isinstance(values, NumericArray): mask = values._mask values = values._data[~mask] values_dtype = getattr(values, "dtype", None) if is_numeric_dtype(values_dtype): pass elif is_datetime_or_timedelta_dtype(values_dtype): values = values.view(np.int64) else: values = ensure_object(values) coerce_numeric = errors not in ("ignore", "raise") try: values, _ = lib.maybe_convert_numeric( values, set(), coerce_numeric=coerce_numeric ) except (ValueError, TypeError): if errors == "raise": raise # attempt downcast only if the data has been successfully converted # to a numerical dtype and if a downcast method has been specified if downcast is not None and is_numeric_dtype(values.dtype): typecodes = None if downcast in ("integer", "signed"): typecodes = np.typecodes["Integer"] elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0): typecodes = np.typecodes["UnsignedInteger"] elif downcast == "float": typecodes = np.typecodes["Float"] # pandas support goes only to np.float32, # as float dtypes smaller than that are # extremely rare and not well supported float_32_char = np.dtype(np.float32).char float_32_ind = typecodes.index(float_32_char) typecodes = typecodes[float_32_ind:] if typecodes is not None: # from smallest to largest for dtype in typecodes: dtype = np.dtype(dtype) if dtype.itemsize <= values.dtype.itemsize: values = maybe_downcast_numeric(values, dtype) # successful conversion if values.dtype == dtype: break # GH33013: for IntegerArray & FloatingArray need to reconstruct masked array if mask is not None: data = np.zeros(mask.shape, dtype=values.dtype) data[~mask] = values from pandas.core.arrays import ( FloatingArray, IntegerArray, ) klass = IntegerArray if is_integer_dtype(data.dtype) else FloatingArray values = 
klass(data, mask.copy()) if is_series: return arg._constructor(values, index=arg.index, name=arg.name) elif is_index: # because we want to coerce to numeric if possible, # do not use _shallow_copy return pd.Index(values, name=arg.name) elif is_scalars: return values[0] else: return values
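The docstring above already gives end-user examples; the downcast branch itself may be easier to follow as a standalone sketch. This is an illustrative approximation, not pandas internals: pick_downcast_dtype is a hypothetical helper that, like the loop above, walks the numpy typecodes from smallest to largest and stops at the first dtype that preserves the values.

import numpy as np

def pick_downcast_dtype(values: np.ndarray, kind: str = "integer") -> np.dtype:
    """Approximate the downcast selection performed by to_numeric (sketch only)."""
    if kind in ("integer", "signed"):
        typecodes = np.typecodes["Integer"]
    elif kind == "unsigned":
        typecodes = np.typecodes["UnsignedInteger"]
    else:  # "float": stop at float32; smaller floats are rare and poorly supported
        typecodes = np.typecodes["Float"]
        typecodes = typecodes[typecodes.index(np.dtype(np.float32).char):]

    for code in typecodes:  # typecodes are ordered roughly smallest to largest
        dtype = np.dtype(code)
        # Consider only dtypes no larger than the current one, and downcast only
        # if a round trip through the candidate dtype keeps every value intact.
        if dtype.itemsize <= values.dtype.itemsize and np.array_equal(
            values, values.astype(dtype)
        ):
            return dtype
    return values.dtype

print(pick_downcast_dtype(np.array([1, 2, 300], dtype=np.int64)))            # int16
print(pick_downcast_dtype(np.array([1.0, 2.5], dtype=np.float64), "float"))  # float32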
pandas/core/tools/numeric.py
"""Monitor Memory on a CFME/Miq appliance and builds report&graphs displaying usage per process.""" import json import os import time import traceback from collections import OrderedDict from datetime import datetime from threading import Thread import yaml from yaycl import AttrDict from cfme.utils.conf import cfme_performance from cfme.utils.log import logger from cfme.utils.path import results_path from cfme.utils.version import current_version from cfme.utils.version import get_version miq_workers = [ 'MiqGenericWorker', 'MiqPriorityWorker', 'MiqScheduleWorker', 'MiqUiWorker', 'MiqWebServiceWorker', 'MiqWebsocketWorker', 'MiqReportingWorker', 'MiqReplicationWorker', 'MiqSmartProxyWorker', 'MiqVimBrokerWorker', 'MiqEmsRefreshCoreWorker', # Refresh Workers: 'ManageIQ::Providers::Microsoft::InfraManager::RefreshWorker', 'ManageIQ::Providers::Openstack::InfraManager::RefreshWorker', 'ManageIQ::Providers::Redhat::InfraManager::RefreshWorker', 'ManageIQ::Providers::Vmware::InfraManager::RefreshWorker', 'MiqEmsRefreshWorkerMicrosoft', # 5.4 'MiqEmsRefreshWorkerRedhat', # 5.4 'MiqEmsRefreshWorkerVmware', # 5.4 'ManageIQ::Providers::Amazon::CloudManager::RefreshWorker', 'ManageIQ::Providers::Azure::CloudManager::RefreshWorker', 'ManageIQ::Providers::Google::CloudManager::RefreshWorker', 'ManageIQ::Providers::Openstack::CloudManager::RefreshWorker', 'MiqEmsRefreshWorkerAmazon', # 5.4 'MiqEmsRefreshWorkerOpenstack', # 5.4 'ManageIQ::Providers::AnsibleTower::ConfigurationManager::RefreshWorker', 'ManageIQ::Providers::Foreman::ConfigurationManager::RefreshWorker', 'ManageIQ::Providers::Foreman::ProvisioningManager::RefreshWorker', 'MiqEmsRefreshWorkerForemanConfiguration', # 5.4 'MiqEmsRefreshWorkerForemanProvisioning', # 5.4 'ManageIQ::Providers::Atomic::ContainerManager::RefreshWorker', 'ManageIQ::Providers::AtomicEnterprise::ContainerManager::RefreshWorker', 'ManageIQ::Providers::Kubernetes::ContainerManager::RefreshWorker', 'ManageIQ::Providers::Openshift::ContainerManager::RefreshWorker', 'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::RefreshWorker', 'ManageIQ::Providers::StorageManager::CinderManager::RefreshWorker', 'ManageIQ::Providers::StorageManager::SwiftManager::RefreshWorker', 'ManageIQ::Providers::Amazon::NetworkManager::RefreshWorker', 'ManageIQ::Providers::Azure::NetworkManager::RefreshWorker', 'ManageIQ::Providers::Google::NetworkManager::RefreshWorker', 'ManageIQ::Providers::Openstack::NetworkManager::RefreshWorker', 'MiqNetappRefreshWorker', 'MiqSmisRefreshWorker', # Event Workers: 'MiqEventHandler', 'ManageIQ::Providers::Openstack::InfraManager::EventCatcher', 'ManageIQ::Providers::StorageManager::CinderManager::EventCatcher', 'ManageIQ::Providers::Redhat::InfraManager::EventCatcher', 'ManageIQ::Providers::Vmware::InfraManager::EventCatcher', 'MiqEventCatcherRedhat', # 5.4 'MiqEventCatcherVmware', # 5.4 'ManageIQ::Providers::Amazon::CloudManager::EventCatcher', 'ManageIQ::Providers::Azure::CloudManager::EventCatcher', 'ManageIQ::Providers::Google::CloudManager::EventCatcher', 'ManageIQ::Providers::Openstack::CloudManager::EventCatcher', 'MiqEventCatcherAmazon', # 5.4 'MiqEventCatcherOpenstack', # 5.4 'ManageIQ::Providers::Atomic::ContainerManager::EventCatcher', 'ManageIQ::Providers::AtomicEnterprise::ContainerManager::EventCatcher', 'ManageIQ::Providers::Kubernetes::ContainerManager::EventCatcher', 'ManageIQ::Providers::Openshift::ContainerManager::EventCatcher', 'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::EventCatcher', 
'ManageIQ::Providers::Openstack::NetworkManager::EventCatcher', # Metrics Processor/Collector Workers 'MiqEmsMetricsProcessorWorker', 'ManageIQ::Providers::Openstack::InfraManager::MetricsCollectorWorker', 'ManageIQ::Providers::Redhat::InfraManager::MetricsCollectorWorker', 'ManageIQ::Providers::Vmware::InfraManager::MetricsCollectorWorker', 'MiqEmsMetricsCollectorWorkerRedhat', # 5.4 'MiqEmsMetricsCollectorWorkerVmware', # 5.4 'ManageIQ::Providers::Amazon::CloudManager::MetricsCollectorWorker', 'ManageIQ::Providers::Azure::CloudManager::MetricsCollectorWorker', 'ManageIQ::Providers::Openstack::CloudManager::MetricsCollectorWorker', 'MiqEmsMetricsCollectorWorkerAmazon', # 5.4 'MiqEmsMetricsCollectorWorkerOpenstack', # 5.4 'ManageIQ::Providers::Atomic::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::AtomicEnterprise::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::Kubernetes::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::Openshift::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker', 'ManageIQ::Providers::Openstack::NetworkManager::MetricsCollectorWorker', 'MiqStorageMetricsCollectorWorker', 'MiqVmdbStorageBridgeWorker'] ruby_processes = list(miq_workers) ruby_processes.extend(['evm:dbsync:replicate', 'MIQ Server (evm_server.rb)', 'evm_watchdog.rb', 'appliance_console.rb']) process_order = list(ruby_processes) process_order.extend(['memcached', 'postgres', 'httpd', 'collectd']) # Timestamp created at first import, thus grouping all reports of like workload test_ts = time.strftime('%Y%m%d%H%M%S') # 10s sample interval (occasionally sampling can take almost 4s on an appliance doing a lot of work) SAMPLE_INTERVAL = 10 class SmemMemoryMonitor(Thread): def __init__(self, ssh_client, scenario_data): super(SmemMemoryMonitor, self).__init__() self.ssh_client = ssh_client self.scenario_data = scenario_data self.grafana_urls = {} self.miq_server_id = '' self.use_slab = False self.signal = True def create_process_result(self, process_results, starttime, process_pid, process_name, memory_by_pid): if process_pid in list(memory_by_pid.keys()): if process_name not in process_results: process_results[process_name] = OrderedDict() process_results[process_name][process_pid] = OrderedDict() if process_pid not in process_results[process_name]: process_results[process_name][process_pid] = OrderedDict() process_results[process_name][process_pid][starttime] = {} rss_mem = memory_by_pid[process_pid]['rss'] pss_mem = memory_by_pid[process_pid]['pss'] uss_mem = memory_by_pid[process_pid]['uss'] vss_mem = memory_by_pid[process_pid]['vss'] swap_mem = memory_by_pid[process_pid]['swap'] process_results[process_name][process_pid][starttime]['rss'] = rss_mem process_results[process_name][process_pid][starttime]['pss'] = pss_mem process_results[process_name][process_pid][starttime]['uss'] = uss_mem process_results[process_name][process_pid][starttime]['vss'] = vss_mem process_results[process_name][process_pid][starttime]['swap'] = swap_mem del memory_by_pid[process_pid] else: logger.warning('Process {} PID, not found: {}'.format(process_name, process_pid)) def get_appliance_memory(self, appliance_results, plottime): # 5.5/5.6 - RHEL 7 / Centos 7 # Application Memory Used : MemTotal - (MemFree + Slab + Cached) # 5.4 - RHEL 6 / Centos 6 # Application Memory Used : MemTotal - (MemFree + Buffers + Cached) # Available memory could potentially be better metric appliance_results[plottime] = {} result = 
self.ssh_client.run_command('cat /proc/meminfo') if result.failed: logger.error('Exit_status nonzero in get_appliance_memory: {}, {}' .format(result.rc, result.output)) del appliance_results[plottime] else: meminfo_raw = result.output.replace('kB', '').strip() meminfo = OrderedDict((k.strip(), v.strip()) for k, v in (value.strip().split(':') for value in meminfo_raw.split('\n'))) appliance_results[plottime]['total'] = float(meminfo['MemTotal']) / 1024 appliance_results[plottime]['free'] = float(meminfo['MemFree']) / 1024 if 'MemAvailable' in meminfo: # 5.5, RHEL 7/Centos 7 self.use_slab = True mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float( meminfo['Slab']) + float(meminfo['Cached']))) / 1024 else: # 5.4, RHEL 6/Centos 6 mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float( meminfo['Buffers']) + float(meminfo['Cached']))) / 1024 appliance_results[plottime]['used'] = mem_used appliance_results[plottime]['buffers'] = float(meminfo['Buffers']) / 1024 appliance_results[plottime]['cached'] = float(meminfo['Cached']) / 1024 appliance_results[plottime]['slab'] = float(meminfo['Slab']) / 1024 appliance_results[plottime]['swap_total'] = float(meminfo['SwapTotal']) / 1024 appliance_results[plottime]['swap_free'] = float(meminfo['SwapFree']) / 1024 def get_evm_workers(self): result = self.ssh_client.run_command( 'psql -t -q -d vmdb_production -c ' '\"select pid,type from miq_workers where miq_server_id = \'{}\'\"'.format( self.miq_server_id)) if result.output.strip(): workers = {} for worker in result.output.strip().split('\n'): pid_worker = worker.strip().split('|') if len(pid_worker) == 2: workers[pid_worker[0].strip()] = pid_worker[1].strip() else: logger.error('Unexpected output from psql: {}'.format(worker)) return workers else: return {} # Old method of obtaining per process memory (Appliances without smem) # def get_pids_memory(self): # result = self.ssh_client.run_command( # 'ps -A -o pid,rss,vsz,comm,cmd | sed 1d') # pids_memory = result.output.strip().split('\n') # memory_by_pid = {} # for line in pids_memory: # values = [s for s in line.strip().split(' ') if s] # pid = values[0] # memory_by_pid[pid] = {} # memory_by_pid[pid]['rss'] = float(values[1]) / 1024 # memory_by_pid[pid]['vss'] = float(values[2]) / 1024 # memory_by_pid[pid]['name'] = values[3] # memory_by_pid[pid]['cmd'] = ' '.join(values[4:]) # return memory_by_pid def get_miq_server_id(self): # Obtain the Miq Server GUID: result = self.ssh_client.run_command('cat /var/www/miq/vmdb/GUID') logger.info('Obtained appliance GUID: {}'.format(result.output.strip())) # Get server id: result = self.ssh_client.run_command( 'psql -t -q -d vmdb_production -c "select id from miq_servers where guid = \'{}\'"' ''.format(result.output.strip())) logger.info('Obtained miq_server_id: {}'.format(result.output.strip())) self.miq_server_id = result.output.strip() def get_pids_memory(self): result = self.ssh_client.run_command( 'smem -c \'pid rss pss uss vss swap name command\' | sed 1d') pids_memory = result.output.strip().split('\n') memory_by_pid = {} for line in pids_memory: if line.strip(): try: values = [s for s in line.strip().split(' ') if s] pid = values[0] int(pid) memory_by_pid[pid] = {} memory_by_pid[pid]['rss'] = float(values[1]) / 1024 memory_by_pid[pid]['pss'] = float(values[2]) / 1024 memory_by_pid[pid]['uss'] = float(values[3]) / 1024 memory_by_pid[pid]['vss'] = float(values[4]) / 1024 memory_by_pid[pid]['swap'] = float(values[5]) / 1024 memory_by_pid[pid]['name'] = values[6] 
memory_by_pid[pid]['cmd'] = ' '.join(values[7:]) except Exception as e: logger.error('Processing smem output error: {}'.format(e.__class__.__name__, e)) logger.error('Issue with pid: {} line: {}'.format(pid, line)) logger.error('Complete smem output: {}'.format(result.output)) return memory_by_pid def _real_run(self): """ Result dictionaries: appliance_results[timestamp][measurement] = value appliance_results[timestamp]['total'] = value appliance_results[timestamp]['free'] = value appliance_results[timestamp]['used'] = value appliance_results[timestamp]['buffers'] = value appliance_results[timestamp]['cached'] = value appliance_results[timestamp]['slab'] = value appliance_results[timestamp]['swap_total'] = value appliance_results[timestamp]['swap_free'] = value appliance measurements: total/free/used/buffers/cached/slab/swap_total/swap_free process_results[name][pid][timestamp][measurement] = value process_results[name][pid][timestamp]['rss'] = value process_results[name][pid][timestamp]['pss'] = value process_results[name][pid][timestamp]['uss'] = value process_results[name][pid][timestamp]['vss'] = value process_results[name][pid][timestamp]['swap'] = value """ appliance_results = OrderedDict() process_results = OrderedDict() install_smem(self.ssh_client) self.get_miq_server_id() logger.info('Starting Monitoring Thread.') while self.signal: starttime = time.time() plottime = datetime.now() self.get_appliance_memory(appliance_results, plottime) workers = self.get_evm_workers() memory_by_pid = self.get_pids_memory() for worker_pid in workers: self.create_process_result(process_results, plottime, worker_pid, workers[worker_pid], memory_by_pid) for pid in sorted(memory_by_pid.keys()): if memory_by_pid[pid]['name'] == 'httpd': self.create_process_result(process_results, plottime, pid, 'httpd', memory_by_pid) elif memory_by_pid[pid]['name'] == 'postgres': self.create_process_result(process_results, plottime, pid, 'postgres', memory_by_pid) elif memory_by_pid[pid]['name'] == 'postmaster': self.create_process_result(process_results, plottime, pid, 'postgres', memory_by_pid) elif memory_by_pid[pid]['name'] == 'memcached': self.create_process_result(process_results, plottime, pid, 'memcached', memory_by_pid) elif memory_by_pid[pid]['name'] == 'collectd': self.create_process_result(process_results, plottime, pid, 'collectd', memory_by_pid) elif memory_by_pid[pid]['name'] == 'ruby': if 'evm_server.rb' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'MIQ Server (evm_server.rb)', memory_by_pid) elif 'MIQ Server' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'MIQ Server (evm_server.rb)', memory_by_pid) elif 'evm_watchdog.rb' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'evm_watchdog.rb', memory_by_pid) elif 'appliance_console.rb' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'appliance_console.rb', memory_by_pid) elif 'evm:dbsync:replicate' in memory_by_pid[pid]['cmd']: self.create_process_result(process_results, plottime, pid, 'evm:dbsync:replicate', memory_by_pid) else: logger.debug('Unaccounted for ruby pid: {}'.format(pid)) timediff = time.time() - starttime logger.debug('Monitoring sampled in {}s'.format(round(timediff, 4))) # Sleep Monitoring interval # Roughly 10s samples, accounts for collection of memory measurements time_to_sleep = abs(SAMPLE_INTERVAL - timediff) time.sleep(time_to_sleep) logger.info('Monitoring CFME Memory 
Terminating') create_report(self.scenario_data, appliance_results, process_results, self.use_slab, self.grafana_urls) def run(self): try: self._real_run() except Exception as e: logger.error('Error in Monitoring Thread: {}'.format(e)) logger.error('{}'.format(traceback.format_exc())) def install_smem(ssh_client): # smem is included by default in 5.6 appliances logger.info('Installing smem.') ver = get_version() if ver == '55': ssh_client.run_command('rpm -i {}'.format(cfme_performance['tools']['rpms']['epel7_rpm'])) ssh_client.run_command('yum install -y smem') # Patch smem to display longer command line names logger.info('Patching smem') ssh_client.run_command(r'sed -i s/\.27s/\.200s/g /usr/bin/smem') def create_report(scenario_data, appliance_results, process_results, use_slab, grafana_urls): logger.info('Creating Memory Monitoring Report.') ver = current_version() provider_names = 'No Providers' if 'providers' in scenario_data['scenario']: provider_names = ', '.join(scenario_data['scenario']['providers']) workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver)) if not os.path.exists(str(workload_path)): os.makedirs(str(workload_path)) scenario_path = workload_path.join(scenario_data['scenario']['name']) if os.path.exists(str(scenario_path)): logger.warning('Duplicate Workload-Scenario Name: {}'.format(scenario_path)) scenario_path = workload_path.join('{}-{}'.format(time.strftime('%Y%m%d%H%M%S'), scenario_data['scenario']['name'])) logger.warning('Using: {}'.format(scenario_path)) os.mkdir(str(scenario_path)) mem_graphs_path = scenario_path.join('graphs') if not os.path.exists(str(mem_graphs_path)): os.mkdir(str(mem_graphs_path)) mem_rawdata_path = scenario_path.join('rawdata') if not os.path.exists(str(mem_rawdata_path)): os.mkdir(str(mem_rawdata_path)) graph_appliance_measurements(mem_graphs_path, ver, appliance_results, use_slab, provider_names) graph_individual_process_measurements(mem_graphs_path, process_results, provider_names) graph_same_miq_workers(mem_graphs_path, process_results, provider_names) graph_all_miq_workers(mem_graphs_path, process_results, provider_names) # Dump scenario Yaml: with open(str(scenario_path.join('scenario.yml')), 'w') as scenario_file: yaml.safe_dump(dict(scenario_data['scenario']), scenario_file, default_flow_style=False) generate_summary_csv(scenario_path.join('{}-summary.csv'.format(ver)), appliance_results, process_results, provider_names, ver) generate_raw_data_csv(mem_rawdata_path, appliance_results, process_results) generate_summary_html(scenario_path, ver, appliance_results, process_results, scenario_data, provider_names, grafana_urls) generate_workload_html(scenario_path, ver, scenario_data, provider_names, grafana_urls) logger.info('Finished Creating Report') def compile_per_process_results(procs_to_compile, process_results, ts_end): alive_pids = 0 recycled_pids = 0 total_running_rss = 0 total_running_pss = 0 total_running_uss = 0 total_running_vss = 0 total_running_swap = 0 for process in procs_to_compile: if process in process_results: for pid in process_results[process]: if ts_end in process_results[process][pid]: alive_pids += 1 total_running_rss += process_results[process][pid][ts_end]['rss'] total_running_pss += process_results[process][pid][ts_end]['pss'] total_running_uss += process_results[process][pid][ts_end]['uss'] total_running_vss += process_results[process][pid][ts_end]['vss'] total_running_swap += process_results[process][pid][ts_end]['swap'] else: recycled_pids += 1 return alive_pids, 
recycled_pids, total_running_rss, total_running_pss, total_running_uss, \ total_running_vss, total_running_swap def generate_raw_data_csv(directory, appliance_results, process_results): starttime = time.time() file_name = str(directory.join('appliance.csv')) with open(file_name, 'w') as csv_file: csv_file.write('TimeStamp,Total,Free,Used,Buffers,Cached,Slab,Swap_Total,Swap_Free\n') for ts in appliance_results: csv_file.write('{},{},{},{},{},{},{},{},{}\n'.format(ts, appliance_results[ts]['total'], appliance_results[ts]['free'], appliance_results[ts]['used'], appliance_results[ts]['buffers'], appliance_results[ts]['cached'], appliance_results[ts]['slab'], appliance_results[ts]['swap_total'], appliance_results[ts]['swap_free'])) for process_name in process_results: for process_pid in process_results[process_name]: file_name = str(directory.join('{}-{}.csv'.format(process_pid, process_name))) with open(file_name, 'w') as csv_file: csv_file.write('TimeStamp,RSS,PSS,USS,VSS,SWAP\n') for ts in process_results[process_name][process_pid]: csv_file.write('{},{},{},{},{},{}\n'.format(ts, process_results[process_name][process_pid][ts]['rss'], process_results[process_name][process_pid][ts]['pss'], process_results[process_name][process_pid][ts]['uss'], process_results[process_name][process_pid][ts]['vss'], process_results[process_name][process_pid][ts]['swap'])) timediff = time.time() - starttime logger.info('Generated Raw Data CSVs in: {}'.format(timediff)) def generate_summary_csv(file_name, appliance_results, process_results, provider_names, version_string): starttime = time.time() with open(str(file_name), 'w') as csv_file: csv_file.write('Version: {}, Provider(s): {}\n'.format(version_string, provider_names)) csv_file.write('Measurement,Start of test,End of test\n') start = list(appliance_results.keys())[0] end = list(appliance_results.keys())[-1] csv_file.write('Appliance Total Memory,{},{}\n'.format( round(appliance_results[start]['total'], 2), round(appliance_results[end]['total'], 2))) csv_file.write('Appliance Free Memory,{},{}\n'.format( round(appliance_results[start]['free'], 2), round(appliance_results[end]['free'], 2))) csv_file.write('Appliance Used Memory,{},{}\n'.format( round(appliance_results[start]['used'], 2), round(appliance_results[end]['used'], 2))) csv_file.write('Appliance Buffers,{},{}\n'.format( round(appliance_results[start]['buffers'], 2), round(appliance_results[end]['buffers'], 2))) csv_file.write('Appliance Cached,{},{}\n'.format( round(appliance_results[start]['cached'], 2), round(appliance_results[end]['cached'], 2))) csv_file.write('Appliance Slab,{},{}\n'.format( round(appliance_results[start]['slab'], 2), round(appliance_results[end]['slab'], 2))) csv_file.write('Appliance Total Swap,{},{}\n'.format( round(appliance_results[start]['swap_total'], 2), round(appliance_results[end]['swap_total'], 2))) csv_file.write('Appliance Free Swap,{},{}\n'.format( round(appliance_results[start]['swap_free'], 2), round(appliance_results[end]['swap_free'], 2))) summary_csv_measurement_dump(csv_file, process_results, 'rss') summary_csv_measurement_dump(csv_file, process_results, 'pss') summary_csv_measurement_dump(csv_file, process_results, 'uss') summary_csv_measurement_dump(csv_file, process_results, 'vss') summary_csv_measurement_dump(csv_file, process_results, 'swap') timediff = time.time() - starttime logger.info('Generated Summary CSV in: {}'.format(timediff)) def generate_summary_html(directory, version_string, appliance_results, process_results, scenario_data, 
provider_names, grafana_urls): starttime = time.time() file_name = str(directory.join('index.html')) with open(file_name, 'w') as html_file: html_file.write('<html>\n') html_file.write('<head><title>{} - {} Memory Usage Performance</title></head>'.format( version_string, provider_names)) html_file.write('<body>\n') html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(version_string, scenario_data['test_name'].title())) html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format( scenario_data['appliance_roles'].replace(',', ', '))) html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names)) html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format( scenario_data['appliance_ip'], scenario_data['appliance_name'])) if grafana_urls: for g_name in sorted(grafana_urls.keys()): html_file.write( ' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name], g_name)) html_file.write('<br>\n') html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(version_string)) html_file.write(' : <b><a href=\'workload.html\'>Workload Info</a></b>') html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n') html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n') start = list(appliance_results.keys())[0] end = list(appliance_results.keys())[-1] timediff = end - start total_proc_count = 0 for proc_name in process_results: total_proc_count += len(list(process_results[proc_name].keys())) growth = appliance_results[end]['used'] - appliance_results[start]['used'] max_used_memory = 0 for ts in appliance_results: if appliance_results[ts]['used'] > max_used_memory: max_used_memory = appliance_results[ts]['used'] html_file.write('<table border="1">\n') html_file.write('<tr><td>\n') # Appliance Wide Results html_file.write('<table style="width:100%" border="1">\n') html_file.write('<tr>\n') html_file.write('<td><b>Version</b></td>\n') html_file.write('<td><b>Start Time</b></td>\n') html_file.write('<td><b>End Time</b></td>\n') html_file.write('<td><b>Total Test Time</b></td>\n') html_file.write('<td><b>Total Memory</b></td>\n') html_file.write('<td><b>Start Used Memory</b></td>\n') html_file.write('<td><b>End Used Memory</b></td>\n') html_file.write('<td><b>Used Memory Growth</b></td>\n') html_file.write('<td><b>Max Used Memory</b></td>\n') html_file.write('<td><b>Total Tracked Processes</b></td>\n') html_file.write('</tr>\n') html_file.write('<td><a href=\'rawdata/appliance.csv\'>{}</a></td>\n'.format( version_string)) html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0))) html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0))) html_file.write('<td>{}</td>\n'.format(str(timediff).partition('.')[0])) html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['total'], 2))) html_file.write('<td>{}</td>\n'.format(round(appliance_results[start]['used'], 2))) html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['used'], 2))) html_file.write('<td>{}</td>\n'.format(round(growth, 2))) html_file.write('<td>{}</td>\n'.format(round(max_used_memory, 2))) html_file.write('<td>{}</td>\n'.format(total_proc_count)) html_file.write('</table>\n') # CFME/Miq Worker Results html_file.write('<table style="width:100%" border="1">\n') html_file.write('<tr>\n') html_file.write('<td><b>Total CFME/Miq Workers</b></td>\n') html_file.write('<td><b>End Running Workers</b></td>\n') html_file.write('<td><b>Recycled Workers</b></td>\n') html_file.write('<td><b>End Total Worker RSS</b></td>\n') 
html_file.write('<td><b>End Total Worker PSS</b></td>\n') html_file.write('<td><b>End Total Worker USS</b></td>\n') html_file.write('<td><b>End Total Worker VSS</b></td>\n') html_file.write('<td><b>End Total Worker SWAP</b></td>\n') html_file.write('</tr>\n') a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results( miq_workers, process_results, end) html_file.write('<tr>\n') html_file.write('<td>{}</td>\n'.format(a_pids + r_pids)) html_file.write('<td>{}</td>\n'.format(a_pids)) html_file.write('<td>{}</td>\n'.format(r_pids)) html_file.write('<td>{}</td>\n'.format(round(t_rss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_pss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_uss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_vss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_swap, 2))) html_file.write('</tr>\n') html_file.write('</table>\n') # Per Process Summaries: html_file.write('<table style="width:100%" border="1">\n') html_file.write('<tr>\n') html_file.write('<td><b>Application/Process Group</b></td>\n') html_file.write('<td><b>Total Processes</b></td>\n') html_file.write('<td><b>End Running Processes</b></td>\n') html_file.write('<td><b>Recycled Processes</b></td>\n') html_file.write('<td><b>End Total Process RSS</b></td>\n') html_file.write('<td><b>End Total Process PSS</b></td>\n') html_file.write('<td><b>End Total Process USS</b></td>\n') html_file.write('<td><b>End Total Process VSS</b></td>\n') html_file.write('<td><b>End Total Process SWAP</b></td>\n') html_file.write('</tr>\n') a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results( ruby_processes, process_results, end) t_a_pids = a_pids t_r_pids = r_pids tt_rss = t_rss tt_pss = t_pss tt_uss = t_uss tt_vss = t_vss tt_swap = t_swap html_file.write('<tr>\n') html_file.write('<td>ruby</td>\n') html_file.write('<td>{}</td>\n'.format(a_pids + r_pids)) html_file.write('<td>{}</td>\n'.format(a_pids)) html_file.write('<td>{}</td>\n'.format(r_pids)) html_file.write('<td>{}</td>\n'.format(round(t_rss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_pss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_uss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_vss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_swap, 2))) html_file.write('</tr>\n') # memcached Summary a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results( ['memcached'], process_results, end) t_a_pids += a_pids t_r_pids += r_pids tt_rss += t_rss tt_pss += t_pss tt_uss += t_uss tt_vss += t_vss tt_swap += t_swap html_file.write('<tr>\n') html_file.write('<td>memcached</td>\n') html_file.write('<td>{}</td>\n'.format(a_pids + r_pids)) html_file.write('<td>{}</td>\n'.format(a_pids)) html_file.write('<td>{}</td>\n'.format(r_pids)) html_file.write('<td>{}</td>\n'.format(round(t_rss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_pss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_uss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_vss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_swap, 2))) html_file.write('</tr>\n') # Postgres Summary a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results( ['postgres'], process_results, end) t_a_pids += a_pids t_r_pids += r_pids tt_rss += t_rss tt_pss += t_pss tt_uss += t_uss tt_vss += t_vss tt_swap += t_swap html_file.write('<tr>\n') html_file.write('<td>postgres</td>\n') html_file.write('<td>{}</td>\n'.format(a_pids + r_pids)) html_file.write('<td>{}</td>\n'.format(a_pids)) 
html_file.write('<td>{}</td>\n'.format(r_pids)) html_file.write('<td>{}</td>\n'.format(round(t_rss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_pss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_uss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_vss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_swap, 2))) html_file.write('</tr>\n') # httpd Summary a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(['httpd'], process_results, end) t_a_pids += a_pids t_r_pids += r_pids tt_rss += t_rss tt_pss += t_pss tt_uss += t_uss tt_vss += t_vss tt_swap += t_swap html_file.write('<tr>\n') html_file.write('<td>httpd</td>\n') html_file.write('<td>{}</td>\n'.format(a_pids + r_pids)) html_file.write('<td>{}</td>\n'.format(a_pids)) html_file.write('<td>{}</td>\n'.format(r_pids)) html_file.write('<td>{}</td>\n'.format(round(t_rss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_pss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_uss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_vss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_swap, 2))) html_file.write('</tr>\n') # collectd Summary a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results( ['collectd'], process_results, end) t_a_pids += a_pids t_r_pids += r_pids tt_rss += t_rss tt_pss += t_pss tt_uss += t_uss tt_vss += t_vss tt_swap += t_swap html_file.write('<tr>\n') html_file.write('<td>collectd</td>\n') html_file.write('<td>{}</td>\n'.format(a_pids + r_pids)) html_file.write('<td>{}</td>\n'.format(a_pids)) html_file.write('<td>{}</td>\n'.format(r_pids)) html_file.write('<td>{}</td>\n'.format(round(t_rss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_pss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_uss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_vss, 2))) html_file.write('<td>{}</td>\n'.format(round(t_swap, 2))) html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>total</td>\n') html_file.write('<td>{}</td>\n'.format(t_a_pids + t_r_pids)) html_file.write('<td>{}</td>\n'.format(t_a_pids)) html_file.write('<td>{}</td>\n'.format(t_r_pids)) html_file.write('<td>{}</td>\n'.format(round(tt_rss, 2))) html_file.write('<td>{}</td>\n'.format(round(tt_pss, 2))) html_file.write('<td>{}</td>\n'.format(round(tt_uss, 2))) html_file.write('<td>{}</td>\n'.format(round(tt_vss, 2))) html_file.write('<td>{}</td>\n'.format(round(tt_swap, 2))) html_file.write('</tr>\n') html_file.write('</table>\n') # Appliance Graph html_file.write('</td></tr><tr><td>\n') file_name = '{}-appliance_memory.png'.format(version_string) html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name)) file_name = '{}-appliance_swap.png'.format(version_string) # Check for swap usage through out time frame: max_swap_used = 0 for ts in appliance_results: swap_used = appliance_results[ts]['swap_total'] - appliance_results[ts]['swap_free'] if swap_used > max_swap_used: max_swap_used = swap_used if max_swap_used < 10: # Less than 10MiB Max, then hide graph html_file.write('<br><a href=\'graphs/{}\'>Swap Graph '.format(file_name)) html_file.write('(Hidden, max_swap_used < 10 MiB)</a>\n') else: html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name)) html_file.write('</td></tr><tr><td>\n') # Per Process Results html_file.write('<table style="width:100%" border="1"><tr>\n') html_file.write('<td><b>Process Name</b></td>\n') html_file.write('<td><b>Process Pid</b></td>\n') html_file.write('<td><b>Start Time</b></td>\n') html_file.write('<td><b>End 
Time</b></td>\n') html_file.write('<td><b>Time Alive</b></td>\n') html_file.write('<td><b>RSS Mem Start</b></td>\n') html_file.write('<td><b>RSS Mem End</b></td>\n') html_file.write('<td><b>RSS Mem Change</b></td>\n') html_file.write('<td><b>PSS Mem Start</b></td>\n') html_file.write('<td><b>PSS Mem End</b></td>\n') html_file.write('<td><b>PSS Mem Change</b></td>\n') html_file.write('<td><b>CSV</b></td>\n') html_file.write('</tr>\n') # By Worker Type Memory Used for ordered_name in process_order: if ordered_name in process_results: for pid in process_results[ordered_name]: start = list(process_results[ordered_name][pid].keys())[0] end = list(process_results[ordered_name][pid].keys())[-1] timediff = end - start html_file.write('<tr>\n') if len(process_results[ordered_name]) > 1: html_file.write('<td><a href=\'#{}\'>{}</a></td>\n'.format(ordered_name, ordered_name)) html_file.write('<td><a href=\'graphs/{}-{}.png\'>{}</a></td>\n'.format( ordered_name, pid, pid)) else: html_file.write('<td>{}</td>\n'.format(ordered_name)) html_file.write('<td><a href=\'#{}-{}.png\'>{}</a></td>\n'.format( ordered_name, pid, pid)) html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0))) html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0))) html_file.write('<td>{}</td>\n'.format(str(timediff).partition('.')[0])) rss_change = process_results[ordered_name][pid][end]['rss'] - \ process_results[ordered_name][pid][start]['rss'] html_file.write('<td>{}</td>\n'.format( round(process_results[ordered_name][pid][start]['rss'], 2))) html_file.write('<td>{}</td>\n'.format( round(process_results[ordered_name][pid][end]['rss'], 2))) html_file.write('<td>{}</td>\n'.format(round(rss_change, 2))) pss_change = process_results[ordered_name][pid][end]['pss'] - \ process_results[ordered_name][pid][start]['pss'] html_file.write('<td>{}</td>\n'.format( round(process_results[ordered_name][pid][start]['pss'], 2))) html_file.write('<td>{}</td>\n'.format( round(process_results[ordered_name][pid][end]['pss'], 2))) html_file.write('<td>{}</td>\n'.format(round(pss_change, 2))) html_file.write('<td><a href=\'rawdata/{}-{}.csv\'>csv</a></td>\n'.format( pid, ordered_name)) html_file.write('</tr>\n') else: logger.debug('Process/Worker not part of test: {}'.format(ordered_name)) html_file.write('</table>\n') # Worker Graphs for ordered_name in process_order: if ordered_name in process_results: html_file.write('<tr><td>\n') html_file.write('<div id=\'{}\'>Process name: {}</div><br>\n'.format( ordered_name, ordered_name)) if len(process_results[ordered_name]) > 1: file_name = '{}-all.png'.format(ordered_name) html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(file_name, file_name)) else: for pid in sorted(process_results[ordered_name]): file_name = '{}-{}.png'.format(ordered_name, pid) html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format( file_name, file_name)) html_file.write('</td></tr>\n') html_file.write('</table>\n') html_file.write('</body>\n') html_file.write('</html>\n') timediff = time.time() - starttime logger.info('Generated Summary html in: {}'.format(timediff)) def generate_workload_html(directory, ver, scenario_data, provider_names, grafana_urls): starttime = time.time() file_name = str(directory.join('workload.html')) with open(file_name, 'w') as html_file: html_file.write('<html>\n') html_file.write('<head><title>{} - {}</title></head>'.format( scenario_data['test_name'], provider_names)) html_file.write('<body>\n') html_file.write('<b>CFME {} {} Test 
Results</b><br>\n'.format(ver, scenario_data['test_name'].title())) html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format( scenario_data['appliance_roles'].replace(',', ', '))) html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names)) html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format( scenario_data['appliance_ip'], scenario_data['appliance_name'])) if grafana_urls: for g_name in sorted(grafana_urls.keys()): html_file.write( ' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name], g_name)) html_file.write('<br>\n') html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(ver)) html_file.write(' : <b><a href=\'index.html\'>Memory Info</a></b>') html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n') html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n') html_file.write('<br><b>Scenario Data: </b><br>\n') yaml_html = get_scenario_html(scenario_data['scenario']) html_file.write(yaml_html + '\n') html_file.write('<br>\n<br>\n<br>\n<b>Quantifier Data: </b>\n<br>\n<br>\n<br>\n<br>\n') html_file.write('<table border="1">\n') html_file.write('<tr>\n') html_file.write('<td><b><font size="4"> System Information</font></b></td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>\n') system_path = ('../version_info/system.csv') html_file.write('<a href="{}" download="System_Versions-{}-{}"> System Versions</a>' .format(system_path, test_ts, scenario_data['scenario']['name'])) html_file.write('</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>&nbsp</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>&nbsp</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td><b><font size="4"> Process Information</font></b></td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>\n') process_path = ('../version_info/processes.csv') html_file.write('<a href="{}" download="Process_Versions-{}-{}"> Process Versions</a>' .format(process_path, test_ts, scenario_data['scenario']['name'])) html_file.write('</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>&nbsp</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>&nbsp</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td><b><font size="4"> Ruby Gem Information</font></b></td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>\n') gems_path = ('../version_info/gems.csv') html_file.write('<a href="{}" download="Gem_Versions-{}-{}"> Ruby Gem Versions</a>' .format(gems_path, test_ts, scenario_data['scenario']['name'])) html_file.write('</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>&nbsp</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>&nbsp</td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td><b><font size="4"> RPM Information</font></b></td>\n') html_file.write('</tr>\n') html_file.write('<tr>\n') html_file.write('<td>\n') rpms_path = ('../version_info/rpms.csv') html_file.write('<a href="{}" download="RPM_Versions-{}-{}"> RPM Versions</a>' .format(rpms_path, test_ts, scenario_data['scenario']['name'])) html_file.write('</td>\n') html_file.write('</tr>\n') html_file.write('</table>\n') html_file.write('</body>\n') html_file.write('</html>\n') timediff = time.time() - starttime 
logger.info('Generated Workload html in: {}'.format(timediff)) def add_workload_quantifiers(quantifiers, scenario_data): starttime = time.time() ver = current_version() workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver)) directory = workload_path.join(scenario_data['scenario']['name']) file_name = str(directory.join('workload.html')) marker = '<b>Quantifier Data: </b>' yaml_dict = quantifiers yaml_string = str(json.dumps(yaml_dict, indent=4)) yaml_html = yaml_string.replace('\n', '<br>\n') with open(file_name, 'r+') as html_file: line = '' while marker not in line: line = html_file.readline() marker_pos = html_file.tell() remainder = html_file.read() html_file.seek(marker_pos) html_file.write('{} \n'.format(yaml_html)) html_file.write(remainder) timediff = time.time() - starttime logger.info('Added quantifiers in: {}'.format(timediff)) def get_scenario_html(scenario_data): scenario_dict = create_dict(scenario_data) scenario_yaml = yaml.safe_dump(scenario_dict) scenario_html = scenario_yaml.replace('\n', '<br>\n') scenario_html = scenario_html.replace(', ', '<br>\n &nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;') scenario_html = scenario_html.replace(' ', '&nbsp;') scenario_html = scenario_html.replace('[', '<br>\n &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;-&nbsp;') scenario_html = scenario_html.replace(']', '\n') return scenario_html def create_dict(attr_dict): main_dict = dict(attr_dict) for key, value in main_dict.items(): if type(value) == AttrDict: main_dict[key] = create_dict(value) return main_dict def graph_appliance_measurements(graphs_path, ver, appliance_results, use_slab, provider_names): import matplotlib as mpl mpl.use('Agg') import matplotlib.dates as mdates import matplotlib.pyplot as plt from cycler import cycler starttime = time.time() dates = list(appliance_results.keys()) total_memory_list = list(appliance_results[ts]['total'] for ts in appliance_results.keys()) free_memory_list = list(appliance_results[ts]['free'] for ts in appliance_results.keys()) used_memory_list = list(appliance_results[ts]['used'] for ts in appliance_results.keys()) buffers_memory_list = list(appliance_results[ts]['buffers'] for ts in appliance_results.keys()) cache_memory_list = list(appliance_results[ts]['cached'] for ts in appliance_results.keys()) slab_memory_list = list(appliance_results[ts]['slab'] for ts in appliance_results.keys()) swap_total_list = list(appliance_results[ts]['swap_total'] for ts in appliance_results.keys()) swap_free_list = list(appliance_results[ts]['swap_free'] for ts in appliance_results.keys()) # Stack Plot Memory Usage file_name = graphs_path.join('{}-appliance_memory.png'.format(ver)) mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'coral', 'steelblue', 'forestgreen']) fig, ax = plt.subplots() plt.title('Provider(s): {}\nAppliance Memory'.format(provider_names)) plt.xlabel('Date / Time') plt.ylabel('Memory (MiB)') if use_slab: y = [used_memory_list, slab_memory_list, cache_memory_list, free_memory_list] else: y = [used_memory_list, buffers_memory_list, cache_memory_list, free_memory_list] plt.stackplot(dates, *y, baseline='zero') ax.annotate(str(round(total_memory_list[0], 2)), xy=(dates[0], total_memory_list[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(total_memory_list[-1], 2)), xy=(dates[-1], total_memory_list[-1]), xytext=(4, -4), textcoords='offset points') if use_slab: ax.annotate(str(round(slab_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] + slab_memory_list[0]), xytext=(4, 4), textcoords='offset 
points') ax.annotate(str(round(slab_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1] + slab_memory_list[-1]), xytext=(4, -4), textcoords='offset points') ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] + slab_memory_list[0] + cache_memory_list[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(cache_memory_list[-1], 2)), xy=( dates[-1], used_memory_list[-1] + slab_memory_list[-1] + cache_memory_list[-1]), xytext=(4, -4), textcoords='offset points') else: ax.annotate(str(round(buffers_memory_list[0], 2)), xy=( dates[0], used_memory_list[0] + buffers_memory_list[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(buffers_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1] + buffers_memory_list[-1]), xytext=(4, -4), textcoords='offset points') ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] + buffers_memory_list[0] + cache_memory_list[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(cache_memory_list[-1], 2)), xy=( dates[-1], used_memory_list[-1] + buffers_memory_list[-1] + cache_memory_list[-1]), xytext=(4, -4), textcoords='offset points') ax.annotate(str(round(used_memory_list[0], 2)), xy=(dates[0], used_memory_list[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(used_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1]), xytext=(4, -4), textcoords='offset points') datefmt = mdates.DateFormatter('%m-%d %H-%M') ax.xaxis.set_major_formatter(datefmt) ax.grid(True) p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick') p2 = plt.Rectangle((0, 0), 1, 1, fc='coral') p3 = plt.Rectangle((0, 0), 1, 1, fc='steelblue') p4 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen') if use_slab: ax.legend([p1, p2, p3, p4], ['Used', 'Slab', 'Cached', 'Free'], bbox_to_anchor=(1.45, 0.22), fancybox=True) else: ax.legend([p1, p2, p3, p4], ['Used', 'Buffers', 'Cached', 'Free'], bbox_to_anchor=(1.45, 0.22), fancybox=True) fig.autofmt_xdate() plt.savefig(str(file_name), bbox_inches='tight') plt.close() # Stack Plot Swap usage mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'forestgreen']) file_name = graphs_path.join('{}-appliance_swap.png'.format(ver)) fig, ax = plt.subplots() plt.title('Provider(s): {}\nAppliance Swap'.format(provider_names)) plt.xlabel('Date / Time') plt.ylabel('Swap (MiB)') swap_used_list = [t - f for f, t in zip(swap_free_list, swap_total_list)] y = [swap_used_list, swap_free_list] plt.stackplot(dates, *y, baseline='zero') ax.annotate(str(round(swap_total_list[0], 2)), xy=(dates[0], swap_total_list[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(swap_total_list[-1], 2)), xy=(dates[-1], swap_total_list[-1]), xytext=(4, -4), textcoords='offset points') ax.annotate(str(round(swap_used_list[0], 2)), xy=(dates[0], swap_used_list[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(swap_used_list[-1], 2)), xy=(dates[-1], swap_used_list[-1]), xytext=(4, -4), textcoords='offset points') datefmt = mdates.DateFormatter('%m-%d %H-%M') ax.xaxis.set_major_formatter(datefmt) ax.grid(True) p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick') p2 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen') ax.legend([p1, p2], ['Used Swap', 'Free Swap'], bbox_to_anchor=(1.45, 0.22), fancybox=True) fig.autofmt_xdate() plt.savefig(str(file_name), bbox_inches='tight') plt.close() # Reset Colors mpl.rcdefaults() timediff = time.time() - starttime logger.info('Plotted Appliance Memory in: {}'.format(timediff)) def 
graph_all_miq_workers(graph_file_path, process_results, provider_names): import matplotlib as mpl mpl.use('Agg') import matplotlib.dates as mdates import matplotlib.pyplot as plt starttime = time.time() file_name = graph_file_path.join('all-processes.png') fig, ax = plt.subplots() plt.title('Provider(s): {}\nAll Workers/Monitored Processes'.format(provider_names)) plt.xlabel('Date / Time') plt.ylabel('Memory (MiB)') for process_name in process_results: if 'Worker' in process_name or 'Handler' in process_name or 'Catcher' in process_name: for process_pid in process_results[process_name]: dates = list(process_results[process_name][process_pid].keys()) rss_samples = list(process_results[process_name][process_pid][ts]['rss'] for ts in process_results[process_name][process_pid].keys()) vss_samples = list(process_results[process_name][process_pid][ts]['vss'] for ts in process_results[process_name][process_pid].keys()) plt.plot(dates, rss_samples, linewidth=1, label='{} {} RSS'.format(process_pid, process_name)) plt.plot(dates, vss_samples, linewidth=1, label='{} {} VSS'.format( process_pid, process_name)) datefmt = mdates.DateFormatter('%m-%d %H-%M') ax.xaxis.set_major_formatter(datefmt) ax.grid(True) plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True) fig.autofmt_xdate() plt.savefig(str(file_name), bbox_inches='tight') plt.close() timediff = time.time() - starttime logger.info('Plotted All Type/Process Memory in: {}'.format(timediff)) def graph_individual_process_measurements(graph_file_path, process_results, provider_names): import matplotlib as mpl mpl.use('Agg') import matplotlib.dates as mdates import matplotlib.pyplot as plt starttime = time.time() for process_name in process_results: for process_pid in process_results[process_name]: file_name = graph_file_path.join('{}-{}.png'.format(process_name, process_pid)) dates = list(process_results[process_name][process_pid].keys()) rss_samples = list(process_results[process_name][process_pid][ts]['rss'] for ts in process_results[process_name][process_pid].keys()) pss_samples = list(process_results[process_name][process_pid][ts]['pss'] for ts in process_results[process_name][process_pid].keys()) uss_samples = list(process_results[process_name][process_pid][ts]['uss'] for ts in process_results[process_name][process_pid].keys()) vss_samples = list(process_results[process_name][process_pid][ts]['vss'] for ts in process_results[process_name][process_pid].keys()) swap_samples = list(process_results[process_name][process_pid][ts]['swap'] for ts in process_results[process_name][process_pid].keys()) fig, ax = plt.subplots() plt.title('Provider(s)/Size: {}\nProcess/Worker: {}\nPID: {}'.format(provider_names, process_name, process_pid)) plt.xlabel('Date / Time') plt.ylabel('Memory (MiB)') plt.plot(dates, rss_samples, linewidth=1, label='RSS') plt.plot(dates, pss_samples, linewidth=1, label='PSS') plt.plot(dates, uss_samples, linewidth=1, label='USS') plt.plot(dates, vss_samples, linewidth=1, label='VSS') plt.plot(dates, swap_samples, linewidth=1, label='Swap') if rss_samples: ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1], rss_samples[-1]), xytext=(4, -4), textcoords='offset points') if pss_samples: ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0], pss_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1], pss_samples[-1]), xytext=(4, -4), 
textcoords='offset points') if uss_samples: ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0], uss_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1], uss_samples[-1]), xytext=(4, -4), textcoords='offset points') if vss_samples: ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0], vss_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1], vss_samples[-1]), xytext=(4, -4), textcoords='offset points') if swap_samples: ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0], swap_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1], swap_samples[-1]), xytext=(4, -4), textcoords='offset points') datefmt = mdates.DateFormatter('%m-%d %H-%M') ax.xaxis.set_major_formatter(datefmt) ax.grid(True) plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True) fig.autofmt_xdate() plt.savefig(str(file_name), bbox_inches='tight') plt.close() timediff = time.time() - starttime logger.info('Plotted Individual Process Memory in: {}'.format(timediff)) def graph_same_miq_workers(graph_file_path, process_results, provider_names): import matplotlib as mpl mpl.use('Agg') import matplotlib.dates as mdates import matplotlib.pyplot as plt starttime = time.time() for process_name in process_results: if len(process_results[process_name]) > 1: logger.debug('Plotting {} {} processes on single graph.'.format( len(process_results[process_name]), process_name)) file_name = graph_file_path.join('{}-all.png'.format(process_name)) fig, ax = plt.subplots() pids = 'PIDs: ' for i, pid in enumerate(process_results[process_name], 1): pids = '{}{}'.format(pids, '{},{}'.format(pid, [' ', '\n'][i % 6 == 0])) pids = pids[0:-2] plt.title('Provider: {}\nProcess/Worker: {}\n{}'.format(provider_names, process_name, pids)) plt.xlabel('Date / Time') plt.ylabel('Memory (MiB)') for process_pid in process_results[process_name]: dates = list(process_results[process_name][process_pid].keys()) rss_samples = list(process_results[process_name][process_pid][ts]['rss'] for ts in process_results[process_name][process_pid].keys()) pss_samples = list(process_results[process_name][process_pid][ts]['pss'] for ts in process_results[process_name][process_pid].keys()) uss_samples = list(process_results[process_name][process_pid][ts]['uss'] for ts in process_results[process_name][process_pid].keys()) vss_samples = list(process_results[process_name][process_pid][ts]['vss'] for ts in process_results[process_name][process_pid].keys()) swap_samples = list(process_results[process_name][process_pid][ts]['swap'] for ts in process_results[process_name][process_pid].keys()) plt.plot(dates, rss_samples, linewidth=1, label='{} RSS'.format(process_pid)) plt.plot(dates, pss_samples, linewidth=1, label='{} PSS'.format(process_pid)) plt.plot(dates, uss_samples, linewidth=1, label='{} USS'.format(process_pid)) plt.plot(dates, vss_samples, linewidth=1, label='{} VSS'.format(process_pid)) plt.plot(dates, swap_samples, linewidth=1, label='{} SWAP'.format(process_pid)) if rss_samples: ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1], rss_samples[-1]), xytext=(4, -4), textcoords='offset points') if pss_samples: ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0], pss_samples[0]), xytext=(4, 4), textcoords='offset points') 
ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1], pss_samples[-1]), xytext=(4, -4), textcoords='offset points') if uss_samples: ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0], uss_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1], uss_samples[-1]), xytext=(4, -4), textcoords='offset points') if vss_samples: ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0], vss_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1], vss_samples[-1]), xytext=(4, -4), textcoords='offset points') if swap_samples: ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0], swap_samples[0]), xytext=(4, 4), textcoords='offset points') ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1], swap_samples[-1]), xytext=(4, -4), textcoords='offset points') datefmt = mdates.DateFormatter('%m-%d %H-%M') ax.xaxis.set_major_formatter(datefmt) ax.grid(True) plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True) fig.autofmt_xdate() plt.savefig(str(file_name), bbox_inches='tight') plt.close() timediff = time.time() - starttime logger.info('Plotted Same Type/Process Memory in: {}'.format(timediff)) def summary_csv_measurement_dump(csv_file, process_results, measurement): csv_file.write('---------------------------------------------\n') csv_file.write('Per Process {} Memory Usage\n'.format(measurement.upper())) csv_file.write('---------------------------------------------\n') csv_file.write('Process/Worker Type,PID,Start of test,End of test\n') for ordered_name in process_order: if ordered_name in process_results: for process_pid in sorted(process_results[ordered_name]): start = list(process_results[ordered_name][process_pid].keys())[0] end = list(process_results[ordered_name][process_pid].keys())[-1] csv_file.write('{},{},{},{}\n'.format(ordered_name, process_pid, round(process_results[ordered_name][process_pid][start][measurement], 2), round(process_results[ordered_name][process_pid][end][measurement], 2)))
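A minimal usage sketch (not part of the module above) of the nested process_results structure that summary_csv_measurement_dump() and the plotting helpers expect; the worker name, PID, timestamps and memory values are invented, and the worker name is assumed to appear in the module-level process_order list defined earlier in the file.

from datetime import datetime

example_results = {
    'MiqGenericWorker': {          # process/worker type; assumed to be listed in process_order
        12345: {                   # PID
            datetime(2019, 1, 1, 10, 0): {
                'rss': 350.0, 'pss': 310.0, 'uss': 290.0, 'vss': 900.0, 'swap': 0.0},
            datetime(2019, 1, 1, 11, 0): {
                'rss': 420.5, 'pss': 372.1, 'uss': 341.7, 'vss': 950.0, 'swap': 0.0},
        },
    },
}

with open('example-summary.csv', 'w') as csv_file:
    summary_csv_measurement_dump(csv_file, example_results, 'rss')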
# -*- coding: utf-8 -*- from datetime import datetime import pytest from cfme import test_requirements from cfme.infrastructure.provider.virtualcenter import VMwareProvider from cfme.markers.env_markers.provider import ONE_PER_TYPE from cfme.services.myservice.ui import MyServiceDetailView from cfme.utils import browser from cfme.utils.appliance import ViaUI from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ from cfme.utils.browser import ensure_browser_open from cfme.utils.update import update from cfme.utils.version import appliance_is_downstream from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.usefixtures('setup_provider', 'catalog_item'), pytest.mark.meta(server_roles="+automate"), pytest.mark.long_running, test_requirements.service, pytest.mark.tier(2), pytest.mark.provider([VMwareProvider], selector=ONE_PER_TYPE, scope="module"), ] @pytest.fixture def needs_firefox(): """ Fixture which skips the test if not run under firefox. I recommend putting it in the first place. """ ensure_browser_open() if browser.browser().name != "firefox": pytest.skip(msg="This test needs firefox to run") @pytest.mark.parametrize('context', [ViaUI]) def test_retire_service_ui(appliance, context, service_vm): """Tests my service Metadata: test_flag: provision Polarion: assignee: nansari casecomponent: Services initialEstimate: 1/4h tags: service """ service, _ = service_vm with appliance.context.use(context): service.retire() @pytest.mark.parametrize('context', [ViaUI]) def test_retire_service_on_date(appliance, context, service_vm): """Tests my service retirement Metadata: test_flag: provision Polarion: assignee: nansari casecomponent: Services initialEstimate: 1/4h tags: service """ service, _ = service_vm with appliance.context.use(context): dt = datetime.utcnow() service.retire_on_date(dt) @pytest.mark.parametrize('context', [ViaUI]) @pytest.mark.meta(blockers=[BZ(1729940)]) def test_crud_set_ownership_and_edit_tags(appliance, context, service_vm): """Tests my service crud , edit tags and ownership Metadata: test_flag: provision Polarion: assignee: nansari casecomponent: Services initialEstimate: 1/4h tags: service """ service, _ = service_vm with appliance.context.use(context): service.set_ownership("Administrator", "EvmGroup-administrator") service.add_tag() with update(service): service.description = "my edited description" service.delete() @pytest.mark.parametrize('context', [ViaUI]) @pytest.mark.parametrize("filetype", ["Text", "CSV", "PDF"]) # PDF not present on upstream @pytest.mark.uncollectif(lambda filetype: filetype == 'PDF' and not appliance_is_downstream()) def test_download_file(appliance, context, needs_firefox, service_vm, filetype): """Tests my service download files Metadata: test_flag: provision Polarion: assignee: nansari casecomponent: Services initialEstimate: 1/16h tags: service """ service, _ = service_vm with appliance.context.use(context): service.download_file(filetype) @pytest.mark.parametrize('context', [ViaUI]) def test_service_link(appliance, context, service_vm): """Tests service link from VM details page(BZ1443772) Polarion: assignee: nansari casecomponent: Services initialEstimate: 1/4h tags: service """ service, vm = service_vm with appliance.context.use(context): # TODO: Update to nav to MyService first to click entity link when widget exists view = navigate_to(vm, 'Details') view.entities.summary('Relationships').click_at('Service') new_view = service.create_view(MyServiceDetailView) assert 
new_view.wait_displayed() @pytest.mark.parametrize('context', [ViaUI]) @pytest.mark.meta(automates=[BZ(1720338)]) def test_retire_service_with_retired_vm(appliance, context, service_vm): """Tests retire service with an already retired vm. Metadata: test_flag: provision Polarion: assignee: nansari casecomponent: Services initialEstimate: 1/4h tags: service Bugzilla: 1720338 """ service, vm = service_vm vm.retire() # using rest entity to check if the VM has retired since it is a lot faster retire_vm = appliance.rest_api.collections.vms.get(name=vm.name) wait_for( lambda: (hasattr(retire_vm, "retired") and retire_vm.retired), timeout=1000, delay=5, fail_func=retire_vm.reload, ) with appliance.context.use(context): service.retire() @pytest.mark.manual @pytest.mark.tier(3) def test_retire_on_date_for_multiple_service(): """ Polarion: assignee: nansari casecomponent: Services testtype: functional initialEstimate: 1/8h startsin: 5.5 tags: service """ pass @pytest.mark.meta(coverage=[1678123]) @pytest.mark.manual @pytest.mark.tier(2) def test_service_state(): """ Bugzilla: 1678123 Polarion: assignee: nansari casecomponent: Services initialEstimate: 1/16h startsin: 5.11 testSteps: 1. Create catalog and catalog item 2. Order the catalog item 3. Provision the service catalog item or fail the service catalog item 4. Go to My services and check service state expectedResults: 1. 2. 3. 4. Service State should be Provisioned or Failed """ pass
Yadnyawalkya/integration_tests
cfme/tests/services/test_myservice.py
cfme/utils/smem_memory_monitor.py
""" Fixtures for Capacity and Utilization """ import fauxfactory import pytest from cfme.utils import conf from cfme.utils.ssh import SSHClient @pytest.fixture(scope="module") def enable_candu(appliance): candu = appliance.collections.candus server_settings = appliance.server.settings original_roles = server_settings.server_roles_db server_settings.enable_server_roles( 'ems_metrics_coordinator', 'ems_metrics_collector', 'ems_metrics_processor' ) server_settings.disable_server_roles( 'automate', 'smartstate' ) candu.enable_all() yield candu.disable_all() server_settings.update_server_roles_db(original_roles) @pytest.fixture(scope="module") def collect_data(appliance, provider, interval='hourly', back='7.days'): """Collect hourly back data for vsphere provider""" vm_name = provider.data['cap_and_util']['chargeback_vm'] # Capture real-time C&U data ret = appliance.ssh_client.run_rails_command( "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\ vm.perf_capture({}, {}.ago.utc, Time.now.utc)\"" .format(provider.id, repr(vm_name), repr(interval), back)) return ret.success @pytest.fixture(scope="module") def enable_candu_category(appliance): """Enable capture C&U Data for tag category location by navigating to the Configuration -> Region page. Click 'Tags' tab , select required company category under 'My Company Categories' and enable 'Capture C & U Data' for the category. """ collection = appliance.collections.categories location_category = collection.instantiate(name="location", display_name="Location") if not location_category.capture_candu: location_category.update(updates={"capture_candu": True}) return location_category @pytest.fixture(scope="function") def candu_tag_vm(provider, enable_candu_category): """Add location tag to VM if not available""" collection = provider.appliance.provider_based_collection(provider) vm = collection.instantiate('cu-24x7', provider) tag = enable_candu_category.collections.tags.instantiate(name="london", display_name="London") vm.add_tag(tag, exists_check=True) return vm @pytest.fixture(scope="module") def temp_appliance_extended_db(temp_appliance_preconfig): app = temp_appliance_preconfig app.evmserverd.stop() app.db.extend_partition() app.evmserverd.start() return app @pytest.fixture(scope="module") def candu_db_restore(temp_appliance_extended_db): app = temp_appliance_extended_db # get DB backup file db_storage_hostname = conf.cfme_data.bottlenecks.hostname db_storage_ssh = SSHClient(hostname=db_storage_hostname, **conf.credentials.bottlenecks) rand_filename = "/tmp/db.backup_{}".format(fauxfactory.gen_alphanumeric()) db_storage_ssh.get_file("{}/candu.db.backup".format( conf.cfme_data.bottlenecks.backup_path), rand_filename) app.ssh_client.put_file(rand_filename, "/tmp/evm_db.backup") app.evmserverd.stop() app.db.drop() app.db.create() app.db.restore() # When you load a database from an older version of the application, you always need to # run migrations. # https://bugzilla.redhat.com/show_bug.cgi?id=1643250 app.db.migrate() app.db.fix_auth_key() app.db.fix_auth_dbyml() app.evmserverd.start() app.wait_for_web_ui()
Yadnyawalkya/integration_tests
cfme/tests/services/test_myservice.py
cfme/fixtures/candu.py
import os import pytest from cfme.fixtures.terminalreporter import reporter from cfme.utils.datafile import data_path_for_filename from cfme.utils.datafile import load_data_file from cfme.utils.path import data_path from cfme.utils.path import log_path # Collection for storing unique combinations of data file paths # and filenames for usage reporting after a completed test run seen_data_files = set() @pytest.fixture(scope="module") def datafile(request): """datafile(filename, replacements) datafile fixture, with templating support Args: filename: filename to load from the data dir replacements: template replacements Returns: Path to the loaded datafile Usage: Given a filename, it will attempt to open the given file from the test's corresponding data dir. For example, this: datafile('testfile') # in tests/subdir/test_module_name.py Would return a file object representing this file: /path/to/cfme_tests/data/subdir/test_module_name/testfile Given a filename with a leading slash, it will attempt to load the file relative to the root of the data dir. For example, this: datafile('/common/testfile') # in tests/subdir/test_module_name.py Would return a file object representing this file: /path/to/cfme_tests/data/common/testfile Note that the test module name is not used with the leading slash. .. rubric:: Templates: This fixture can also handle template replacements. If the datafile being loaded is a python template, the dictionary of replacements can be passed as the 'replacements' keyword argument. In this case, the returned data file will be a NamedTemporaryFile prepopulated with the interpolated result from combining the template with the replacements mapping. * http://docs.python.org/2/library/string.html#template-strings * http://docs.python.org/2/library/tempfile.html#tempfile.NamedTemporaryFile """ return _FixtureDataFile(request) def pytest_addoption(parser): group = parser.getgroup('cfme') group.addoption('--udf-report', action='store_true', default=False, dest='udf_report', help='flag to generate an unused data files report') def pytest_sessionfinish(session, exitstatus): udf_log_file = log_path.join('unused_data_files.log') if udf_log_file.check(): # Clean up old udf log if it exists udf_log_file.remove() if session.config.option.udf_report is False: # Short out here if not making a report return # Output an unused data files log after a test run data_files = set() for dirpath, dirnames, filenames in os.walk(str(data_path)): for filename in filenames: filepath = os.path.join(dirpath, filename) data_files.add(filepath) unused_data_files = data_files - seen_data_files if unused_data_files: # Write the log of unused data files out, minus the data dir prefix udf_log = ''.join( (line[len(str(data_path)):] + '\n' for line in unused_data_files) ) udf_log_file.write(udf_log + '\n') # Throw a notice into the terminal reporter to check the log tr = reporter() tr.write_line('') tr.write_sep( '-', '%d unused data files after test run, check %s' % ( len(unused_data_files), udf_log_file.basename ) ) class _FixtureDataFile(object): def __init__(self, request): self.base_path = str(request.session.fspath) self.testmod_path = str(request.fspath) def __call__(self, filename, replacements=None): if filename.startswith('/'): complete_path = data_path_for_filename( filename.strip('/'), self.base_path) else: complete_path = data_path_for_filename( filename, self.base_path, self.testmod_path) seen_data_files.add(complete_path) return load_data_file(complete_path, replacements)
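A hedged usage sketch (file names are invented, not part of the module above): consuming the datafile fixture from a test module, including the template form described in its docstring; it assumes the returned object is an open, readable file.

def test_reads_plain_datafile(datafile):
    # resolves to data/<test subdir>/<this module's name>/testfile
    assert datafile('testfile').read()


def test_reads_templated_datafile(datafile):
    # a string.Template under data/common/, interpolated from the replacements mapping
    assert datafile('/common/greeting.template', replacements={'name': 'example'}).read()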
"""This module contains control REST API specific tests.""" import fauxfactory import pytest from manageiq_client.api import APIException from cfme import test_requirements from cfme.rest.gen_data import conditions as _conditions from cfme.rest.gen_data import policies as _policies from cfme.utils.rest import assert_response from cfme.utils.rest import delete_resources_from_collection from cfme.utils.rest import delete_resources_from_detail from cfme.utils.rest import query_resource_attributes from cfme.utils.wait import wait_for pytestmark = [ test_requirements.rest, ] class TestConditionsRESTAPI(object): @pytest.fixture(scope='function') def conditions(self, request, appliance): num_conditions = 2 response = _conditions(request, appliance, num=num_conditions) assert_response(appliance) assert len(response) == num_conditions return response def test_query_condition_attributes(self, conditions, soft_assert): """Tests access to condition attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: medium initialEstimate: 1/4h """ query_resource_attributes(conditions[0], soft_assert=soft_assert) def test_create_conditions(self, appliance, conditions): """Tests create conditions. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for condition in conditions: record = appliance.rest_api.collections.conditions.get(id=condition.id) assert record.description == condition.description @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE']) def test_delete_conditions_from_detail(self, conditions, method): """Tests delete conditions from detail. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(conditions, method=method, num_sec=100, delay=5) def test_delete_conditions_from_collection(self, conditions): """Tests delete conditions from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(conditions, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_conditions(self, conditions, appliance, from_detail): """Tests edit conditions. 
Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_conditions = len(conditions) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_conditions)] new = [{'description': 'Edited Test Condition {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_conditions): edited.append(conditions[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_conditions): new[index].update(conditions[index]._ref_repr()) edited = appliance.rest_api.collections.conditions.action.edit(*new) assert_response(appliance) assert len(edited) == num_conditions for index, condition in enumerate(conditions): record, __ = wait_for( lambda: appliance.rest_api.collections.conditions.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a test condition" ) condition.reload() assert condition.description == edited[index].description == record[0].description class TestPoliciesRESTAPI(object): @pytest.fixture(scope='function') def policies(self, request, appliance): num_policies = 2 response = _policies(request, appliance, num=num_policies) assert_response(appliance) assert len(response) == num_policies return response def test_query_policy_attributes(self, policies, soft_assert): """Tests access to policy attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ query_resource_attributes(policies[0], soft_assert=soft_assert) def test_create_policies(self, appliance, policies): """Tests create policies. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for policy in policies: record = appliance.rest_api.collections.policies.get(id=policy.id) assert record.description == policy.description def test_delete_policies_from_detail_post(self, policies): """Tests delete policies from detail using POST method. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='POST', num_sec=100, delay=5) def test_delete_policies_from_detail_delete(self, policies): """Tests delete policies from detail using DELETE method. Bugzilla: 1435773 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='DELETE', num_sec=100, delay=5) def test_delete_policies_from_collection(self, policies): """Tests delete policies from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(policies, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_policies(self, policies, appliance, from_detail): """Tests edit policies. 
Testing BZ 1435777 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_policies = len(policies) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_policies)] new = [{'description': 'Edited Test Policy {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_policies): edited.append(policies[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_policies): new[index].update(policies[index]._ref_repr()) edited = appliance.rest_api.collections.policies.action.edit(*new) assert_response(appliance) assert len(edited) == num_policies for index, policy in enumerate(policies): record, __ = wait_for( lambda: appliance.rest_api.collections.policies.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a policy" ) policy.reload() assert policy.description == edited[index].description == record[0].description def test_create_invalid_policies(self, appliance): """ This test case checks policy creation with invalid data. Bugzilla: 1435780 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: high initialEstimate: 1/30h """ policy_name = fauxfactory.gen_alphanumeric(5) data = { "name": "test_policy_{}".format(policy_name), "description": "Test Policy {}".format(policy_name), "mode": "bar", "towhat": "baz", "conditions_ids": [2000, 3000], "policy_contents": [{ "event_id": 2, "actions": [{"action_id": 1, "opts": {"qualifier": "failure"}}] }], } with pytest.raises(APIException, match="Api::BadRequestError"): appliance.rest_api.collections.policies.action.create(data)
izapolsk/integration_tests
cfme/tests/control/test_rest_control.py
cfme/fixtures/datafile.py
import attr
from riggerlib import recursive_update

from cfme.cloud.instance import Instance
from cfme.cloud.instance import InstanceCollection


@attr.s
class GCEInstance(Instance):
    # CFME & provider power control options
    START = "Start"
    POWER_ON = START  # For compatibility with the infra objects.
    STOP = "Stop"
    DELETE = "Delete"
    TERMINATE = 'Delete'
    # CFME-only power control options
    SOFT_REBOOT = "Soft Reboot"
    # Provider-only power control options
    RESTART = "Restart"

    # CFME power states
    STATE_ON = "on"
    STATE_OFF = "off"
    STATE_SUSPENDED = "suspended"
    STATE_TERMINATED = "terminated"
    STATE_ARCHIVED = "archived"
    STATE_UNKNOWN = "unknown"

    @property
    def ui_powerstates_available(self):
        return {
            'on': [self.STOP, self.SOFT_REBOOT, self.TERMINATE],
            'off': [self.START, self.TERMINATE]}

    @property
    def ui_powerstates_unavailable(self):
        return {
            'on': [self.START],
            'off': [self.STOP, self.SOFT_REBOOT]}

    @property
    def vm_default_args(self):
        """Represents dictionary used for Vm/Instance provision with GCE mandatory default args"""
        inst_args = super(GCEInstance, self).vm_default_args
        provisioning = self.provider.data['provisioning']
        inst_args['properties']['boot_disk_size'] = provisioning.get('boot_disk_size', '10 GB')
        return inst_args

    @property
    def vm_default_args_rest(self):
        inst_args = super(GCEInstance, self).vm_default_args_rest
        provisioning = self.provider.data['provisioning']
        recursive_update(inst_args, {
            'vm_fields': {
                'boot_disk_size': provisioning['boot_disk_size'].replace(' ', '.')}})
        return inst_args


@attr.s
class GCEInstanceCollection(InstanceCollection):
    ENTITY = GCEInstance
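A small, self-contained illustration (not part of the module): what the class constants above imply for a running GCE instance in the UI, and the disk-size normalisation applied by vm_default_args_rest. The import path follows the code_path given below.

from cfme.cloud.instance.gce import GCEInstance

# Power options the UI is expected to offer for a running instance, per ui_powerstates_available.
assert [GCEInstance.STOP, GCEInstance.SOFT_REBOOT, GCEInstance.TERMINATE] == \
    ['Stop', 'Soft Reboot', 'Delete']

# "10 GB" from the provisioning yaml becomes "10.GB" in the REST payload.
assert '10 GB'.replace(' ', '.') == '10.GB'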
"""This module contains control REST API specific tests.""" import fauxfactory import pytest from manageiq_client.api import APIException from cfme import test_requirements from cfme.rest.gen_data import conditions as _conditions from cfme.rest.gen_data import policies as _policies from cfme.utils.rest import assert_response from cfme.utils.rest import delete_resources_from_collection from cfme.utils.rest import delete_resources_from_detail from cfme.utils.rest import query_resource_attributes from cfme.utils.wait import wait_for pytestmark = [ test_requirements.rest, ] class TestConditionsRESTAPI(object): @pytest.fixture(scope='function') def conditions(self, request, appliance): num_conditions = 2 response = _conditions(request, appliance, num=num_conditions) assert_response(appliance) assert len(response) == num_conditions return response def test_query_condition_attributes(self, conditions, soft_assert): """Tests access to condition attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: medium initialEstimate: 1/4h """ query_resource_attributes(conditions[0], soft_assert=soft_assert) def test_create_conditions(self, appliance, conditions): """Tests create conditions. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for condition in conditions: record = appliance.rest_api.collections.conditions.get(id=condition.id) assert record.description == condition.description @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE']) def test_delete_conditions_from_detail(self, conditions, method): """Tests delete conditions from detail. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(conditions, method=method, num_sec=100, delay=5) def test_delete_conditions_from_collection(self, conditions): """Tests delete conditions from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(conditions, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_conditions(self, conditions, appliance, from_detail): """Tests edit conditions. 
Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_conditions = len(conditions) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_conditions)] new = [{'description': 'Edited Test Condition {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_conditions): edited.append(conditions[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_conditions): new[index].update(conditions[index]._ref_repr()) edited = appliance.rest_api.collections.conditions.action.edit(*new) assert_response(appliance) assert len(edited) == num_conditions for index, condition in enumerate(conditions): record, __ = wait_for( lambda: appliance.rest_api.collections.conditions.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a test condition" ) condition.reload() assert condition.description == edited[index].description == record[0].description class TestPoliciesRESTAPI(object): @pytest.fixture(scope='function') def policies(self, request, appliance): num_policies = 2 response = _policies(request, appliance, num=num_policies) assert_response(appliance) assert len(response) == num_policies return response def test_query_policy_attributes(self, policies, soft_assert): """Tests access to policy attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ query_resource_attributes(policies[0], soft_assert=soft_assert) def test_create_policies(self, appliance, policies): """Tests create policies. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for policy in policies: record = appliance.rest_api.collections.policies.get(id=policy.id) assert record.description == policy.description def test_delete_policies_from_detail_post(self, policies): """Tests delete policies from detail using POST method. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='POST', num_sec=100, delay=5) def test_delete_policies_from_detail_delete(self, policies): """Tests delete policies from detail using DELETE method. Bugzilla: 1435773 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='DELETE', num_sec=100, delay=5) def test_delete_policies_from_collection(self, policies): """Tests delete policies from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(policies, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_policies(self, policies, appliance, from_detail): """Tests edit policies. 
Testing BZ 1435777 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_policies = len(policies) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_policies)] new = [{'description': 'Edited Test Policy {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_policies): edited.append(policies[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_policies): new[index].update(policies[index]._ref_repr()) edited = appliance.rest_api.collections.policies.action.edit(*new) assert_response(appliance) assert len(edited) == num_policies for index, policy in enumerate(policies): record, __ = wait_for( lambda: appliance.rest_api.collections.policies.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a policy" ) policy.reload() assert policy.description == edited[index].description == record[0].description def test_create_invalid_policies(self, appliance): """ This test case checks policy creation with invalid data. Bugzilla: 1435780 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: high initialEstimate: 1/30h """ policy_name = fauxfactory.gen_alphanumeric(5) data = { "name": "test_policy_{}".format(policy_name), "description": "Test Policy {}".format(policy_name), "mode": "bar", "towhat": "baz", "conditions_ids": [2000, 3000], "policy_contents": [{ "event_id": 2, "actions": [{"action_id": 1, "opts": {"qualifier": "failure"}}] }], } with pytest.raises(APIException, match="Api::BadRequestError"): appliance.rest_api.collections.policies.action.create(data)
izapolsk/integration_tests
cfme/tests/control/test_rest_control.py
cfme/cloud/instance/gce.py
"""Module handling report menus contents""" from contextlib import contextmanager import attr from navmazing import NavigateToAttribute from widgetastic.widget import Text from widgetastic_patternfly import Button from cfme.intelligence.reports import CloudIntelReportsView from cfme.intelligence.reports import ReportsMultiBoxSelect from cfme.modeling.base import BaseCollection from cfme.modeling.base import BaseEntity from cfme.utils.appliance.implementations.ui import CFMENavigateStep from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.appliance.implementations.ui import navigator from widgetastic_manageiq import FolderManager from widgetastic_manageiq import ManageIQTree class AllReportMenusView(CloudIntelReportsView): title = Text("#explorer_title_text") reports_tree = ManageIQTree("menu_roles_treebox") @property def is_displayed(self): return ( self.in_intel_reports and self.title.text == "All EVM Groups" and self.edit_report_menus.is_opened and self.edit_report_menus.tree.currently_selected == ["All EVM Groups"] ) class EditReportMenusView(AllReportMenusView): # Buttons save_button = Button("Save") reset_button = Button("Reset") default_button = Button("Default") cancel_button = Button("Cancel") commit_button = Button("Commit") discard_button = Button("Discard") manager = FolderManager(".//div[@id='folder_lists']/table") report_select = ReportsMultiBoxSelect( move_into="Move selected reports right", move_from="Move selected reports left", available_items="available_reports", chosen_items="selected_reports" ) @property def is_displayed(self): return ( self.in_intel_reports and self.title.text == 'Editing EVM Group "{}"'.format(self.context["object"].group) and self.edit_report_menus.is_opened and self.edit_report_menus.tree.currently_selected == [ "All EVM Groups", self.context["object"].group ] ) @attr.s class ReportMenu(BaseEntity): """ This is a fake class mainly needed for navmazing navigation. """ group = None def go_to_group(self, group_name): self.group = group_name view = navigate_to(self, "Edit") assert view.is_displayed return view def get_folders(self, group): """Returns list of folders for given user group. Args: group: User group to check. """ view = self.go_to_group(group) view.reports_tree.click_path("Top Level") fields = view.manager.fields view.discard_button.click() return fields def get_subfolders(self, group, folder): """Returns list of sub-folders for given user group and folder. Args: group: User group to check. folder: Folder to read. """ view = self.go_to_group(group) view.reports_tree.click_path("Top Level", folder) fields = view.manager.fields view.discard_button.click() return fields def _action(self, action, manager, folder_name): with manager as folder_manager: getattr(folder_manager, action)(folder_name) def add_folder(self, group, folder): """Adds a folder under top-level. Args: group: User group. folder: Name of the new folder. """ self._action("add", self.manage_folder(group), folder) def add_subfolder(self, group, folder, subfolder): """Adds a subfolder under specified folder. Args: group: User group. folder: Name of the folder. subfolder: Name of the new subfolder. """ self._action("add", self.manage_folder(group, folder), subfolder) def remove_folder(self, group, folder): """Removes a folder under top-level. Args: group: User group. folder: Name of the folder. """ self._action("delete", self.manage_folder(group), folder) def remove_subfolder(self, group, folder, subfolder): """Removes a subfolder under specified folder. 
Args: group: User group. folder: Name of the folder. subfolder: Name of the subfolder. """ self._action("delete", self.manage_folder(group, folder), subfolder) def reset_to_default(self, group): """Clicks the `Default` button. Args: group: Group to set to Default """ view = self.go_to_group(group) view.default_button.click() view.save_button.click() flash_view = self.create_view(AllReportMenusView) assert flash_view.flash.assert_message( 'Report Menu for role "{}" was saved'.format(group) ) @contextmanager def manage_subfolder(self, group, folder, subfolder): """Context manager to use when modifying the subfolder contents. You can use manager's :py:meth:`FolderManager.bail_out` classmethod to end and discard the changes done inside the with block. Args: group: User group. folder: Parent folder name. subfolder: Subfolder name to manage. Returns: Context-managed :py:class: `widgetastic_manageiq.MultiBoxSelect` instance """ view = self.go_to_group(group) view.reports_tree.click_path("Top Level", folder, subfolder) try: yield view.report_select except FolderManager._BailOut: view.discard_button.click() except Exception: # In case of any exception, nothing will be saved view.discard_button.click() raise # And reraise the exception else: # If no exception happens, save! view.commit_button.click() view.save_button.click() flash_view = self.create_view(AllReportMenusView) flash_view.flash.assert_message( 'Report Menu for role "{}" was saved'.format(group) ) @contextmanager def manage_folder(self, group, folder=None): """Context manager to use when modifying the folder contents. You can use manager's :py:meth:`FolderManager.bail_out` classmethod to end and discard the changes done inside the with block. This context manager does not give the manager as a value to the with block so you have to import and use the :py:class:`FolderManager` class manually. Args: group: User group. folder: Which folder to manage. If None, top-level will be managed. Returns: Context-managed :py:class:`widgetastic_manageiq.FolderManager` instance """ view = self.go_to_group(group) if folder is None: view.reports_tree.click_path("Top Level") else: view.reports_tree.click_path("Top Level", folder) try: yield view.manager except FolderManager._BailOut: view.manager.discard() except Exception: # In case of any exception, nothing will be saved view.manager.discard() raise # And reraise the exception else: # If no exception happens, save! view.manager.commit() view.save_button.click() flash_view = self.create_view(AllReportMenusView) flash_view.flash.assert_message( 'Report Menu for role "{}" was saved'.format(group) ) def move_reports(self, group, folder, subfolder, *reports): """ Moves a list of reports to a given menu Args: group: User group folder: Parent of the subfolder where reports are to be moved. subfolder: Subfolder under which the reports are to be moved. reports: List of reports that are to be moved. """ reports = list(reports) cancel_view = "" with self.manage_subfolder(group, folder, subfolder) as selected_menu: selected_options = selected_menu.parent_view.report_select.all_options diff = set(selected_options) & set(reports) if diff and (len(diff) == len(reports)): cancel_view = self.create_view(AllReportMenusView) # If all the reports to be moved are already present, raise an exception to exit. raise FolderManager._BailOut # fill method replaces all the options in all_options with the value passed as argument # We do not want to replace any value, we just want to move the new reports to a given # menu. 
This is a work-around for that purpose. reports.extend(selected_options) selected_menu.parent_view.report_select.fill(reports) if cancel_view: cancel_view.flash.assert_message( 'Edit of Report Menu for role "{}" was cancelled by the user'.format( group ) ) @attr.s class ReportMenusCollection(BaseCollection): """Collection object for the :py:class:'cfme.intelligence.reports.ReportMenu'.""" ENTITY = ReportMenu @navigator.register(ReportMenu, "Edit") class EditReportMenus(CFMENavigateStep): VIEW = EditReportMenusView prerequisite = NavigateToAttribute( "appliance.collections.intel_report_menus", "All" ) def step(self, *args, **kwargs): self.prerequisite_view.edit_report_menus.tree.click_path( "All EVM Groups", self.obj.group ) @navigator.register(ReportMenusCollection, "All") class ReportMenus(CFMENavigateStep): VIEW = AllReportMenusView prerequisite = NavigateToAttribute("appliance.server", "CloudIntelReports") def step(self, *args, **kwargs): self.prerequisite_view.edit_report_menus.tree.click_path("All EVM Groups")
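A hedged usage sketch (group and folder names are invented): driving the folder-management helpers defined above. Reaching the entity through appliance.collections.intel_report_menus mirrors the navigator registration, but the exact instantiate() call is an assumption.

report_menu = appliance.collections.intel_report_menus.instantiate()

# add a top-level folder for the chosen EVM group, then remove it again
report_menu.add_folder('EvmGroup-administrator', 'My Custom Folder')
report_menu.remove_folder('EvmGroup-administrator', 'My Custom Folder')

# or manage the folder contents directly; FolderManager.bail_out() discards changes
with report_menu.manage_folder('EvmGroup-administrator') as folder_manager:
    folder_manager.add('Another Folder')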
"""This module contains control REST API specific tests.""" import fauxfactory import pytest from manageiq_client.api import APIException from cfme import test_requirements from cfme.rest.gen_data import conditions as _conditions from cfme.rest.gen_data import policies as _policies from cfme.utils.rest import assert_response from cfme.utils.rest import delete_resources_from_collection from cfme.utils.rest import delete_resources_from_detail from cfme.utils.rest import query_resource_attributes from cfme.utils.wait import wait_for pytestmark = [ test_requirements.rest, ] class TestConditionsRESTAPI(object): @pytest.fixture(scope='function') def conditions(self, request, appliance): num_conditions = 2 response = _conditions(request, appliance, num=num_conditions) assert_response(appliance) assert len(response) == num_conditions return response def test_query_condition_attributes(self, conditions, soft_assert): """Tests access to condition attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: medium initialEstimate: 1/4h """ query_resource_attributes(conditions[0], soft_assert=soft_assert) def test_create_conditions(self, appliance, conditions): """Tests create conditions. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for condition in conditions: record = appliance.rest_api.collections.conditions.get(id=condition.id) assert record.description == condition.description @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE']) def test_delete_conditions_from_detail(self, conditions, method): """Tests delete conditions from detail. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(conditions, method=method, num_sec=100, delay=5) def test_delete_conditions_from_collection(self, conditions): """Tests delete conditions from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(conditions, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_conditions(self, conditions, appliance, from_detail): """Tests edit conditions. 
Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_conditions = len(conditions) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_conditions)] new = [{'description': 'Edited Test Condition {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_conditions): edited.append(conditions[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_conditions): new[index].update(conditions[index]._ref_repr()) edited = appliance.rest_api.collections.conditions.action.edit(*new) assert_response(appliance) assert len(edited) == num_conditions for index, condition in enumerate(conditions): record, __ = wait_for( lambda: appliance.rest_api.collections.conditions.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a test condition" ) condition.reload() assert condition.description == edited[index].description == record[0].description class TestPoliciesRESTAPI(object): @pytest.fixture(scope='function') def policies(self, request, appliance): num_policies = 2 response = _policies(request, appliance, num=num_policies) assert_response(appliance) assert len(response) == num_policies return response def test_query_policy_attributes(self, policies, soft_assert): """Tests access to policy attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ query_resource_attributes(policies[0], soft_assert=soft_assert) def test_create_policies(self, appliance, policies): """Tests create policies. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for policy in policies: record = appliance.rest_api.collections.policies.get(id=policy.id) assert record.description == policy.description def test_delete_policies_from_detail_post(self, policies): """Tests delete policies from detail using POST method. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='POST', num_sec=100, delay=5) def test_delete_policies_from_detail_delete(self, policies): """Tests delete policies from detail using DELETE method. Bugzilla: 1435773 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='DELETE', num_sec=100, delay=5) def test_delete_policies_from_collection(self, policies): """Tests delete policies from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(policies, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_policies(self, policies, appliance, from_detail): """Tests edit policies. 
Testing BZ 1435777 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_policies = len(policies) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_policies)] new = [{'description': 'Edited Test Policy {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_policies): edited.append(policies[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_policies): new[index].update(policies[index]._ref_repr()) edited = appliance.rest_api.collections.policies.action.edit(*new) assert_response(appliance) assert len(edited) == num_policies for index, policy in enumerate(policies): record, __ = wait_for( lambda: appliance.rest_api.collections.policies.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a policy" ) policy.reload() assert policy.description == edited[index].description == record[0].description def test_create_invalid_policies(self, appliance): """ This test case checks policy creation with invalid data. Bugzilla: 1435780 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: high initialEstimate: 1/30h """ policy_name = fauxfactory.gen_alphanumeric(5) data = { "name": "test_policy_{}".format(policy_name), "description": "Test Policy {}".format(policy_name), "mode": "bar", "towhat": "baz", "conditions_ids": [2000, 3000], "policy_contents": [{ "event_id": 2, "actions": [{"action_id": 1, "opts": {"qualifier": "failure"}}] }], } with pytest.raises(APIException, match="Api::BadRequestError"): appliance.rest_api.collections.policies.action.create(data)
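# Editor's note: an illustrative helper, not part of the original tests, distilling the
# collection-level edit pattern used by test_edit_conditions/test_edit_policies above.
# The helper name and its arguments are assumptions made for demonstration only.
def bulk_edit_descriptions(appliance, collection, resources, new_descriptions):
    """Edit several REST resources with a single collection-level action call."""
    payloads = []
    for resource, description in zip(resources, new_descriptions):
        payload = {'description': description}
        # _ref_repr() adds the id/href reference so the server knows which record to edit.
        payload.update(resource._ref_repr())
        payloads.append(payload)
    edited = collection.action.edit(*payloads)
    assert_response(appliance)  # assert_response is already imported at the top of this module
    return edited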
izapolsk/integration_tests
cfme/tests/control/test_rest_control.py
cfme/intelligence/reports/menus.py
import attr import importscan import sentaku from cfme.generic_objects.definition.button_groups import GenericObjectButtonGroupsCollection from cfme.generic_objects.definition.button_groups import GenericObjectButtonsCollection from cfme.generic_objects.instance import GenericObjectInstanceCollection from cfme.modeling.base import BaseCollection from cfme.modeling.base import BaseEntity from cfme.utils.update import Updateable @attr.s class GenericObjectDefinition(BaseEntity, Updateable, sentaku.modeling.ElementMixin): """Generic Objects Definition class to context switch between UI and REST. Read/Update/Delete functionality. """ _collections = { 'generic_objects': GenericObjectInstanceCollection, 'generic_object_groups_buttons': GenericObjectButtonGroupsCollection, 'generic_object_buttons': GenericObjectButtonsCollection } update = sentaku.ContextualMethod() delete = sentaku.ContextualMethod() exists = sentaku.ContextualProperty() add_button = sentaku.ContextualMethod() add_button_group = sentaku.ContextualMethod() generic_objects = sentaku.ContextualProperty() generic_object_buttons = sentaku.ContextualProperty() instance_count = sentaku.ContextualProperty() name = attr.ib() description = attr.ib() attributes = attr.ib(default=None) # e.g. {'address': 'string'} associations = attr.ib(default=None) # e.g. {'services': 'Service'} methods = attr.ib(default=None) # e.g. ['method1', 'method2'] custom_image_file_path = attr.ib(default=None) rest_response = attr.ib(default=None, init=False) @attr.s class GenericObjectDefinitionCollection(BaseCollection, sentaku.modeling.ElementMixin): ENTITY = GenericObjectDefinition create = sentaku.ContextualMethod() all = sentaku.ContextualMethod() from cfme.generic_objects.definition import rest, ui # NOQA last for import cycles importscan.scan(rest) importscan.scan(ui)
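# Editor's note: an illustrative sketch, not part of the original module, of how the
# sentaku ContextualMethod/ContextualProperty declarations above let one call dispatch to
# either the REST or the UI implementation. The ViaREST/ViaUI import paths, the
# generic_object_definitions collection name and the create() arguments are assumptions.
from cfme.utils.appliance.implementations.rest import ViaREST  # assumed import path
from cfme.utils.appliance.implementations.ui import ViaUI  # assumed import path


def example_context_switching(appliance):
    collection = appliance.collections.generic_object_definitions  # assumed collection name
    with appliance.context.use(ViaREST):
        # Resolves to the implementation registered in cfme.generic_objects.definition.rest.
        definition = collection.create(
            name='load_balancer',
            description='example generic object definition',
            attributes={'address': 'string'},
        )
    with appliance.context.use(ViaUI):
        # The same contextual property now resolves to the UI implementation.
        assert definition.exists
    return definition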
"""This module contains control REST API specific tests.""" import fauxfactory import pytest from manageiq_client.api import APIException from cfme import test_requirements from cfme.rest.gen_data import conditions as _conditions from cfme.rest.gen_data import policies as _policies from cfme.utils.rest import assert_response from cfme.utils.rest import delete_resources_from_collection from cfme.utils.rest import delete_resources_from_detail from cfme.utils.rest import query_resource_attributes from cfme.utils.wait import wait_for pytestmark = [ test_requirements.rest, ] class TestConditionsRESTAPI(object): @pytest.fixture(scope='function') def conditions(self, request, appliance): num_conditions = 2 response = _conditions(request, appliance, num=num_conditions) assert_response(appliance) assert len(response) == num_conditions return response def test_query_condition_attributes(self, conditions, soft_assert): """Tests access to condition attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: medium initialEstimate: 1/4h """ query_resource_attributes(conditions[0], soft_assert=soft_assert) def test_create_conditions(self, appliance, conditions): """Tests create conditions. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for condition in conditions: record = appliance.rest_api.collections.conditions.get(id=condition.id) assert record.description == condition.description @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE']) def test_delete_conditions_from_detail(self, conditions, method): """Tests delete conditions from detail. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(conditions, method=method, num_sec=100, delay=5) def test_delete_conditions_from_collection(self, conditions): """Tests delete conditions from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(conditions, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_conditions(self, conditions, appliance, from_detail): """Tests edit conditions. 
Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_conditions = len(conditions) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_conditions)] new = [{'description': 'Edited Test Condition {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_conditions): edited.append(conditions[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_conditions): new[index].update(conditions[index]._ref_repr()) edited = appliance.rest_api.collections.conditions.action.edit(*new) assert_response(appliance) assert len(edited) == num_conditions for index, condition in enumerate(conditions): record, __ = wait_for( lambda: appliance.rest_api.collections.conditions.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a test condition" ) condition.reload() assert condition.description == edited[index].description == record[0].description class TestPoliciesRESTAPI(object): @pytest.fixture(scope='function') def policies(self, request, appliance): num_policies = 2 response = _policies(request, appliance, num=num_policies) assert_response(appliance) assert len(response) == num_policies return response def test_query_policy_attributes(self, policies, soft_assert): """Tests access to policy attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ query_resource_attributes(policies[0], soft_assert=soft_assert) def test_create_policies(self, appliance, policies): """Tests create policies. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for policy in policies: record = appliance.rest_api.collections.policies.get(id=policy.id) assert record.description == policy.description def test_delete_policies_from_detail_post(self, policies): """Tests delete policies from detail using POST method. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='POST', num_sec=100, delay=5) def test_delete_policies_from_detail_delete(self, policies): """Tests delete policies from detail using DELETE method. Bugzilla: 1435773 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='DELETE', num_sec=100, delay=5) def test_delete_policies_from_collection(self, policies): """Tests delete policies from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(policies, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_policies(self, policies, appliance, from_detail): """Tests edit policies. 
Testing BZ 1435777 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_policies = len(policies) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_policies)] new = [{'description': 'Edited Test Policy {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_policies): edited.append(policies[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_policies): new[index].update(policies[index]._ref_repr()) edited = appliance.rest_api.collections.policies.action.edit(*new) assert_response(appliance) assert len(edited) == num_policies for index, policy in enumerate(policies): record, __ = wait_for( lambda: appliance.rest_api.collections.policies.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a policy" ) policy.reload() assert policy.description == edited[index].description == record[0].description def test_create_invalid_policies(self, appliance): """ This test case checks policy creation with invalid data. Bugzilla: 1435780 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: high initialEstimate: 1/30h """ policy_name = fauxfactory.gen_alphanumeric(5) data = { "name": "test_policy_{}".format(policy_name), "description": "Test Policy {}".format(policy_name), "mode": "bar", "towhat": "baz", "conditions_ids": [2000, 3000], "policy_contents": [{ "event_id": 2, "actions": [{"action_id": 1, "opts": {"qualifier": "failure"}}] }], } with pytest.raises(APIException, match="Api::BadRequestError"): appliance.rest_api.collections.policies.action.create(data)
izapolsk/integration_tests
cfme/tests/control/test_rest_control.py
cfme/generic_objects/definition/__init__.py
from os import path from urllib.error import URLError import attr from cached_property import cached_property from wrapanapi.systems.container import Openshift from cfme.common import Taggable from cfme.common.provider import DefaultEndpoint from cfme.common.vm_console import ConsoleMixin from cfme.containers.provider import ContainersProvider from cfme.containers.provider import ContainersProviderDefaultEndpoint from cfme.containers.provider import ContainersProviderEndpointsForm from cfme.control.explorer.alert_profiles import NodeAlertProfile from cfme.control.explorer.alert_profiles import ProviderAlertProfile from cfme.utils import ssh from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.log import logger from cfme.utils.ocp_cli import OcpCli from cfme.utils.varmeth import variable from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for class CustomAttribute(object): def __init__(self, name, value, field_type=None, href=None): self.name = name self.value = value self.field_type = field_type self.href = href class OpenshiftDefaultEndpoint(ContainersProviderDefaultEndpoint): """Represents Openshift default endpoint""" @staticmethod def get_ca_cert(connection_info): """Getting OpenShift's certificate from the master machine. Args: connection_info (dict): username, password and hostname for OCP returns: certificate's content. """ with ssh.SSHClient(**connection_info) as provider_ssh: _, stdout, _ = provider_ssh.exec_command("cat /etc/origin/master/ca.crt") return str("".join(stdout.readlines())) class ServiceBasedEndpoint(DefaultEndpoint): @property def view_value_mapping(self): out = {'hostname': self.hostname, 'api_port': self.api_port, 'sec_protocol': self.sec_protocol} if out['sec_protocol'] and self.sec_protocol.lower() == 'ssl trusting custom ca': out['trusted_ca_certificates'] = OpenshiftDefaultEndpoint.get_ca_cert( {"username": self.ssh_creds.principal, "password": self.ssh_creds.secret, "hostname": self.master_hostname}) return out class VirtualizationEndpoint(ServiceBasedEndpoint): """Represents virtualization Endpoint""" name = 'virtualization' @property def view_value_mapping(self): # values like host, port are taken from Default endpoint # and not editable in Virtualization endpoint, only token can be added return {'kubevirt_token': self.token} class MetricsEndpoint(ServiceBasedEndpoint): """Represents metrics Endpoint""" name = 'metrics' class AlertsEndpoint(ServiceBasedEndpoint): """Represents Alerts Endpoint""" name = 'alerts' @attr.s(cmp=False) class OpenshiftProvider(ContainersProvider, ConsoleMixin, Taggable): num_route = ['num_route'] STATS_TO_MATCH = ContainersProvider.STATS_TO_MATCH + num_route type_name = "openshift" mgmt_class = Openshift db_types = ["Openshift::ContainerManager"] endpoints_form = ContainersProviderEndpointsForm settings_key = 'ems_openshift' ems_pretty_name = 'OpenShift Container Platform' http_proxy = attr.ib(default=None) adv_http = attr.ib(default=None) adv_https = attr.ib(default=None) no_proxy = attr.ib(default=None) image_repo = attr.ib(default=None) image_reg = attr.ib(default=None) image_tag = attr.ib(default=None) cve_loc = attr.ib(default=None) virt_type = attr.ib(default=None) provider = attr.ib(default=None) def create(self, **kwargs): # Enable alerts collection before adding the provider to avoid missing active # alert after adding the provider # For more info: https://bugzilla.redhat.com/show_bug.cgi?id=1514950 if getattr(self, "alerts_type") == "Prometheus": alert_profiles = 
self.appliance.collections.alert_profiles provider_profile = alert_profiles.instantiate(ProviderAlertProfile, "Prometheus Provider Profile") node_profile = alert_profiles.instantiate(NodeAlertProfile, "Prometheus node Profile") for profile in [provider_profile, node_profile]: profile.assign_to("The Enterprise") super(OpenshiftProvider, self).create(**kwargs) @cached_property def cli(self): return OcpCli(self) def href(self): return self.appliance.rest_api.collections.providers\ .find_by(name=self.name).resources[0].href @property def view_value_mapping(self): mapping = {'name': self.name, 'zone': self.zone, 'prov_type': ('OpenShift Container Platform' if self.appliance.is_downstream else 'OpenShift')} mapping['metrics_type'] = self.metrics_type mapping['alerts_type'] = self.alerts_type mapping['proxy'] = { 'http_proxy': self.http_proxy } mapping['advanced'] = { 'adv_http': self.adv_http, 'adv_https': self.adv_https, 'no_proxy': self.no_proxy, 'image_repo': self.image_repo, 'image_reg': self.image_reg, 'image_tag': self.image_tag, 'cve_loc': self.cve_loc } mapping['virt_type'] = self.virt_type return mapping @property def is_provider_enabled(self): return self.appliance.rest_api.collections.providers.get(name=self.name).enabled @variable(alias='db') def num_route(self): return self._num_db_generic('container_routes') @num_route.variant('ui') def num_route_ui(self): view = navigate_to(self, "Details") return int(view.entities.summary("Relationships").get_text_of('Container Routes')) @variable(alias='db') def num_template(self): return self._num_db_generic('container_templates') @num_template.variant('ui') def num_template_ui(self): view = navigate_to(self, "Details") return int(view.entities.summary("Relationships").get_text_of("Container Templates")) @classmethod def from_config(cls, prov_config, prov_key, appliance=None): appliance = appliance or cls.appliance endpoints = {} token_creds = cls.process_credential_yaml_key(prov_config['credentials'], cred_type='token') master_hostname = prov_config['endpoints']['default'].hostname ssh_creds = cls.process_credential_yaml_key(prov_config['ssh_creds']) for endp in prov_config['endpoints']: # Add ssh_password for each endpoint, so get_ca_cert # will be able to get SSL cert form OCP for each endpoint setattr(prov_config['endpoints'][endp], "master_hostname", master_hostname) setattr(prov_config['endpoints'][endp], "ssh_creds", ssh_creds) if OpenshiftDefaultEndpoint.name == endp: prov_config['endpoints'][endp]['token'] = token_creds.token endpoints[endp] = OpenshiftDefaultEndpoint(**prov_config['endpoints'][endp]) elif MetricsEndpoint.name == endp: endpoints[endp] = MetricsEndpoint(**prov_config['endpoints'][endp]) elif AlertsEndpoint.name == endp: endpoints[endp] = AlertsEndpoint(**prov_config['endpoints'][endp]) else: raise Exception('Unsupported endpoint type "{}".'.format(endp)) settings = prov_config.get('settings', {}) advanced = settings.get('advanced', {}) http_proxy = settings.get('proxy', {}).get('http_proxy') adv_http, adv_https, no_proxy, image_repo, image_reg, image_tag, cve_loc = [ advanced.get(field) for field in ('adv_http', 'adv_https', 'no_proxy', 'image_repo', 'image_reg', 'image_tag', 'cve_loc') ] return appliance.collections.containers_providers.instantiate( prov_class=cls, name=prov_config.get('name'), key=prov_key, zone=prov_config.get('server_zone'), metrics_type=prov_config.get('metrics_type'), alerts_type=prov_config.get('alerts_type'), endpoints=endpoints, provider_data=prov_config, http_proxy=http_proxy, 
adv_http=adv_http, adv_https=adv_https, no_proxy=no_proxy, image_repo=image_repo, image_reg=image_reg, image_tag=image_tag, cve_loc=cve_loc, virt_type=prov_config.get('virt_type')) def custom_attributes(self): """returns custom attributes""" response = self.appliance.rest_api.get( path.join(self.href(), 'custom_attributes')) out = [] for attr_dict in response['resources']: attr = self.appliance.rest_api.get(attr_dict['href']) out.append( CustomAttribute( attr['name'], attr['value'], (attr['field_type'] if 'field_type' in attr else None), attr_dict['href'] ) ) return out def add_custom_attributes(self, *custom_attributes): """Adding static custom attributes to provider. Args: custom_attributes: The custom attributes to add. returns: response. """ if not custom_attributes: raise TypeError('{} takes at least 1 argument.' .format(self.add_custom_attributes.__name__)) for c_attr in custom_attributes: if not isinstance(c_attr, CustomAttribute): raise TypeError('All arguments should be of type {}. ({} != {})' .format(CustomAttribute, type(c_attr), CustomAttribute)) payload = { "action": "add", "resources": [{ "name": ca.name, "value": str(ca.value) } for ca in custom_attributes]} for i, fld_tp in enumerate([c_attr.field_type for c_attr in custom_attributes]): if fld_tp: payload['resources'][i]['field_type'] = fld_tp return self.appliance.rest_api.post( path.join(self.href(), 'custom_attributes'), **payload) def edit_custom_attributes(self, *custom_attributes): """Editing static custom attributes in provider. Args: custom_attributes: The custom attributes to edit. returns: response. """ if not custom_attributes: raise TypeError('{} takes at least 1 argument.' .format(self.edit_custom_attributes.__name__)) for c_attr in custom_attributes: if not isinstance(c_attr, CustomAttribute): raise TypeError('All arguments should be of type {}. ({} != {})' .format(CustomAttribute, type(c_attr), CustomAttribute)) attribs = self.custom_attributes() payload = { "action": "edit", "resources": [{ "href": [c_attr for c_attr in attribs if c_attr.name == ca.name][-1].href, "value": ca.value } for ca in custom_attributes]} return self.appliance.rest_api.post( path.join(self.href(), 'custom_attributes'), **payload) def delete_custom_attributes(self, *custom_attributes): """Deleting static custom attributes from provider. Args: custom_attributes: The custom attributes to delete. (Could be also names (str)) Returns: response. """ names = [] for c_attr in custom_attributes: attr_type = type(c_attr) if attr_type in (str, CustomAttribute): names.append(c_attr if attr_type is str else c_attr.name) else: raise TypeError('Type of arguments should be either' 'str or CustomAttribute. 
({} not in [str, CustomAttribute])' .format(type(c_attr))) attribs = self.custom_attributes() if not names: names = [attrib.name for attrib in attribs] payload = { "action": "delete", "resources": [{ "href": attrib.href, } for attrib in attribs if attrib.name in names]} return self.appliance.rest_api.post( path.join(self.href(), 'custom_attributes'), **payload) def sync_ssl_certificate(self): """ fixture which sync SSL certificate between CFME and OCP Args: provider (OpenShiftProvider): OCP system to sync cert from appliance (IPAppliance): CFME appliance to sync cert with Returns: None """ def _copy_certificate(): is_succeed = True try: # Copy certificate to the appliance provider_ssh.get_file("/etc/origin/master/ca.crt", "/tmp/ca.crt") appliance_ssh.put_file("/tmp/ca.crt", "/etc/pki/ca-trust/source/anchors/{crt}".format( crt=cert_name)) except URLError: logger.debug("Fail to deploy certificate from Openshift to CFME") is_succeed = False finally: return is_succeed provider_ssh = self.cli.ssh_client appliance_ssh = self.appliance.ssh_client() # Connection to the applince in case of dead connection if not appliance_ssh.connected: appliance_ssh.connect() # Checking if SSL is already configured between appliance and provider, # by send a HTTPS request (using SSL) from the appliance to the provider, # hiding the output and sending back the return code of the action _, stdout, stderr = \ appliance_ssh.exec_command( "curl https://{provider}:8443 -sS > /dev/null;echo $?".format( provider=self.provider_data.hostname)) # Do in case of failure (return code is not 0) if stdout.readline().replace('\n', "") != "0": cert_name = "{provider_name}.ca.crt".format( provider_name=self.provider_data.hostname.split(".")[0]) wait_for(_copy_certificate, num_sec=600, delay=30, message="Copy certificate from OCP to CFME") appliance_ssh.exec_command("update-ca-trust") # restarting evemserverd to apply the new SSL certificate self.appliance.evmserverd.restart() self.appliance.evmserverd.wait_for_running() self.appliance.wait_for_web_ui() def get_system_id(self): mgmt_systems_tbl = self.appliance.db.client['ext_management_systems'] return self.appliance.db.client.session.query(mgmt_systems_tbl).filter( mgmt_systems_tbl.name == self.name).first().id def get_metrics(self, **kwargs): """"Returns all the collected metrics for this provider Args: filters: list of dicts with column name and values e.g [{"resource_type": "Container"}, {"parent_ems_id": "1L"}] metrics_table: Metrics table name, there are few metrics table e.g metrics, metric_rollups, etc Returns: Query object with the relevant records """ filters = kwargs.get("filters", {}) metrics_table = kwargs.get("metrics_table", "metric_rollups") metrics_tbl = self.appliance.db.client[metrics_table] mgmt_system_id = self.get_system_id() logger.info("Getting metrics for {name} (parent_ems_id == {id})".format( name=self.name, id=mgmt_system_id)) if filters: logger.info("Filtering by: {f}".format(f=filters)) filters["parent_ems_id"] = mgmt_system_id return self.appliance.db.client.session.query(metrics_tbl).filter_by(**filters) def wait_for_collected_metrics(self, timeout="50m", table_name="metrics"): """Check the db if gathering collection data Args: timeout: timeout in minutes Return: Bool: is collected metrics count is greater than 0 """ def is_collected(): metrics_count = self.get_metrics(table=table_name).count() logger.info("Current metrics found count is {count}".format(count=metrics_count)) return metrics_count > 0 logger.info("Monitoring DB for metrics collection") 
result = True try: wait_for(is_collected, timeout=timeout, delay=30) except TimedOutError: logger.error( "Timeout exceeded, No metrics found in MIQ DB for the provider \"{name}\"".format( name=self.name)) result = False finally: return result def pause(self): """ Pause the OCP provider. Returns: API response. """ return self.appliance.rest_api.collections.providers.get(name=self.name).action.pause() def resume(self): """ Resume the OCP provider. Returns: API response. """ return self.appliance.rest_api.collections.providers.get(name=self.name).action.resume()
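# Editor's note: a small usage sketch, not part of the original module, for the custom
# attribute helpers defined on OpenshiftProvider above. The provider handle and the
# attribute names/values are assumptions chosen for illustration.
def example_manage_custom_attributes(provider):
    environment = CustomAttribute('environment', 'production')
    owner = CustomAttribute('owner', 'qe-team')

    # POSTs an "add" action against the provider's custom_attributes sub-collection.
    provider.add_custom_attributes(environment, owner)

    # edit_custom_attributes() matches existing attributes by name and pushes the new value.
    environment.value = 'staging'
    provider.edit_custom_attributes(environment)

    # delete_custom_attributes() accepts CustomAttribute objects or plain attribute names.
    provider.delete_custom_attributes('owner')

    return provider.custom_attributes()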
"""This module contains control REST API specific tests.""" import fauxfactory import pytest from manageiq_client.api import APIException from cfme import test_requirements from cfme.rest.gen_data import conditions as _conditions from cfme.rest.gen_data import policies as _policies from cfme.utils.rest import assert_response from cfme.utils.rest import delete_resources_from_collection from cfme.utils.rest import delete_resources_from_detail from cfme.utils.rest import query_resource_attributes from cfme.utils.wait import wait_for pytestmark = [ test_requirements.rest, ] class TestConditionsRESTAPI(object): @pytest.fixture(scope='function') def conditions(self, request, appliance): num_conditions = 2 response = _conditions(request, appliance, num=num_conditions) assert_response(appliance) assert len(response) == num_conditions return response def test_query_condition_attributes(self, conditions, soft_assert): """Tests access to condition attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: medium initialEstimate: 1/4h """ query_resource_attributes(conditions[0], soft_assert=soft_assert) def test_create_conditions(self, appliance, conditions): """Tests create conditions. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for condition in conditions: record = appliance.rest_api.collections.conditions.get(id=condition.id) assert record.description == condition.description @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE']) def test_delete_conditions_from_detail(self, conditions, method): """Tests delete conditions from detail. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(conditions, method=method, num_sec=100, delay=5) def test_delete_conditions_from_collection(self, conditions): """Tests delete conditions from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(conditions, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_conditions(self, conditions, appliance, from_detail): """Tests edit conditions. 
Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_conditions = len(conditions) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_conditions)] new = [{'description': 'Edited Test Condition {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_conditions): edited.append(conditions[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_conditions): new[index].update(conditions[index]._ref_repr()) edited = appliance.rest_api.collections.conditions.action.edit(*new) assert_response(appliance) assert len(edited) == num_conditions for index, condition in enumerate(conditions): record, __ = wait_for( lambda: appliance.rest_api.collections.conditions.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a test condition" ) condition.reload() assert condition.description == edited[index].description == record[0].description class TestPoliciesRESTAPI(object): @pytest.fixture(scope='function') def policies(self, request, appliance): num_policies = 2 response = _policies(request, appliance, num=num_policies) assert_response(appliance) assert len(response) == num_policies return response def test_query_policy_attributes(self, policies, soft_assert): """Tests access to policy attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ query_resource_attributes(policies[0], soft_assert=soft_assert) def test_create_policies(self, appliance, policies): """Tests create policies. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for policy in policies: record = appliance.rest_api.collections.policies.get(id=policy.id) assert record.description == policy.description def test_delete_policies_from_detail_post(self, policies): """Tests delete policies from detail using POST method. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='POST', num_sec=100, delay=5) def test_delete_policies_from_detail_delete(self, policies): """Tests delete policies from detail using DELETE method. Bugzilla: 1435773 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='DELETE', num_sec=100, delay=5) def test_delete_policies_from_collection(self, policies): """Tests delete policies from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(policies, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_policies(self, policies, appliance, from_detail): """Tests edit policies. 
Testing BZ 1435777 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_policies = len(policies) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_policies)] new = [{'description': 'Edited Test Policy {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_policies): edited.append(policies[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_policies): new[index].update(policies[index]._ref_repr()) edited = appliance.rest_api.collections.policies.action.edit(*new) assert_response(appliance) assert len(edited) == num_policies for index, policy in enumerate(policies): record, __ = wait_for( lambda: appliance.rest_api.collections.policies.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a policy" ) policy.reload() assert policy.description == edited[index].description == record[0].description def test_create_invalid_policies(self, appliance): """ This test case checks policy creation with invalid data. Bugzilla: 1435780 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: high initialEstimate: 1/30h """ policy_name = fauxfactory.gen_alphanumeric(5) data = { "name": "test_policy_{}".format(policy_name), "description": "Test Policy {}".format(policy_name), "mode": "bar", "towhat": "baz", "conditions_ids": [2000, 3000], "policy_contents": [{ "event_id": 2, "actions": [{"action_id": 1, "opts": {"qualifier": "failure"}}] }], } with pytest.raises(APIException, match="Api::BadRequestError"): appliance.rest_api.collections.policies.action.create(data)
izapolsk/integration_tests
cfme/tests/control/test_rest_control.py
cfme/containers/provider/openshift.py
"""An example config:: artifactor: log_dir: /home/test/workspace/cfme_tests/artiout per_run: test #test, run, None reuse_dir: True squash_exceptions: False threaded: False server_address: 127.0.0.1 server_port: 21212 server_enabled: True plugins: ``log_dir`` is the destination for all artifacts ``per_run`` denotes if the test artifacts should be group by run, test, or None ``reuse_dir`` if this is False and Artifactor comes across a dir that has already been used, it will die """ import atexit import os import subprocess from threading import RLock import diaper import pytest from artifactor import ArtifactorClient from cfme.fixtures.pytest_store import store from cfme.fixtures.pytest_store import write_line from cfme.markers.polarion import extract_polarion_ids from cfme.utils.appliance import find_appliance from cfme.utils.blockers import Blocker from cfme.utils.blockers import BZ from cfme.utils.conf import credentials from cfme.utils.conf import env from cfme.utils.log import logger from cfme.utils.net import net_check from cfme.utils.net import random_port from cfme.utils.wait import wait_for UNDER_TEST = False # set to true for artifactor using tests # Create a list of all our passwords for use with the sanitize request later in this module # Filter out all Nones as it will mess the output up. words = [word for word in {v.get('password') for v in credentials.values()} if word is not None] def get_test_idents(item): try: return item.location[2], item.location[0] except AttributeError: try: return item.fspath.strpath, None except AttributeError: return (None, None) def get_name(obj): return (getattr(obj, '_param_name', None) or getattr(obj, 'name', None) or str(obj)) class DummyClient(object): def fire_hook(self, *args, **kwargs): return def terminate(self): return def task_status(self): return def __bool__(self): # DummyClient is always False, # so it's easy to see if we have an artiactor client return False def get_client(art_config, pytest_config): if art_config and not UNDER_TEST: port = getattr(pytest_config.option, 'artifactor_port', None) or \ art_config.get('server_port') or random_port() pytest_config.option.artifactor_port = port art_config['server_port'] = port return ArtifactorClient( art_config['server_address'], art_config['server_port']) else: return DummyClient() def spawn_server(config, art_client): if store.slave_manager or UNDER_TEST: return None import subprocess cmd = ['miq-artifactor-server', '--port', str(art_client.port)] if config.getvalue('run_id'): cmd.append('--run-id') cmd.append(str(config.getvalue('run_id'))) proc = subprocess.Popen(cmd) return proc session_ver = None session_build = None session_stream = None session_fw_version = None def pytest_addoption(parser): parser.addoption("--run-id", action="store", default=None, help="A run id to assist in logging") @pytest.hookimpl(tryfirst=True) def pytest_configure(config): if config.getoption('--help'): return art_client = get_client( art_config=env.get('artifactor', {}), pytest_config=config) # just in case if not store.slave_manager: with diaper: atexit.register(shutdown, config) if art_client: config._art_proc = spawn_server(config, art_client) wait_for( net_check, func_args=[art_client.port, '127.0.0.1'], func_kwargs={'force': True}, num_sec=10, message="wait for artifactor to start") art_client.ready = True else: config._art_proc = None from cfme.utils.log import artifactor_handler artifactor_handler.artifactor = art_client if store.slave_manager: artifactor_handler.slaveid = store.slaveid 
config._art_client = art_client def fire_art_hook(config, hook, **hook_args): client = getattr(config, '_art_client', None) if client is None: assert UNDER_TEST, 'missing artifactor is only valid for inprocess tests' else: return client.fire_hook(hook, **hook_args) def fire_art_test_hook(node, hook, **hook_args): name, location = get_test_idents(node) return fire_art_hook( node.config, hook, test_name=name, test_location=location, **hook_args) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_protocol(item): global session_ver global session_build global session_stream appliance = find_appliance(item) if not session_ver: session_ver = str(appliance.version) session_build = appliance.build session_stream = appliance.version.stream() if str(session_ver) not in session_build: session_build = "{}-{}".format(str(session_ver), session_build) session_fw_version = None try: proc = subprocess.Popen(['git', 'describe', '--tags'], stdout=subprocess.PIPE) proc.wait() session_fw_version = proc.stdout.read().strip() except Exception: pass # already set session_fw_version to None fire_art_hook( item.config, 'session_info', version=session_ver, build=session_build, stream=session_stream, fw_version=session_fw_version ) tier = item.get_closest_marker('tier') if tier: tier = tier.args[0] requirement = item.get_closest_marker('requirement') if requirement: requirement = requirement.args[0] param_dict = {} try: params = item.callspec.params param_dict = {p: get_name(v) for p, v in params.items()} except Exception: pass # already set param_dict ip = appliance.hostname # This pre_start_test hook is needed so that filedump is able to make get the test # object set up before the logger starts logging. As the logger fires a nested hook # to the filedumper, and we can't specify order inriggerlib. 
meta = item.get_closest_marker('meta') if meta and 'blockers' in meta.kwargs: blocker_spec = meta.kwargs['blockers'] blockers = [] for blocker in blocker_spec: if isinstance(blocker, int): blockers.append(BZ(blocker).url) else: blockers.append(Blocker.parse(blocker).url) else: blockers = [] fire_art_test_hook( item, 'pre_start_test', slaveid=store.slaveid, ip=ip) fire_art_test_hook( item, 'start_test', slaveid=store.slaveid, ip=ip, tier=tier, requirement=requirement, param_dict=param_dict, issues=blockers) yield def pytest_runtest_teardown(item, nextitem): name, location = get_test_idents(item) app = find_appliance(item) ip = app.hostname fire_art_test_hook( item, 'finish_test', slaveid=store.slaveid, ip=ip, wait_for_task=True) fire_art_test_hook(item, 'sanitize', words=words) jenkins_data = { 'build_url': os.environ.get('BUILD_URL'), 'build_number': os.environ.get('BUILD_NUMBER'), 'git_commit': os.environ.get('GIT_COMMIT'), 'job_name': os.environ.get('JOB_NAME') } param_dict = None try: caps = app.browser.widgetastic.selenium.capabilities param_dict = { 'browserName': caps.get('browserName', 'Unknown'), 'browserPlatform': caps.get('platformName', caps.get('platform', 'Unknown')), 'browserVersion': caps.get('browserVersion', caps.get('version', 'Unknown')) } except Exception: logger.exception("Couldn't grab browser env_vars") pass # already set param_dict fire_art_test_hook( item, 'ostriz_send', env_params=param_dict, slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item), jenkins=jenkins_data) def pytest_runtest_logreport(report): if store.slave_manager: return # each node does its own reporting config = store.config # tech debt name, location = get_test_idents(report) xfail = hasattr(report, 'wasxfail') if hasattr(report, 'skipped'): if report.skipped: fire_art_hook( config, 'filedump', test_location=location, test_name=name, description="Short traceback", contents=report.longreprtext, file_type="short_tb", group_id="skipped") fire_art_hook( config, 'report_test', test_location=location, test_name=name, test_xfail=xfail, test_when=report.when, test_outcome=report.outcome, test_phase_duration=report.duration) fire_art_hook(config, 'build_report') @pytest.hookimpl(hookwrapper=True) def pytest_unconfigure(config): yield shutdown(config) lock = RLock() def shutdown(config): app = find_appliance(config, require=False) if app is not None: with lock: proc = config._art_proc if proc and proc.returncode is None: if not store.slave_manager: write_line('collecting artifacts') fire_art_hook(config, 'finish_session') if not store.slave_manager: config._art_client.terminate() proc.wait()
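# Editor's note: an illustrative fixture, not part of the original plugin, showing how extra
# artifacts can be pushed through the hooks defined above. It reuses fire_art_test_hook from
# this module; the fixture name, file_type and group_id values are assumptions, while the
# 'filedump' hook arguments mirror the call made in pytest_runtest_logreport.
@pytest.fixture
def dump_debug_notes(request):
    """After the test finishes, store a small text artifact alongside the other test artifacts."""
    yield
    fire_art_test_hook(
        request.node,
        'filedump',
        description="Debug notes",
        contents="notes collected while the test was running",
        file_type="debug_notes",
        group_id="editor-example",
    )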
"""This module contains control REST API specific tests.""" import fauxfactory import pytest from manageiq_client.api import APIException from cfme import test_requirements from cfme.rest.gen_data import conditions as _conditions from cfme.rest.gen_data import policies as _policies from cfme.utils.rest import assert_response from cfme.utils.rest import delete_resources_from_collection from cfme.utils.rest import delete_resources_from_detail from cfme.utils.rest import query_resource_attributes from cfme.utils.wait import wait_for pytestmark = [ test_requirements.rest, ] class TestConditionsRESTAPI(object): @pytest.fixture(scope='function') def conditions(self, request, appliance): num_conditions = 2 response = _conditions(request, appliance, num=num_conditions) assert_response(appliance) assert len(response) == num_conditions return response def test_query_condition_attributes(self, conditions, soft_assert): """Tests access to condition attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: medium initialEstimate: 1/4h """ query_resource_attributes(conditions[0], soft_assert=soft_assert) def test_create_conditions(self, appliance, conditions): """Tests create conditions. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for condition in conditions: record = appliance.rest_api.collections.conditions.get(id=condition.id) assert record.description == condition.description @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE']) def test_delete_conditions_from_detail(self, conditions, method): """Tests delete conditions from detail. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(conditions, method=method, num_sec=100, delay=5) def test_delete_conditions_from_collection(self, conditions): """Tests delete conditions from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(conditions, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_conditions(self, conditions, appliance, from_detail): """Tests edit conditions. 
Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_conditions = len(conditions) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_conditions)] new = [{'description': 'Edited Test Condition {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_conditions): edited.append(conditions[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_conditions): new[index].update(conditions[index]._ref_repr()) edited = appliance.rest_api.collections.conditions.action.edit(*new) assert_response(appliance) assert len(edited) == num_conditions for index, condition in enumerate(conditions): record, __ = wait_for( lambda: appliance.rest_api.collections.conditions.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a test condition" ) condition.reload() assert condition.description == edited[index].description == record[0].description class TestPoliciesRESTAPI(object): @pytest.fixture(scope='function') def policies(self, request, appliance): num_policies = 2 response = _policies(request, appliance, num=num_policies) assert_response(appliance) assert len(response) == num_policies return response def test_query_policy_attributes(self, policies, soft_assert): """Tests access to policy attributes. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ query_resource_attributes(policies[0], soft_assert=soft_assert) def test_create_policies(self, appliance, policies): """Tests create policies. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ for policy in policies: record = appliance.rest_api.collections.policies.get(id=policy.id) assert record.description == policy.description def test_delete_policies_from_detail_post(self, policies): """Tests delete policies from detail using POST method. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='POST', num_sec=100, delay=5) def test_delete_policies_from_detail_delete(self, policies): """Tests delete policies from detail using DELETE method. Bugzilla: 1435773 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_detail(policies, method='DELETE', num_sec=100, delay=5) def test_delete_policies_from_collection(self, policies): """Tests delete policies from collection. Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ delete_resources_from_collection(policies, num_sec=100, delay=5) @pytest.mark.parametrize( 'from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_policies(self, policies, appliance, from_detail): """Tests edit policies. 
Testing BZ 1435777 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: low initialEstimate: 1/4h """ num_policies = len(policies) uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_policies)] new = [{'description': 'Edited Test Policy {}'.format(u)} for u in uniq] if from_detail: edited = [] for index in range(num_policies): edited.append(policies[index].action.edit(**new[index])) assert_response(appliance) else: for index in range(num_policies): new[index].update(policies[index]._ref_repr()) edited = appliance.rest_api.collections.policies.action.edit(*new) assert_response(appliance) assert len(edited) == num_policies for index, policy in enumerate(policies): record, __ = wait_for( lambda: appliance.rest_api.collections.policies.find_by( description=new[index]['description']) or False, num_sec=100, delay=5, message="Find a policy" ) policy.reload() assert policy.description == edited[index].description == record[0].description def test_create_invalid_policies(self, appliance): """ This test case checks policy creation with invalid data. Bugzilla: 1435780 Metadata: test_flag: rest Polarion: assignee: pvala casecomponent: Control caseimportance: high initialEstimate: 1/30h """ policy_name = fauxfactory.gen_alphanumeric(5) data = { "name": "test_policy_{}".format(policy_name), "description": "Test Policy {}".format(policy_name), "mode": "bar", "towhat": "baz", "conditions_ids": [2000, 3000], "policy_contents": [{ "event_id": 2, "actions": [{"action_id": 1, "opts": {"qualifier": "failure"}}] }], } with pytest.raises(APIException, match="Api::BadRequestError"): appliance.rest_api.collections.policies.action.create(data)
izapolsk/integration_tests
cfme/tests/control/test_rest_control.py
cfme/fixtures/artifactor_plugin.py
""" implement the TimedeltaIndex """ from datetime import timedelta import numpy as np from pandas.core.dtypes.common import ( _TD_DTYPE, is_integer, is_float, is_bool_dtype, is_list_like, is_scalar, is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype, _ensure_int64) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries from pandas.core.common import _maybe_box, _values_from_object from pandas.core.indexes.base import Index from pandas.core.indexes.numeric import Int64Index import pandas.compat as compat from pandas.compat import u from pandas.tseries.frequencies import to_offset from pandas.core.algorithms import checked_add_with_arr from pandas.core.base import _shared_docs from pandas.core.indexes.base import _index_shared_docs import pandas.core.common as com import pandas.core.dtypes.concat as _concat from pandas.util._decorators import Appender, Substitution, deprecate_kwarg from pandas.core.indexes.datetimelike import TimelikeOps, DatetimeIndexOpsMixin from pandas.core.tools.timedeltas import ( to_timedelta, _coerce_scalar_to_timedelta_type) from pandas.tseries.offsets import Tick, DateOffset from pandas._libs import (lib, index as libindex, tslib as libts, join as libjoin, Timedelta, NaT, iNaT) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._libs.tslibs.fields import get_timedelta_field def _field_accessor(name, alias, docstring=None): def f(self): values = self.asi8 result = get_timedelta_field(values, alias) if self.hasnans: result = self._maybe_mask_results(result, convert='float64') return Index(result, name=self.name) f.__name__ = name f.__doc__ = docstring return property(f) def _td_index_cmp(opname, cls, nat_result=False): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ def wrapper(self, other): msg = "cannot compare a TimedeltaIndex with type {0}" func = getattr(super(TimedeltaIndex, self), opname) if _is_convertible_to_td(other) or other is NaT: try: other = _to_m8(other) except ValueError: # failed to parse as timedelta raise TypeError(msg.format(type(other))) result = func(other) if isna(other): result.fill(nat_result) else: if not is_list_like(other): raise TypeError(msg.format(type(other))) other = TimedeltaIndex(other).values result = func(other) result = _values_from_object(result) if isinstance(other, Index): o_mask = other.values.view('i8') == iNaT else: o_mask = other.view('i8') == iNaT if o_mask.any(): result[o_mask] = nat_result if self.hasnans: result[self._isnan] = nat_result # support of bool dtype indexers if is_bool_dtype(result): return result return Index(result) return compat.set_function_name(wrapper, opname, cls) class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index): """ Immutable ndarray of timedelta64 data, represented internally as int64, and which can be boxed to timedelta objects Parameters ---------- data : array-like (1-dimensional), optional Optional timedelta-like data to construct index with unit: unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional which is an integer/float number freq: a frequency for the index, optional copy : bool Make a copy of input ndarray start : starting value, timedelta-like, optional If data is None, start is used as the start point in generating regular timedelta data. periods : int, optional, > 0 Number of periods to generate, if generating index. 
Takes precedence over end argument end : end time, timedelta-like, optional If periods is none, generated index will extend to first conforming time on or just past end argument closed : string or None, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) name : object Name to be stored in the index Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. See Also --------- Index : The base pandas Index type Timedelta : Represents a duration between two dates or times. DatetimeIndex : Index of datetime64 data PeriodIndex : Index of Period data Attributes ---------- days seconds microseconds nanoseconds components inferred_freq Methods ------- to_pytimedelta to_series round floor ceil to_frame """ _typ = 'timedeltaindex' _join_precedence = 10 def _join_i8_wrapper(joinf, **kwargs): return DatetimeIndexOpsMixin._join_i8_wrapper( joinf, dtype='m8[ns]', **kwargs) _inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64) _outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64) _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64) _left_indexer_unique = _join_i8_wrapper( libjoin.left_join_indexer_unique_int64, with_indexers=False) _arrmap = None # define my properties & methods for delegation _other_ops = [] _bool_ops = [] _object_ops = ['freq'] _field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds'] _datetimelike_ops = _field_ops + _object_ops + _bool_ops _datetimelike_methods = ["to_pytimedelta", "total_seconds", "round", "floor", "ceil"] @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ cls.__eq__ = _td_index_cmp('__eq__', cls) cls.__ne__ = _td_index_cmp('__ne__', cls, nat_result=True) cls.__lt__ = _td_index_cmp('__lt__', cls) cls.__gt__ = _td_index_cmp('__gt__', cls) cls.__le__ = _td_index_cmp('__le__', cls) cls.__ge__ = _td_index_cmp('__ge__', cls) _engine_type = libindex.TimedeltaEngine _comparables = ['name', 'freq'] _attributes = ['name', 'freq'] _is_numeric_dtype = True _infer_as_myclass = True freq = None def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, periods=None, copy=False, name=None, closed=None, verify_integrity=True, **kwargs): if isinstance(data, TimedeltaIndex) and freq is None and name is None: if copy: return data.copy() else: return data._shallow_copy() freq_infer = False if not isinstance(freq, DateOffset): # if a passed freq is None, don't infer automatically if freq != 'infer': freq = to_offset(freq) else: freq_infer = True freq = None if periods is not None: if is_float(periods): periods = int(periods) elif not is_integer(periods): msg = 'periods must be a number, got {periods}' raise TypeError(msg.format(periods=periods)) if data is None and freq is None: raise ValueError("Must provide freq argument if no data is " "supplied") if data is None: return cls._generate(start, end, periods, name, freq, closed=closed) if unit is not None: data = to_timedelta(data, unit=unit, box=False) if not isinstance(data, (np.ndarray, Index, ABCSeries)): if is_scalar(data): raise ValueError('TimedeltaIndex() must be called with a ' 'collection of some kind, %s was passed' % repr(data)) # convert if not already if getattr(data, 'dtype', None) != _TD_DTYPE: data = to_timedelta(data, unit=unit, box=False) elif copy: data = np.array(data, copy=True) # check that we are matching freqs if verify_integrity and len(data) > 0: if freq is 
not None and not freq_infer: index = cls._simple_new(data, name=name) inferred = index.inferred_freq if inferred != freq.freqstr: on_freq = cls._generate( index[0], None, len(index), name, freq) if not np.array_equal(index.asi8, on_freq.asi8): raise ValueError('Inferred frequency {0} from passed ' 'timedeltas does not conform to ' 'passed frequency {1}' .format(inferred, freq.freqstr)) index.freq = freq return index if freq_infer: index = cls._simple_new(data, name=name) inferred = index.inferred_freq if inferred: index.freq = to_offset(inferred) return index return cls._simple_new(data, name=name, freq=freq) @classmethod def _generate(cls, start, end, periods, name, offset, closed=None): if com._count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and ' 'periods, exactly two must be specified') if start is not None: start = Timedelta(start) if end is not None: end = Timedelta(end) left_closed = False right_closed = False if start is None and end is None: if closed is not None: raise ValueError("Closed has to be None if not both of start" "and end are defined") if closed is None: left_closed = True right_closed = True elif closed == "left": left_closed = True elif closed == "right": right_closed = True else: raise ValueError("Closed has to be either 'left', 'right' or None") index = _generate_regular_range(start, end, periods, offset) index = cls._simple_new(index, name=name, freq=offset) if not left_closed: index = index[1:] if not right_closed: index = index[:-1] return index @property def _box_func(self): return lambda x: Timedelta(x, unit='ns') @classmethod def _simple_new(cls, values, name=None, freq=None, **kwargs): values = np.array(values, copy=False) if values.dtype == np.object_: values = array_to_timedelta64(values) if values.dtype != _TD_DTYPE: values = _ensure_int64(values).view(_TD_DTYPE) result = object.__new__(cls) result._data = values result.name = name result.freq = freq result._reset_identity() return result @property def _formatter_func(self): from pandas.io.formats.format import _get_format_timedelta64 return _get_format_timedelta64(self, box=True) def __setstate__(self, state): """Necessary for making this object picklable""" if isinstance(state, dict): super(TimedeltaIndex, self).__setstate__(state) else: raise Exception("invalid pickle state") _unpickle_compat = __setstate__ def _maybe_update_attributes(self, attrs): """ Update Index attributes (e.g. 
freq) depending on op """ freq = attrs.get('freq', None) if freq is not None: # no need to infer if freq is None attrs['freq'] = 'infer' return attrs def _add_delta(self, delta): if isinstance(delta, (Tick, timedelta, np.timedelta64)): new_values = self._add_delta_td(delta) name = self.name elif isinstance(delta, TimedeltaIndex): new_values = self._add_delta_tdi(delta) # update name when delta is index name = com._maybe_match_name(self, delta) else: raise ValueError("cannot add the type {0} to a TimedeltaIndex" .format(type(delta))) result = TimedeltaIndex(new_values, freq='infer', name=name) return result def _evaluate_with_timedelta_like(self, other, op, opstr): # allow division by a timedelta if opstr in ['__div__', '__truediv__', '__floordiv__']: if _is_convertible_to_td(other): other = Timedelta(other) if isna(other): raise NotImplementedError( "division by pd.NaT not implemented") i8 = self.asi8 if opstr in ['__floordiv__']: result = i8 // other.value else: result = op(i8, float(other.value)) result = self._maybe_mask_results(result, convert='float64') return Index(result, name=self.name, copy=False) return NotImplemented def _add_datelike(self, other): # adding a timedeltaindex to a datetimelike from pandas import Timestamp, DatetimeIndex if other is NaT: result = self._nat_new(box=False) else: other = Timestamp(other) i8 = self.asi8 result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=iNaT) return DatetimeIndex(result, name=self.name, copy=False) def _sub_datelike(self, other): from pandas import DatetimeIndex if other is NaT: result = self._nat_new(box=False) else: raise TypeError("cannot subtract a datelike from a TimedeltaIndex") return DatetimeIndex(result, name=self.name, copy=False) def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs): from pandas.io.formats.format import Timedelta64Formatter return Timedelta64Formatter(values=self, nat_rep=na_rep, justify='all').get_result() days = _field_accessor("days", "days", " Number of days for each element. ") seconds = _field_accessor("seconds", "seconds", " Number of seconds (>= 0 and less than 1 day) " "for each element. ") microseconds = _field_accessor("microseconds", "microseconds", "\nNumber of microseconds (>= 0 and less " "than 1 second) for each\nelement. ") nanoseconds = _field_accessor("nanoseconds", "nanoseconds", "\nNumber of nanoseconds (>= 0 and less " "than 1 microsecond) for each\nelement.\n") @property def components(self): """ Return a dataframe of the components (days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. Returns ------- a DataFrame """ from pandas import DataFrame columns = ['days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds', 'nanoseconds'] hasnans = self.hasnans if hasnans: def f(x): if isna(x): return [np.nan] * len(columns) return x.components else: def f(x): return x.components result = DataFrame([f(x) for x in self]) result.columns = columns if not hasnans: result = result.astype('int64') return result def total_seconds(self): """ Total duration of each element expressed in seconds. 
""" return Index(self._maybe_mask_results(1e-9 * self.asi8), name=self.name) def to_pytimedelta(self): """ Return TimedeltaIndex as object ndarray of datetime.timedelta objects Returns ------- datetimes : ndarray """ return libts.ints_to_pytimedelta(self.asi8) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype): # return an index (essentially this is division) result = self.values.astype(dtype, copy=copy) if self.hasnans: values = self._maybe_mask_results(result, convert='float64') return Index(values, name=self.name) return Index(result.astype('i8'), name=self.name) return super(TimedeltaIndex, self).astype(dtype, copy=copy) def union(self, other): """ Specialized union for TimedeltaIndex objects. If combine overlapping ranges with the same DateOffset, will be much faster than Index.union Parameters ---------- other : TimedeltaIndex or array-like Returns ------- y : Index or TimedeltaIndex """ self._assert_can_do_setop(other) if not isinstance(other, TimedeltaIndex): try: other = TimedeltaIndex(other) except (TypeError, ValueError): pass this, other = self, other if this._can_fast_union(other): return this._fast_union(other) else: result = Index.union(this, other) if isinstance(result, TimedeltaIndex): if result.freq is None: result.freq = to_offset(result.inferred_freq) return result def join(self, other, how='left', level=None, return_indexers=False, sort=False): """ See Index.join """ if _is_convertible_to_index(other): try: other = TimedeltaIndex(other) except (TypeError, ValueError): pass return Index.join(self, other, how=how, level=level, return_indexers=return_indexers, sort=sort) def _wrap_joined_index(self, joined, other): name = self.name if self.name == other.name else None if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and self._can_fast_union(other)): joined = self._shallow_copy(joined, name=name) return joined else: return self._simple_new(joined, name) def _can_fast_union(self, other): if not isinstance(other, TimedeltaIndex): return False freq = self.freq if freq is None or freq != other.freq: return False if not self.is_monotonic or not other.is_monotonic: return False if len(self) == 0 or len(other) == 0: return True # to make our life easier, "sort" the two ranges if self[0] <= other[0]: left, right = self, other else: left, right = other, self right_start = right[0] left_end = left[-1] # Only need to "adjoin", not overlap return (right_start == left_end + freq) or right_start in left def _fast_union(self, other): if len(other) == 0: return self.view(type(self)) if len(self) == 0: return other.view(type(self)) # to make our life easier, "sort" the two ranges if self[0] <= other[0]: left, right = self, other else: left, right = other, self left_end = left[-1] right_end = right[-1] # concatenate if left_end < right_end: loc = right.searchsorted(left_end, side='right') right_chunk = right.values[loc:] dates = _concat._concat_compat((left.values, right_chunk)) return self._shallow_copy(dates) else: return left def _wrap_union_result(self, other, result): name = self.name if self.name == other.name else None return self._simple_new(result, name=name, freq=None) def intersection(self, other): """ Specialized intersection for TimedeltaIndex objects. 
May be much faster than Index.intersection Parameters ---------- other : TimedeltaIndex or array-like Returns ------- y : Index or TimedeltaIndex """ self._assert_can_do_setop(other) if not isinstance(other, TimedeltaIndex): try: other = TimedeltaIndex(other) except (TypeError, ValueError): pass result = Index.intersection(self, other) return result if len(self) == 0: return self if len(other) == 0: return other # to make our life easier, "sort" the two ranges if self[0] <= other[0]: left, right = self, other else: left, right = other, self end = min(left[-1], right[-1]) start = right[0] if end < start: return type(self)(data=[]) else: lslice = slice(*left.slice_locs(start, end)) left_chunk = left.values[lslice] return self._shallow_copy(left_chunk) def _maybe_promote(self, other): if other.inferred_type == 'timedelta': other = TimedeltaIndex(other) return self, other def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ if _is_convertible_to_td(key): key = Timedelta(key) return self.get_value_maybe_box(series, key) try: return _maybe_box(self, Index.get_value(self, series, key), series, key) except KeyError: try: loc = self._get_string_slice(key) return series[loc] except (TypeError, ValueError, KeyError): pass try: return self.get_value_maybe_box(series, key) except (TypeError, ValueError, KeyError): raise KeyError(key) def get_value_maybe_box(self, series, key): if not isinstance(key, Timedelta): key = Timedelta(key) values = self._engine.get_value(_values_from_object(series), key) return _maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label Returns ------- loc : int """ if is_list_like(key): raise TypeError if isna(key): key = NaT if tolerance is not None: # try converting tolerance now, so errors don't get swallowed by # the try/except clauses below tolerance = self._convert_tolerance(tolerance, np.asarray(key)) if _is_convertible_to_td(key): key = Timedelta(key) return Index.get_loc(self, key, method, tolerance) try: return Index.get_loc(self, key, method, tolerance) except (KeyError, ValueError, TypeError): try: return self._get_string_slice(key) except (TypeError, KeyError, ValueError): pass try: stamp = Timedelta(key) return Index.get_loc(self, stamp, method, tolerance) except (KeyError, ValueError): raise KeyError(key) def _maybe_cast_slice_bound(self, label, side, kind): """ If label is a string, cast it to timedelta according to resolution. 
Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'} Returns ------- label : object """ assert kind in ['ix', 'loc', 'getitem', None] if isinstance(label, compat.string_types): parsed = _coerce_scalar_to_timedelta_type(label, box=True) lbound = parsed.round(parsed.resolution) if side == 'left': return lbound else: return (lbound + to_offset(parsed.resolution) - Timedelta(1, 'ns')) elif is_integer(label) or is_float(label): self._invalid_indexer('slice', label) return label def _get_string_slice(self, key, use_lhs=True, use_rhs=True): freq = getattr(self, 'freqstr', getattr(self, 'inferred_freq', None)) if is_integer(key) or is_float(key) or key is NaT: self._invalid_indexer('slice', key) loc = self._partial_td_slice(key, freq, use_lhs=use_lhs, use_rhs=use_rhs) return loc def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True): # given a key, try to figure out a location for a partial slice if not isinstance(key, compat.string_types): return key raise NotImplementedError # TODO(wesm): dead code # parsed = _coerce_scalar_to_timedelta_type(key, box=True) # is_monotonic = self.is_monotonic # # figure out the resolution of the passed td # # and round to it # # t1 = parsed.round(reso) # t2 = t1 + to_offset(parsed.resolution) - Timedelta(1, 'ns') # stamps = self.asi8 # if is_monotonic: # # we are out of range # if (len(stamps) and ((use_lhs and t1.value < stamps[0] and # t2.value < stamps[0]) or # ((use_rhs and t1.value > stamps[-1] and # t2.value > stamps[-1])))): # raise KeyError # # a monotonic (sorted) series can be sliced # left = (stamps.searchsorted(t1.value, side='left') # if use_lhs else None) # right = (stamps.searchsorted(t2.value, side='right') # if use_rhs else None) # return slice(left, right) # lhs_mask = (stamps >= t1.value) if use_lhs else True # rhs_mask = (stamps <= t2.value) if use_rhs else True # # try to find a the dates # return (lhs_mask & rhs_mask).nonzero()[0] @Substitution(klass='TimedeltaIndex') @Appender(_shared_docs['searchsorted']) @deprecate_kwarg(old_arg_name='key', new_arg_name='value') def searchsorted(self, value, side='left', sorter=None): if isinstance(value, (np.ndarray, Index)): value = np.array(value, dtype=_TD_DTYPE, copy=False) else: value = _to_m8(value) return self.values.searchsorted(value, side=side, sorter=sorter) def is_type_compatible(self, typ): return typ == self.inferred_type or typ == 'timedelta' @property def inferred_type(self): return 'timedelta64' @property def dtype(self): return _TD_DTYPE @property def is_all_dates(self): return True def insert(self, loc, item): """ Make new Index inserting new item at location Parameters ---------- loc : int item : object if not either a Python datetime or a numpy integer-like, returned Index dtype will be object rather than datetime. 
Returns ------- new_index : Index """ # try to convert if possible if _is_convertible_to_td(item): try: item = Timedelta(item) except Exception: pass elif is_scalar(item) and isna(item): # GH 18295 item = self._na_value freq = None if isinstance(item, Timedelta) or (is_scalar(item) and isna(item)): # check freq can be preserved on edge cases if self.freq is not None: if ((loc == 0 or loc == -len(self)) and item + self.freq == self[0]): freq = self.freq elif (loc == len(self)) and item - self.freq == self[-1]: freq = self.freq item = _to_m8(item) try: new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)) return TimedeltaIndex(new_tds, name=self.name, freq=freq) except (AttributeError, TypeError): # fall back to object index if isinstance(item, compat.string_types): return self.astype(object).insert(loc, item) raise TypeError( "cannot insert TimedeltaIndex with incompatible label") def delete(self, loc): """ Make a new DatetimeIndex with passed location(s) deleted. Parameters ---------- loc: int, slice or array of ints Indicate which sub-arrays to remove. Returns ------- new_index : TimedeltaIndex """ new_tds = np.delete(self.asi8, loc) freq = 'infer' if is_integer(loc): if loc in (0, -len(self), -1, len(self) - 1): freq = self.freq else: if is_list_like(loc): loc = lib.maybe_indices_to_slice( _ensure_int64(np.array(loc)), len(self)) if isinstance(loc, slice) and loc.step in (1, None): if (loc.start in (0, None) or loc.stop in (len(self), None)): freq = self.freq return TimedeltaIndex(new_tds, name=self.name, freq=freq) TimedeltaIndex._add_comparison_methods() TimedeltaIndex._add_numeric_methods() TimedeltaIndex._add_logical_methods_disabled() TimedeltaIndex._add_datetimelike_methods() def _is_convertible_to_index(other): """ return a boolean whether I can attempt conversion to a TimedeltaIndex """ if isinstance(other, TimedeltaIndex): return True elif (len(other) > 0 and other.inferred_type not in ('floating', 'mixed-integer', 'integer', 'mixed-integer-float', 'mixed')): return True return False def _is_convertible_to_td(key): return isinstance(key, (DateOffset, timedelta, Timedelta, np.timedelta64, compat.string_types)) def _to_m8(key): """ Timedelta-like => dt64 """ if not isinstance(key, Timedelta): # this also converts strings key = Timedelta(key) # return an type that can be compared return np.int64(key.value).view(_TD_DTYPE) def _generate_regular_range(start, end, periods, offset): stride = offset.nanos if periods is None: b = Timedelta(start).value e = Timedelta(end).value e += stride - e % stride elif start is not None: b = Timedelta(start).value e = b + periods * stride elif end is not None: e = Timedelta(end).value + stride b = e - periods * stride else: raise ValueError("at least 'start' or 'end' should be specified " "if a 'period' is given.") data = np.arange(b, e, stride, dtype=np.int64) data = TimedeltaIndex._simple_new(data, None) return data def timedelta_range(start=None, end=None, periods=None, freq='D', name=None, closed=None): """ Return a fixed frequency TimedeltaIndex, with day as the default frequency Parameters ---------- start : string or timedelta-like, default None Left bound for generating timedeltas end : string or timedelta-like, default None Right bound for generating timedeltas periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'D' (calendar daily) Frequency strings can have multiples, e.g. 
'5H' name : string, default None Name of the resulting TimedeltaIndex closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Returns ------- rng : TimedeltaIndex Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.timedelta_range(start='1 day', periods=4) TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``closed`` parameter specifies which endpoint is included. The default behavior is to include both endpoints. >>> pd.timedelta_range(start='1 day', periods=4, closed='right') TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``freq`` parameter specifies the frequency of the TimedeltaIndex. Only fixed frequencies can be passed, non-fixed frequencies such as 'M' (month end) will raise. >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H') TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', '1 days 18:00:00', '2 days 00:00:00'], dtype='timedelta64[ns]', freq='6H') """ return TimedeltaIndex(start=start, end=end, periods=periods, freq=freq, name=name, closed=closed)
from __future__ import division import pytest import numpy as np from datetime import timedelta from pandas import ( Interval, IntervalIndex, Timestamp, Timedelta, DateOffset, interval_range, date_range, timedelta_range) from pandas.tseries.offsets import Day import pandas.util.testing as tm import pandas as pd @pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) def closed(request): return request.param @pytest.fixture(scope='class', params=[None, 'foo']) def name(request): return request.param class TestIntervalRange(object): def test_construction_from_numeric(self, closed, name): # combinations of start/end/periods without freq expected = IntervalIndex.from_breaks( np.arange(0, 6), name=name, closed=closed) result = interval_range(start=0, end=5, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(start=0, periods=5, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(end=5, periods=5, name=name, closed=closed) tm.assert_index_equal(result, expected) # combinations of start/end/periods with freq expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)], name=name, closed=closed) result = interval_range(start=0, end=6, freq=2, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(start=0, periods=3, freq=2, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(end=6, periods=3, freq=2, name=name, closed=closed) tm.assert_index_equal(result, expected) # output truncates early if freq causes end to be skipped. expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)], name=name, closed=closed) result = interval_range(start=0, end=4, freq=1.5, name=name, closed=closed) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('tz', [None, 'US/Eastern']) def test_construction_from_timestamp(self, closed, name, tz): # combinations of start/end/periods without freq start = Timestamp('2017-01-01', tz=tz) end = Timestamp('2017-01-06', tz=tz) breaks = date_range(start=start, end=end) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) result = interval_range(start=start, end=end, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(start=start, periods=5, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(end=end, periods=5, name=name, closed=closed) tm.assert_index_equal(result, expected) # combinations of start/end/periods with fixed freq freq = '2D' start = Timestamp('2017-01-01', tz=tz) end = Timestamp('2017-01-07', tz=tz) breaks = date_range(start=start, end=end, freq=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) result = interval_range(start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(start=start, periods=3, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(end=end, periods=3, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) # output truncates early if freq causes end to be skipped. 
end = Timestamp('2017-01-08', tz=tz) result = interval_range(start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) # combinations of start/end/periods with non-fixed freq freq = 'M' start = Timestamp('2017-01-01', tz=tz) end = Timestamp('2017-12-31', tz=tz) breaks = date_range(start=start, end=end, freq=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) result = interval_range(start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(start=start, periods=11, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(end=end, periods=11, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) # output truncates early if freq causes end to be skipped. end = Timestamp('2018-01-15', tz=tz) result = interval_range(start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) def test_construction_from_timedelta(self, closed, name): # combinations of start/end/periods without freq start, end = Timedelta('1 day'), Timedelta('6 days') breaks = timedelta_range(start=start, end=end) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) result = interval_range(start=start, end=end, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(start=start, periods=5, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(end=end, periods=5, name=name, closed=closed) tm.assert_index_equal(result, expected) # combinations of start/end/periods with fixed freq freq = '2D' start, end = Timedelta('1 day'), Timedelta('7 days') breaks = timedelta_range(start=start, end=end, freq=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) result = interval_range(start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(start=start, periods=3, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) result = interval_range(end=end, periods=3, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) # output truncates early if freq causes end to be skipped. 
end = Timedelta('7 days 1 hour') result = interval_range(start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) def test_constructor_coverage(self): # float value for periods expected = pd.interval_range(start=0, periods=10) result = pd.interval_range(start=0, periods=10.5) tm.assert_index_equal(result, expected) # equivalent timestamp-like start/end start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15') expected = pd.interval_range(start=start, end=end) result = pd.interval_range(start=start.to_pydatetime(), end=end.to_pydatetime()) tm.assert_index_equal(result, expected) result = pd.interval_range(start=start.asm8, end=end.asm8) tm.assert_index_equal(result, expected) # equivalent freq with timestamp equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1), DateOffset(days=1)] for freq in equiv_freq: result = pd.interval_range(start=start, end=end, freq=freq) tm.assert_index_equal(result, expected) # equivalent timedelta-like start/end start, end = Timedelta(days=1), Timedelta(days=10) expected = pd.interval_range(start=start, end=end) result = pd.interval_range(start=start.to_pytimedelta(), end=end.to_pytimedelta()) tm.assert_index_equal(result, expected) result = pd.interval_range(start=start.asm8, end=end.asm8) tm.assert_index_equal(result, expected) # equivalent freq with timedelta equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)] for freq in equiv_freq: result = pd.interval_range(start=start, end=end, freq=freq) tm.assert_index_equal(result, expected) def test_errors(self): # not enough params msg = ('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') with tm.assert_raises_regex(ValueError, msg): interval_range(start=0) with tm.assert_raises_regex(ValueError, msg): interval_range(end=5) with tm.assert_raises_regex(ValueError, msg): interval_range(periods=2) with tm.assert_raises_regex(ValueError, msg): interval_range() # too many params with tm.assert_raises_regex(ValueError, msg): interval_range(start=0, end=5, periods=6) # mixed units msg = 'start, end, freq need to be type compatible' with tm.assert_raises_regex(TypeError, msg): interval_range(start=0, end=Timestamp('20130101'), freq=2) with tm.assert_raises_regex(TypeError, msg): interval_range(start=0, end=Timedelta('1 day'), freq=2) with tm.assert_raises_regex(TypeError, msg): interval_range(start=0, end=10, freq='D') with tm.assert_raises_regex(TypeError, msg): interval_range(start=Timestamp('20130101'), end=10, freq='D') with tm.assert_raises_regex(TypeError, msg): interval_range(start=Timestamp('20130101'), end=Timedelta('1 day'), freq='D') with tm.assert_raises_regex(TypeError, msg): interval_range(start=Timestamp('20130101'), end=Timestamp('20130110'), freq=2) with tm.assert_raises_regex(TypeError, msg): interval_range(start=Timedelta('1 day'), end=10, freq='D') with tm.assert_raises_regex(TypeError, msg): interval_range(start=Timedelta('1 day'), end=Timestamp('20130110'), freq='D') with tm.assert_raises_regex(TypeError, msg): interval_range(start=Timedelta('1 day'), end=Timedelta('10 days'), freq=2) # invalid periods msg = 'periods must be a number, got foo' with tm.assert_raises_regex(TypeError, msg): interval_range(start=0, periods='foo') # invalid start msg = 'start must be numeric or datetime-like, got foo' with tm.assert_raises_regex(ValueError, msg): interval_range(start='foo', periods=10) # invalid end msg = r'end must be numeric or datetime-like, got \(0, 1\]' with tm.assert_raises_regex(ValueError, msg): 
interval_range(end=Interval(0, 1), periods=10) # invalid freq for datetime-like msg = 'freq must be numeric or convertible to DateOffset, got foo' with tm.assert_raises_regex(ValueError, msg): interval_range(start=0, end=10, freq='foo') with tm.assert_raises_regex(ValueError, msg): interval_range(start=Timestamp('20130101'), periods=10, freq='foo') with tm.assert_raises_regex(ValueError, msg): interval_range(end=Timedelta('1 day'), periods=10, freq='foo') # mixed tz start = Timestamp('2017-01-01', tz='US/Eastern') end = Timestamp('2017-01-07', tz='US/Pacific') msg = 'Start and end cannot both be tz-aware with different timezones' with tm.assert_raises_regex(TypeError, msg): interval_range(start=start, end=end)
zfrenchee/pandas
pandas/tests/indexes/interval/test_interval_range.py
pandas/core/indexes/timedeltas.py
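The timedeltas module above ties TimedeltaIndex construction, the _field_accessor properties, and timedelta_range together, and the paired tests exercise interval_range with timedelta-like endpoints. A minimal usage sketch, consistent with the docstrings and tests shown (exact reprs vary by pandas version):

import pandas as pd

# Fixed-frequency index built via _generate_regular_range
tdi = pd.timedelta_range(start='1 day', periods=4, freq='D')
# TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], freq='D')

# Accessors defined through _field_accessor return plain integer Indexes
tdi.days       # Int64Index([1, 2, 3, 4], dtype='int64')
tdi.seconds    # Int64Index([0, 0, 0, 0], dtype='int64')

# interval_range accepts timedelta-like start/end/freq, as the tests verify
breaks = pd.interval_range(start=pd.Timedelta('1 day'),
                           end=pd.Timedelta('7 days'), freq='2D')
# IntervalIndex of (1 days, 3 days], (3 days, 5 days], (5 days, 7 days]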
# -*- encoding: utf-8 -*- from supriya.tools.ugentools.PureUGen import PureUGen class VOsc3(PureUGen): r''' :: >>> vosc_3 = ugentools.VOsc3.ar( ... bufpos=bufpos, ... freq_1=110, ... freq_2=220, ... freq_3=440, ... ) >>> vosc_3 VOsc3.ar() ''' ### CLASS VARIABLES ### __documentation_section__ = None __slots__ = () _ordered_input_names = ( 'bufpos', 'freq_1', 'freq_2', 'freq_3', ) _valid_calculation_rates = None ### INITIALIZER ### def __init__( self, calculation_rate=None, bufpos=None, freq_1=110, freq_2=220, freq_3=440, ): PureUGen.__init__( self, calculation_rate=calculation_rate, bufpos=bufpos, freq_1=freq_1, freq_2=freq_2, freq_3=freq_3, ) ### PUBLIC METHODS ### @classmethod def ar( cls, bufpos=None, freq_1=110, freq_2=220, freq_3=440, ): r'''Constructs an audio-rate VOsc3. :: >>> vosc_3 = ugentools.VOsc3.ar( ... bufpos=bufpos, ... freq_1=110, ... freq_2=220, ... freq_3=440, ... ) >>> vosc_3 VOsc3.ar() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.AUDIO ugen = cls._new_expanded( calculation_rate=calculation_rate, bufpos=bufpos, freq_1=freq_1, freq_2=freq_2, freq_3=freq_3, ) return ugen @classmethod def kr( cls, bufpos=None, freq_1=110, freq_2=220, freq_3=440, ): r'''Constructs a control-rate VOsc3. :: >>> vosc_3 = ugentools.VOsc3.kr( ... bufpos=bufpos, ... freq_1=110, ... freq_2=220, ... freq_3=440, ... ) >>> vosc_3 VOsc3.kr() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.CONTROL ugen = cls._new_expanded( calculation_rate=calculation_rate, bufpos=bufpos, freq_1=freq_1, freq_2=freq_2, freq_3=freq_3, ) return ugen ### PUBLIC PROPERTIES ### @property def bufpos(self): r'''Gets `bufpos` input of VOsc3. :: >>> vosc_3 = ugentools.VOsc3.ar( ... bufpos=bufpos, ... freq_1=110, ... freq_2=220, ... freq_3=440, ... ) >>> vosc_3.bufpos Returns ugen input. ''' index = self._ordered_input_names.index('bufpos') return self._inputs[index] @property def freq_1(self): r'''Gets `freq_1` input of VOsc3. :: >>> vosc_3 = ugentools.VOsc3.ar( ... bufpos=bufpos, ... freq_1=110, ... freq_2=220, ... freq_3=440, ... ) >>> vosc_3.freq_1 110.0 Returns ugen input. ''' index = self._ordered_input_names.index('freq_1') return self._inputs[index] @property def freq_2(self): r'''Gets `freq_2` input of VOsc3. :: >>> vosc_3 = ugentools.VOsc3.ar( ... bufpos=bufpos, ... freq_1=110, ... freq_2=220, ... freq_3=440, ... ) >>> vosc_3.freq_2 220.0 Returns ugen input. ''' index = self._ordered_input_names.index('freq_2') return self._inputs[index] @property def freq_3(self): r'''Gets `freq_3` input of VOsc3. :: >>> vosc_3 = ugentools.VOsc3.ar( ... bufpos=bufpos, ... freq_1=110, ... freq_2=220, ... freq_3=440, ... ) >>> vosc_3.freq_3 440.0 Returns ugen input. ''' index = self._ordered_input_names.index('freq_3') return self._inputs[index]
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/pendingugentools/VOsc3.py
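The VOsc3 doctests above reference a bufpos name that is never defined, and the file still sits in pendingugentools with its documentation section set to None. A minimal sketch, substituting a constant buffer position purely for illustration (the value is hypothetical, not from the source):

from supriya.tools import ugentools

bufpos = 0  # hypothetical: position into a set of wavetable buffers
vosc_3 = ugentools.VOsc3.ar(
    bufpos=bufpos,
    freq_1=110,
    freq_2=220,
    freq_3=440,
    )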
# -*- encoding: utf-8 -*- from supriya.tools.servertools.BusGroup import BusGroup class AudioInputBusGroup(BusGroup): ### CLASS VARIABLES ### __documentation_section__ = 'Server Internals' __slots__ = () ### INITIALIZER ### def __init__( self, server, ): from supriya.tools import servertools from supriya.tools import synthdeftools assert isinstance(server, servertools.Server) assert server.is_running self._server = server bus_id = server.server_options.output_bus_channel_count bus_count = server.server_options.input_bus_channel_count calculation_rate = synthdeftools.CalculationRate.AUDIO BusGroup.__init__( self, bus_count=bus_count, calculation_rate=calculation_rate, ) self._bus_id = bus_id ### PUBLIC METHODS ### def allocate(self): pass def free(self): pass
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/servertools/AudioInputBusGroup.py
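AudioInputBusGroup carries no doctest; its initializer shows the whole contract: it needs a running Server and derives its bus range from the server options, starting immediately after the hardware output buses. A sketch, assuming a local scsynth can be booted and that the class is exported from servertools as usual:

from supriya.tools import servertools

server = servertools.Server().boot()
input_buses = servertools.AudioInputBusGroup(server)
# bus ids begin at server.server_options.output_bus_channel_count and cover
# server.server_options.input_bus_channel_count audio-rate buses;
# allocate() and free() are deliberately no-ops because scsynth owns them.
server.quit()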
# -*- encoding: utf-8 -*- from supriya.tools.ugentools.UGen import UGen class Pluck(UGen): r'''A Karplus-String UGen. :: >>> source = ugentools.WhiteNoise.ar() >>> trigger = ugentools.Dust.kr(2) >>> pluck = ugentools.Pluck.ar( ... coefficient=0.5, ... decay_time=1, ... delay_time=0.2, ... maximum_delay_time=0.2, ... source=source, ... trigger=trigger, ... ) >>> pluck Pluck.ar() ''' ### CLASS VARIABLES ### __documentation_section__ = 'Delay UGens' __slots__ = () _ordered_input_names = ( 'source', 'trigger', 'maximum_delay_time', 'delay_time', 'decay_time', 'coefficient', ) _valid_calculation_rates = None ### INITIALIZER ### def __init__( self, calculation_rate=None, coefficient=0.5, decay_time=1, delay_time=0.2, maximum_delay_time=0.2, source=None, trigger=None, ): UGen.__init__( self, calculation_rate=calculation_rate, coefficient=coefficient, decay_time=decay_time, delay_time=delay_time, maximum_delay_time=maximum_delay_time, source=source, trigger=trigger, ) ### PUBLIC METHODS ### @classmethod def ar( cls, coefficient=0.5, decay_time=1, delay_time=0.2, maximum_delay_time=0.2, source=None, trigger=None, ): r'''Constructs an audio-rate Pluck. :: >>> source = ugentools.WhiteNoise.ar() >>> trigger = ugentools.Dust.kr(2) >>> pluck = ugentools.Pluck.ar( ... coefficient=0.5, ... decay_time=1, ... delay_time=0.2, ... maximum_delay_time=0.2, ... source=source, ... trigger=trigger, ... ) >>> pluck Pluck.ar() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.AUDIO ugen = cls._new_expanded( calculation_rate=calculation_rate, coefficient=coefficient, decay_time=decay_time, delay_time=delay_time, maximum_delay_time=maximum_delay_time, source=source, trigger=trigger, ) return ugen ### PUBLIC PROPERTIES ### @property def coefficient(self): r'''Gets `coefficient` input of Pluck. :: >>> source = ugentools.WhiteNoise.ar() >>> trigger = ugentools.Dust.kr(2) >>> pluck = ugentools.Pluck.ar( ... coefficient=0.5, ... decay_time=1, ... delay_time=0.2, ... maximum_delay_time=0.2, ... source=source, ... trigger=trigger, ... ) >>> pluck.coefficient 0.5 Returns ugen input. ''' index = self._ordered_input_names.index('coefficient') return self._inputs[index] @property def decay_time(self): r'''Gets `decay_time` input of Pluck. :: >>> source = ugentools.WhiteNoise.ar() >>> trigger = ugentools.Dust.kr(2) >>> pluck = ugentools.Pluck.ar( ... coefficient=0.5, ... decay_time=1, ... delay_time=0.2, ... maximum_delay_time=0.2, ... source=source, ... trigger=trigger, ... ) >>> pluck.decay_time 1.0 Returns ugen input. ''' index = self._ordered_input_names.index('decay_time') return self._inputs[index] @property def delay_time(self): r'''Gets `delay_time` input of Pluck. :: >>> source = ugentools.WhiteNoise.ar() >>> trigger = ugentools.Dust.kr(2) >>> pluck = ugentools.Pluck.ar( ... coefficient=0.5, ... decay_time=1, ... delay_time=0.2, ... maximum_delay_time=0.2, ... source=source, ... trigger=trigger, ... ) >>> pluck.delay_time 0.2 Returns ugen input. ''' index = self._ordered_input_names.index('delay_time') return self._inputs[index] @property def maximum_delay_time(self): r'''Gets `maximum_delay_time` input of Pluck. :: >>> source = ugentools.WhiteNoise.ar() >>> trigger = ugentools.Dust.kr(2) >>> pluck = ugentools.Pluck.ar( ... coefficient=0.5, ... decay_time=1, ... delay_time=0.2, ... maximum_delay_time=0.2, ... source=source, ... trigger=trigger, ... ) >>> pluck.maximum_delay_time 0.2 Returns ugen input. 
''' index = self._ordered_input_names.index('maximum_delay_time') return self._inputs[index] @property def source(self): r'''Gets `source` input of Pluck. :: >>> source = ugentools.WhiteNoise.ar() >>> trigger = ugentools.Dust.kr(2) >>> pluck = ugentools.Pluck.ar( ... coefficient=0.5, ... decay_time=1, ... delay_time=0.2, ... maximum_delay_time=0.2, ... source=source, ... trigger=trigger, ... ) >>> pluck.source OutputProxy( source=WhiteNoise( calculation_rate=CalculationRate.AUDIO ), output_index=0 ) Returns ugen input. ''' index = self._ordered_input_names.index('source') return self._inputs[index] @property def trigger(self): r'''Gets `trigger` input of Pluck. :: >>> source = ugentools.WhiteNoise.ar() >>> trigger = ugentools.Dust.kr(2) >>> pluck = ugentools.Pluck.ar( ... coefficient=0.5, ... decay_time=1, ... delay_time=0.2, ... maximum_delay_time=0.2, ... source=source, ... trigger=trigger, ... ) >>> pluck.trigger OutputProxy( source=Dust( calculation_rate=CalculationRate.CONTROL, density=2.0 ), output_index=0 ) Returns ugen input. ''' index = self._ordered_input_names.index('trigger') return self._inputs[index]
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/ugentools/Pluck.py
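Pluck's doctests excite the string with white noise and a fixed 0.2-second delay; in Karplus-Strong terms the perceived pitch is the reciprocal of the delay time. A sketch (values not from the source) tuning the string to 440 Hz:

from supriya.tools import ugentools

frequency = 440.0
source = ugentools.WhiteNoise.ar()   # excitation noise
trigger = ugentools.Dust.kr(2)       # random re-pluck triggers
pluck = ugentools.Pluck.ar(
    coefficient=0.5,
    decay_time=3,
    delay_time=1.0 / frequency,
    maximum_delay_time=1.0 / frequency,
    source=source,
    trigger=trigger,
    )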
# -*- encoding: utf-8 -*- from supriya.tools.ugentools.UGen import UGen class HenonN(UGen): r'''A non-interpolating henon map chaotic generator. :: >>> henon_n = ugentools.HenonN.ar( ... a=1.4, ... b=0.3, ... frequency=22050, ... x_0=0, ... x_1=0, ... ) >>> henon_n HenonN.ar() ''' ### CLASS VARIABLES ### __documentation_section__ = 'Chaos UGens' __slots__ = () _ordered_input_names = ( 'frequency', 'a', 'b', 'x_0', 'x_1', ) _valid_calculation_rates = None ### INITIALIZER ### def __init__( self, calculation_rate=None, a=1.4, b=0.3, frequency=22050, x_0=0, x_1=0, ): UGen.__init__( self, calculation_rate=calculation_rate, a=a, b=b, frequency=frequency, x_0=x_0, x_1=x_1, ) ### PUBLIC METHODS ### @classmethod def ar( cls, a=1.4, b=0.3, frequency=22050, x_0=0, x_1=0, ): r'''Constructs an audio-rate HenonN. :: >>> henon_n = ugentools.HenonN.ar( ... a=1.4, ... b=0.3, ... frequency=22050, ... x_0=0, ... x_1=0, ... ) >>> henon_n HenonN.ar() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.AUDIO ugen = cls._new_expanded( calculation_rate=calculation_rate, a=a, b=b, frequency=frequency, x_0=x_0, x_1=x_1, ) return ugen # def equation(): ... ### PUBLIC PROPERTIES ### @property def a(self): r'''Gets `a` input of HenonN. :: >>> henon_n = ugentools.HenonN.ar( ... a=1.4, ... b=0.3, ... frequency=22050, ... x_0=0, ... x_1=0, ... ) >>> henon_n.a 1.4 Returns ugen input. ''' index = self._ordered_input_names.index('a') return self._inputs[index] @property def b(self): r'''Gets `b` input of HenonN. :: >>> henon_n = ugentools.HenonN.ar( ... a=1.4, ... b=0.3, ... frequency=22050, ... x_0=0, ... x_1=0, ... ) >>> henon_n.b 0.3 Returns ugen input. ''' index = self._ordered_input_names.index('b') return self._inputs[index] @property def frequency(self): r'''Gets `frequency` input of HenonN. :: >>> henon_n = ugentools.HenonN.ar( ... a=1.4, ... b=0.3, ... frequency=22050, ... x_0=0, ... x_1=0, ... ) >>> henon_n.frequency 22050.0 Returns ugen input. ''' index = self._ordered_input_names.index('frequency') return self._inputs[index] @property def x_0(self): r'''Gets `x_0` input of HenonN. :: >>> henon_n = ugentools.HenonN.ar( ... a=1.4, ... b=0.3, ... frequency=22050, ... x_0=0, ... x_1=0, ... ) >>> henon_n.x_0 0.0 Returns ugen input. ''' index = self._ordered_input_names.index('x_0') return self._inputs[index] @property def x_1(self): r'''Gets `x_1` input of HenonN. :: >>> henon_n = ugentools.HenonN.ar( ... a=1.4, ... b=0.3, ... frequency=22050, ... x_0=0, ... x_1=0, ... ) >>> henon_n.x_1 0.0 Returns ugen input. ''' index = self._ordered_input_names.index('x_1') return self._inputs[index]
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/ugentools/HenonN.py
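HenonN samples the Hénon map, x[n + 2] = 1 - a * x[n + 1] ** 2 + b * x[n], at the given frequency without interpolating between steps; the defaults a=1.4 and b=0.3 are the classic chaotic parameter values, and x_0/x_1 seed the recurrence. A sketch (frequency value not from the source) running the map well below the audio rate so the individual steps become audible:

from supriya.tools import ugentools

henon_n = ugentools.HenonN.ar(
    a=1.4,
    b=0.3,
    frequency=2205,  # hypothetical: a tenth of the doctest's 22050 Hz
    x_0=0,
    x_1=0,
    )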
# -*- encoding: utf-8 -*- from supriya.tools.ugentools.UGen import UGen class Convolution2L(UGen): r''' :: >>> source = ugentools.In.ar(bus=0) >>> convolution_2_l = ugentools.Convolution2L.ar( ... crossfade=1, ... framesize=2048, ... kernel=kernel, ... source=source, ... trigger=0, ... ) >>> convolution_2_l Convolution2L.ar() ''' ### CLASS VARIABLES ### __documentation_section__ = None __slots__ = () _ordered_input_names = ( 'source', 'kernel', 'trigger', 'framesize', 'crossfade', ) _valid_calculation_rates = None ### INITIALIZER ### def __init__( self, calculation_rate=None, crossfade=1, framesize=2048, kernel=None, source=None, trigger=0, ): UGen.__init__( self, calculation_rate=calculation_rate, crossfade=crossfade, framesize=framesize, kernel=kernel, source=source, trigger=trigger, ) ### PUBLIC METHODS ### @classmethod def ar( cls, crossfade=1, framesize=2048, kernel=None, source=None, trigger=0, ): r'''Constructs an audio-rate Convolution2L. :: >>> source = ugentools.In.ar(bus=0) >>> convolution_2_l = ugentools.Convolution2L.ar( ... crossfade=1, ... framesize=2048, ... kernel=kernel, ... source=source, ... trigger=0, ... ) >>> convolution_2_l Convolution2L.ar() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.AUDIO ugen = cls._new_expanded( calculation_rate=calculation_rate, crossfade=crossfade, framesize=framesize, kernel=kernel, source=source, trigger=trigger, ) return ugen ### PUBLIC PROPERTIES ### @property def crossfade(self): r'''Gets `crossfade` input of Convolution2L. :: >>> source = ugentools.In.ar(bus=0) >>> convolution_2_l = ugentools.Convolution2L.ar( ... crossfade=1, ... framesize=2048, ... kernel=kernel, ... source=source, ... trigger=0, ... ) >>> convolution_2_l.crossfade 1.0 Returns ugen input. ''' index = self._ordered_input_names.index('crossfade') return self._inputs[index] @property def framesize(self): r'''Gets `framesize` input of Convolution2L. :: >>> source = ugentools.In.ar(bus=0) >>> convolution_2_l = ugentools.Convolution2L.ar( ... crossfade=1, ... framesize=2048, ... kernel=kernel, ... source=source, ... trigger=0, ... ) >>> convolution_2_l.framesize 2048.0 Returns ugen input. ''' index = self._ordered_input_names.index('framesize') return self._inputs[index] @property def kernel(self): r'''Gets `kernel` input of Convolution2L. :: >>> source = ugentools.In.ar(bus=0) >>> convolution_2_l = ugentools.Convolution2L.ar( ... crossfade=1, ... framesize=2048, ... kernel=kernel, ... source=source, ... trigger=0, ... ) >>> convolution_2_l.kernel Returns ugen input. ''' index = self._ordered_input_names.index('kernel') return self._inputs[index] @property def source(self): r'''Gets `source` input of Convolution2L. :: >>> source = ugentools.In.ar(bus=0) >>> convolution_2_l = ugentools.Convolution2L.ar( ... crossfade=1, ... framesize=2048, ... kernel=kernel, ... source=source, ... trigger=0, ... ) >>> convolution_2_l.source OutputProxy( source=In( bus=0.0, calculation_rate=CalculationRate.AUDIO, channel_count=1 ), output_index=0 ) Returns ugen input. ''' index = self._ordered_input_names.index('source') return self._inputs[index] @property def trigger(self): r'''Gets `trigger` input of Convolution2L. :: >>> source = ugentools.In.ar(bus=0) >>> convolution_2_l = ugentools.Convolution2L.ar( ... crossfade=1, ... framesize=2048, ... kernel=kernel, ... source=source, ... trigger=0, ... ) >>> convolution_2_l.trigger 0.0 Returns ugen input. 
''' index = self._ordered_input_names.index('trigger') return self._inputs[index]
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/pendingugentools/Convolution2L.py
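Like VOsc3 earlier, the Convolution2L doctests reference an undefined name: kernel, which identifies the buffer holding the impulse response to convolve with. A sketch substituting a placeholder buffer number (hypothetical, not from the source):

from supriya.tools import ugentools

kernel = 0  # hypothetical: buffer 0 holds the impulse response
source = ugentools.In.ar(bus=0)
convolution_2_l = ugentools.Convolution2L.ar(
    crossfade=1,       # periods to crossfade over when the kernel changes
    framesize=2048,    # FFT partition size in samples
    kernel=kernel,
    source=source,
    trigger=0,         # a transition above zero re-reads the kernel buffer
    )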
# -*- encoding: utf-8 -*- from supriya.tools.ugentools.UGen import UGen class RecordBuf(UGen): r'''Records or overdubs into a buffer. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf RecordBuf.ar() ''' ### CLASS VARIABLES ### __documentation_section__ = 'Buffer UGens' __slots__ = () _ordered_input_names = ( 'buffer_id', 'offset', 'record_level', 'preexisting_level', 'run', 'loop', 'trigger', 'done_action', 'source', ) _unexpanded_input_names = ( 'source', ) _valid_calculation_rates = None ### INITIALIZER ### def __init__( self, calculation_rate=None, buffer_id=None, done_action=0, source=None, loop=1, offset=0, preexisting_level=0, record_level=1, run=1, trigger=1, ): UGen.__init__( self, calculation_rate=calculation_rate, buffer_id=buffer_id, done_action=done_action, source=source, loop=loop, offset=offset, preexisting_level=preexisting_level, record_level=record_level, run=run, trigger=trigger, ) ### PUBLIC METHODS ### @classmethod def ar( cls, buffer_id=None, done_action=0, source=None, loop=1, offset=0, preexisting_level=0, record_level=1, run=1, trigger=1, ): r'''Constructs an audio-rate RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf RecordBuf.ar() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.AUDIO ugen = cls._new_expanded( calculation_rate=calculation_rate, buffer_id=buffer_id, done_action=done_action, source=source, loop=loop, offset=offset, preexisting_level=preexisting_level, record_level=record_level, run=run, trigger=trigger, ) return ugen @classmethod def kr( cls, buffer_id=None, done_action=0, source=None, loop=1, offset=0, preexisting_level=0, record_level=1, run=1, trigger=1, ): r'''Constructs a control-rate RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.kr( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf RecordBuf.kr() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.CONTROL ugen = cls._new_expanded( calculation_rate=calculation_rate, buffer_id=buffer_id, done_action=done_action, source=source, loop=loop, offset=offset, preexisting_level=preexisting_level, record_level=record_level, run=run, trigger=trigger, ) return ugen ### PUBLIC PROPERTIES ### @property def buffer_id(self): r'''Gets `buffer_id` input of RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.buffer_id 23.0 Returns ugen input. ''' index = self._ordered_input_names.index('buffer_id') return self._inputs[index] @property def done_action(self): r'''Gets `done_action` input of RecordBuf. 
:: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.done_action 0.0 Returns ugen input. ''' index = self._ordered_input_names.index('done_action') return self._inputs[index] @property def has_done_flag(self): r'''Is true if UGen has a done flag. Returns boolean. ''' return True @property def source(self): r'''Gets `source` input of RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.source OutputProxy( source=In( bus=OutputProxy( source=NumOutputBuses( calculation_rate=CalculationRate.SCALAR ), output_index=0 ), calculation_rate=CalculationRate.AUDIO, channel_count=2 ), output_index=0 ) Returns ugen input. ''' index = self._ordered_input_names.index('source') return self._inputs[index] @property def loop(self): r'''Gets `loop` input of RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.loop 1.0 Returns ugen input. ''' index = self._ordered_input_names.index('loop') return self._inputs[index] @property def offset(self): r'''Gets `offset` input of RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.offset 0.0 Returns ugen input. ''' index = self._ordered_input_names.index('offset') return self._inputs[index] @property def preexisting_level(self): r'''Gets `preexisting_level` input of RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.preexisting_level 0.0 Returns ugen input. ''' index = self._ordered_input_names.index('preexisting_level') return self._inputs[index] @property def record_level(self): r'''Gets `record_level` input of RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.record_level 1.0 Returns ugen input. ''' index = self._ordered_input_names.index('record_level') return self._inputs[index] @property def run(self): r'''Gets `run` input of RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.run 1.0 Returns ugen input. 
''' index = self._ordered_input_names.index('run') return self._inputs[index] @property def trigger(self): r'''Gets `trigger` input of RecordBuf. :: >>> buffer_id = 23 >>> source = ugentools.SoundIn.ar(bus=(0, 1)) >>> record_buf = ugentools.RecordBuf.ar( ... buffer_id=buffer_id, ... done_action=0, ... loop=1, ... offset=0, ... preexisting_level=0, ... record_level=1, ... run=1, ... source=source, ... trigger=1, ... ) >>> record_buf.trigger 1.0 Returns ugen input. ''' index = self._ordered_input_names.index('trigger') return self._inputs[index]
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/ugentools/RecordBuf.py
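A minimal sketch, not taken from the record above, showing how the RecordBuf UGen might be wrapped in a synthdef using the SynthDefBuilder pattern that appears later in this dataset (supriya/synthdefs/multiband_compressor.py); the control name buffer_id and its default of 0 are illustrative assumptions.

# Sketch only; mirrors the SynthDefBuilder usage seen elsewhere in this
# repository. The builder parameter and its default are assumptions.
from supriya import SynthDefBuilder
from supriya.tools import ugentools

def _make_recorder():
    with SynthDefBuilder(buffer_id=0) as builder:
        source = ugentools.SoundIn.ar(bus=(0, 1))
        ugentools.RecordBuf.ar(
            buffer_id=builder['buffer_id'],
            source=source,
            loop=1,
            record_level=1,
            )
    return builder.build()

recorder_synthdef = _make_recorder()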
# -*- encoding: utf-8 -*- from supriya.tools.ugentools.Filter import Filter class LPZ2(Filter): r'''A two zero fixed lowpass filter. :: >>> source = ugentools.In.ar(bus=0) >>> lpz_2 = ugentools.LPZ2.ar( ... source=source, ... ) >>> lpz_2 LPZ2.ar() ''' ### CLASS VARIABLES ### __documentation_section__ = 'Filter UGens' __slots__ = () _ordered_input_names = ( 'source', ) _valid_calculation_rates = None ### INITIALIZER ### def __init__( self, calculation_rate=None, source=None, ): Filter.__init__( self, calculation_rate=calculation_rate, source=source, ) ### PUBLIC METHODS ### @classmethod def ar( cls, source=None, ): r'''Constructs an audio-rate LPZ2. :: >>> source = ugentools.In.ar(bus=0) >>> lpz_2 = ugentools.LPZ2.ar( ... source=source, ... ) >>> lpz_2 LPZ2.ar() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.AUDIO ugen = cls._new_expanded( calculation_rate=calculation_rate, source=source, ) return ugen # def coeffs(): ... @classmethod def kr( cls, source=None, ): r'''Constructs a control-rate LPZ2. :: >>> source = ugentools.In.ar(bus=0) >>> lpz_2 = ugentools.LPZ2.kr( ... source=source, ... ) >>> lpz_2 LPZ2.kr() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.CONTROL ugen = cls._new_expanded( calculation_rate=calculation_rate, source=source, ) return ugen # def magResponse(): ... # def magResponse2(): ... # def magResponse5(): ... # def magResponseN(): ... # def scopeResponse(): ... ### PUBLIC PROPERTIES ### @property def source(self): r'''Gets `source` input of LPZ2. :: >>> source = ugentools.In.ar(bus=0) >>> lpz_2 = ugentools.LPZ2.ar( ... source=source, ... ) >>> lpz_2.source OutputProxy( source=In( bus=0.0, calculation_rate=CalculationRate.AUDIO, channel_count=1 ), output_index=0 ) Returns ugen input. ''' index = self._ordered_input_names.index('source') return self._inputs[index]
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/ugentools/LPZ2.py
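For orientation only, a small sketch (not from the source file) cascading LPZ2 with itself for steeper attenuation; it reuses exactly the In/ugentools calls shown in the docstrings above.

# Sketch only: two passes of the fixed two-zero lowpass over one input.
from supriya.tools import ugentools

source = ugentools.In.ar(bus=0)
once = ugentools.LPZ2.ar(source=source)
twice = ugentools.LPZ2.ar(source=once)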
# -*- encoding: utf-8 -*- from supriya.tools.ugentools.PV_MagSquared import PV_MagSquared class PV_PhaseShift90(PV_MagSquared): r'''Shifts phase by 90 degrees. :: >>> pv_chain = ugentools.FFT( ... source=ugentools.WhiteNoise.ar(), ... ) >>> pv_phase_shift_90 = ugentools.PV_PhaseShift90( ... pv_chain=pv_chain, ... ) >>> pv_phase_shift_90 PV_PhaseShift90.kr() ''' ### CLASS VARIABLES ### __documentation_section__ = 'FFT UGens' __slots__ = () _ordered_input_names = ( 'pv_chain', ) ### INITIALIZER ### def __init__( self, pv_chain=None, ): PV_MagSquared.__init__( self, pv_chain=pv_chain, ) ### PUBLIC METHODS ### @classmethod def new( cls, pv_chain=None, ): r'''Constructs a PV_PhaseShift90. :: >>> pv_chain = ugentools.FFT( ... source=ugentools.WhiteNoise.ar(), ... ) >>> pv_phase_shift_90 = ugentools.PV_PhaseShift90.new( ... pv_chain=pv_chain, ... ) >>> pv_phase_shift_90 PV_PhaseShift90.kr() Returns ugen graph. ''' ugen = cls._new_expanded( pv_chain=pv_chain, ) return ugen ### PUBLIC PROPERTIES ### @property def pv_chain(self): r'''Gets `pv_chain` input of PV_PhaseShift90. :: >>> pv_chain = ugentools.FFT( ... source=ugentools.WhiteNoise.ar(), ... ) >>> pv_phase_shift_90 = ugentools.PV_PhaseShift90( ... pv_chain=pv_chain, ... ) >>> pv_phase_shift_90.pv_chain OutputProxy( source=FFT( buffer_id=OutputProxy( source=LocalBuf( frame_count=2048.0, channel_count=1.0, calculation_rate=CalculationRate.SCALAR ), output_index=0 ), source=OutputProxy( source=WhiteNoise( calculation_rate=CalculationRate.AUDIO ), output_index=0 ), active=1.0, hop=0.5, window_size=0.0, window_type=0.0 ), output_index=0 ) Returns ugen input. ''' index = self._ordered_input_names.index('pv_chain') return self._inputs[index]
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/ugentools/PV_PhaseShift90.py
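A hedged sketch of a typical phase-vocoder chain around PV_PhaseShift90. The FFT and PV_PhaseShift90 calls follow the docstrings above; the final IFFT resynthesis step is an assumption (supriya is presumed to expose an IFFT UGen with a pv_chain input, as SuperCollider does) and is not taken from the file.

# Sketch only. The IFFT call below is assumed, not confirmed by the source.
from supriya.tools import ugentools

pv_chain = ugentools.FFT(source=ugentools.WhiteNoise.ar())
shifted = ugentools.PV_PhaseShift90(pv_chain=pv_chain)
resynthesized = ugentools.IFFT.ar(pv_chain=shifted)  # assumed UGen and signature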
# -*- encoding: utf-8 -*- from supriya.tools.ugentools.LPZ2 import LPZ2 class HPZ2(LPZ2): r'''A two zero fixed midcut filter. :: >>> source = ugentools.In.ar(bus=0) >>> hpz_2 = ugentools.HPZ2.ar( ... source=source, ... ) >>> hpz_2 HPZ2.ar() ''' ### CLASS VARIABLES ### __documentation_section__ = 'Filter UGens' __slots__ = () _ordered_input_names = ( 'source', ) _valid_calculation_rates = None ### INITIALIZER ### def __init__( self, calculation_rate=None, source=None, ): LPZ2.__init__( self, calculation_rate=calculation_rate, source=source, ) ### PUBLIC METHODS ### @classmethod def ar( cls, source=None, ): r'''Constructs an audio-rate HPZ2. :: >>> source = ugentools.In.ar(bus=0) >>> hpz_2 = ugentools.HPZ2.ar( ... source=source, ... ) >>> hpz_2 HPZ2.ar() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.AUDIO ugen = cls._new_expanded( calculation_rate=calculation_rate, source=source, ) return ugen # def coeffs(): ... @classmethod def kr( cls, source=None, ): r'''Constructs a control-rate HPZ2. :: >>> source = ugentools.In.ar(bus=0) >>> hpz_2 = ugentools.HPZ2.kr( ... source=source, ... ) >>> hpz_2 HPZ2.kr() Returns ugen graph. ''' from supriya.tools import synthdeftools calculation_rate = synthdeftools.CalculationRate.CONTROL ugen = cls._new_expanded( calculation_rate=calculation_rate, source=source, ) return ugen # def magResponse(): ... # def magResponse2(): ... # def magResponse5(): ... # def magResponseN(): ... # def scopeResponse(): ... ### PUBLIC PROPERTIES ### @property def source(self): r'''Gets `source` input of HPZ2. :: >>> source = ugentools.In.ar(bus=0) >>> hpz_2 = ugentools.HPZ2.ar( ... source=source, ... ) >>> hpz_2.source OutputProxy( source=In( bus=0.0, calculation_rate=CalculationRate.AUDIO, channel_count=1 ), output_index=0 ) Returns ugen input. ''' index = self._ordered_input_names.index('source') return self._inputs[index]
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/tools/ugentools/HPZ2.py
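Purely illustrative: HPZ2 and its lowpass counterpart LPZ2 (defined earlier in this dataset) applied to the same signal give a rough high/low split, similar in spirit to the band splitting done in the multiband compressor synthdef that follows.

# Sketch only: complementary fixed two-zero filters over a single input.
from supriya.tools import ugentools

source = ugentools.In.ar(bus=0)
low_band = ugentools.LPZ2.ar(source=source)
high_band = ugentools.HPZ2.ar(source=source)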
# -*- encoding: utf-8 -*-
from supriya import *


def _make_synthdef():
    with SynthDefBuilder(
        frequency_1=100,
        frequency_2=1000,
        frequency_3=3000,
        band_1_gain=1,
        band_2_gain=1,
        band_3_gain=1,
        band_4_gain=1,
        band_1_clamp_time=0.01,
        band_2_clamp_time=0.01,
        band_3_clamp_time=0.01,
        band_4_clamp_time=0.01,
        band_1_relax_time=0.1,
        band_2_relax_time=0.1,
        band_3_relax_time=0.1,
        band_4_relax_time=0.1,
        band_1_slope=0.5,
        band_2_slope=0.5,
        band_3_slope=0.5,
        band_4_slope=0.5,
        band_1_threshold=0.9,
        band_2_threshold=0.9,
        band_3_threshold=0.9,
        band_4_threshold=0.9,
        ) as builder:
        source = ugentools.In.ar(bus=0, channel_count=2)
        band_1 = ugentools.LPF.ar(
            frequency=builder['frequency_1'],
            source=source,
            )
        band_4 = ugentools.HPF.ar(
            frequency=builder['frequency_3'],
            source=source,
            )
        center = source - band_1 - band_4
        band_2 = ugentools.LPF.ar(
            frequency=builder['frequency_2'],
            source=center,
            )
        band_3 = ugentools.HPF.ar(
            frequency=builder['frequency_2'],
            source=center,
            )
        band_1 = ugentools.CompanderD.ar(
            clamp_time=builder['band_1_clamp_time'],
            relax_time=builder['band_1_relax_time'],
            slope_above=builder['band_1_slope'],
            source=band_1,
            threshold=builder['band_1_threshold'],
            )
        band_2 = ugentools.CompanderD.ar(
            clamp_time=builder['band_2_clamp_time'],
            relax_time=builder['band_2_relax_time'],
            slope_above=builder['band_2_slope'],
            source=band_2,
            threshold=builder['band_2_threshold'],
            )
        band_3 = ugentools.CompanderD.ar(
            clamp_time=builder['band_3_clamp_time'],
            relax_time=builder['band_3_relax_time'],
            slope_above=builder['band_3_slope'],
            source=band_3,
            threshold=builder['band_3_threshold'],
            )
        band_4 = ugentools.CompanderD.ar(
            clamp_time=builder['band_4_clamp_time'],
            relax_time=builder['band_4_relax_time'],
            slope_above=builder['band_4_slope'],
            source=band_4,
            threshold=builder['band_4_threshold'],
            )
        band_1 *= builder['band_1_gain']
        band_2 *= builder['band_2_gain']
        band_3 *= builder['band_3_gain']
        band_4 *= builder['band_4_gain']
        source = ugentools.Sum4.new(
            input_one=band_1,
            input_two=band_2,
            input_three=band_3,
            input_four=band_4,
            )
        ugentools.ReplaceOut.ar(bus=0, source=source)
    return builder.build()


multiband_compressor = _make_synthdef()

__all__ = (
    'multiband_compressor',
    )
# -*- encoding: utf-8 -*- import pytest import pytest from abjad.tools import systemtools from supriya import synthdefs from supriya.tools import osctools from supriya.tools import servertools @pytest.fixture(scope='function') def server(request): def server_teardown(): server.quit() server = servertools.Server().boot() server.debug_osc = True request.addfinalizer(server_teardown) return server def test_Node__handle_response_01(server): group_a = servertools.Group().allocate() group_b = servertools.Group().allocate() synth_a = servertools.Synth(synthdefs.test) synth_b = servertools.Synth(synthdefs.test) group_a.append(synth_a) group_b.append(synth_b) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1000 group 1002 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_after', synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1000 group 1002 test 1003 test ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state osc_message = osctools.OscMessage( '/n_order', 0, group_b.node_id, synth_b.node_id, synth_a.node_id, ) server.send_message(osc_message) remote_state = str(server.query_remote_nodes()) assert systemtools.TestManager.compare( remote_state, ''' NODE TREE 0 group 1 group 1001 group 1003 test 1002 test 1000 group ''', ), remote_state local_state = str(server.query_local_nodes()) assert local_state == remote_state
andrewyoung1991/supriya
supriya/tools/servertools/test/test_Node__handle_response.py
supriya/synthdefs/multiband_compressor.py
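A hypothetical allocation sketch built only from calls that appear in the paired test file (Server().boot(), Group().allocate(), Synth(...), group.append(...)); that supriya.synthdefs re-exports multiband_compressor at import time is assumed here, not verified.

# Sketch only; requires a running scsynth. Assumes the synthdef built above
# is importable as synthdefs.multiband_compressor.
from supriya import synthdefs
from supriya.tools import servertools

server = servertools.Server().boot()
group = servertools.Group().allocate()
compressor = servertools.Synth(synthdefs.multiband_compressor)
group.append(compressor)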
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import division, print_function, absolute_import import warnings import numbers import numpy import operator from . import _ni_support from . import _nd_image from . import _ni_docstrings __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace', 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude', 'gaussian_gradient_magnitude', 'correlate', 'convolve', 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', 'maximum_filter1d', 'minimum_filter', 'maximum_filter', 'rank_filter', 'median_filter', 'percentile_filter', 'generic_filter1d', 'generic_filter'] def _invalid_origin(origin, lenw): return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2) @_ni_docstrings.docfiller def correlate1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional correlation along the given axis. The lines of the array along the given axis are correlated with the given weights. Parameters ---------- %(input)s weights : array One-dimensional sequence of numbers. 
%(axis)s %(output)s %(mode)s %(cval)s %(origin)s Examples -------- >>> from scipy.ndimage import correlate1d >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([ 8, 26, 8, 12, 7, 28, 36, 9]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) weights = numpy.asarray(weights, dtype=numpy.float64) if weights.ndim != 1 or weights.shape[0] < 1: raise RuntimeError('no filter weights given') if not weights.flags.contiguous: weights = weights.copy() axis = _ni_support._check_axis(axis, input.ndim) if _invalid_origin(origin, len(weights)): raise ValueError('Invalid origin; origin must satisfy ' '-(len(weights) // 2) <= origin <= ' '(len(weights)-1) // 2') mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate1d(input, weights, axis, output, mode, cval, origin) return output @_ni_docstrings.docfiller def convolve1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional convolution along the given axis. The lines of the array along the given axis are convolved with the given weights. Parameters ---------- %(input)s weights : ndarray One-dimensional sequence of numbers. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- convolve1d : ndarray Convolved array with same shape as input Examples -------- >>> from scipy.ndimage import convolve1d >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([14, 24, 4, 13, 12, 36, 27, 0]) """ weights = weights[::-1] origin = -origin if not len(weights) & 1: origin -= 1 return correlate1d(input, weights, axis, output, mode, cval, origin) def _gaussian_kernel1d(sigma, order, radius): """ Computes a 1D Gaussian convolution kernel. """ if order < 0: raise ValueError('order must be non-negative') p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)]) x = numpy.arange(-radius, radius + 1) phi_x = numpy.exp(p(x), dtype=numpy.double) phi_x /= phi_x.sum() if order > 0: q = numpy.polynomial.Polynomial([1]) p_deriv = p.deriv() for _ in range(order): # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) q = q.deriv() + q * p_deriv phi_x *= q(x) return phi_x @_ni_docstrings.docfiller def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0): """One-dimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar standard deviation for Gaussian kernel %(axis)s order : int, optional An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode)s %(cval)s truncate : float, optional Truncate the filter at this many standard deviations. Default is 4.0. Returns ------- gaussian_filter1d : ndarray Examples -------- >>> from scipy.ndimage import gaussian_filter1d >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1) array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905]) >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4) array([ 2.91948343, 2.95023502, 3. 
, 3.04976498, 3.08051657]) >>> import matplotlib.pyplot as plt >>> np.random.seed(280490) >>> x = np.random.randn(101).cumsum() >>> y3 = gaussian_filter1d(x, 3) >>> y6 = gaussian_filter1d(x, 6) >>> plt.plot(x, 'k', label='original data') >>> plt.plot(y3, '--', label='filtered, sigma=3') >>> plt.plot(y6, ':', label='filtered, sigma=6') >>> plt.legend() >>> plt.grid() >>> plt.show() """ sd = float(sigma) # make the radius of the filter equal to truncate standard deviations lw = int(truncate * sd + 0.5) # Since we are calling correlate, not convolve, revert the kernel weights = _gaussian_kernel1d(sigma, order, lw)[::-1] return correlate1d(input, weights, axis, output, mode, cval, 0) @_ni_docstrings.docfiller def gaussian_filter(input, sigma, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0): """Multidimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar or sequence of scalars Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. order : int or sequence of ints, optional The order of the filter along each axis is given as a sequence of integers, or as a single number. An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode_multiple)s %(cval)s truncate : float Truncate the filter at this many standard deviations. Default is 4.0. Returns ------- gaussian_filter : ndarray Returned array of same shape as `input`. Notes ----- The multidimensional filter is implemented as a sequence of one-dimensional convolution filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. Examples -------- >>> from scipy.ndimage import gaussian_filter >>> a = np.arange(50, step=2).reshape((5,5)) >>> a array([[ 0, 2, 4, 6, 8], [10, 12, 14, 16, 18], [20, 22, 24, 26, 28], [30, 32, 34, 36, 38], [40, 42, 44, 46, 48]]) >>> gaussian_filter(a, sigma=1) array([[ 4, 6, 8, 9, 11], [10, 12, 14, 15, 17], [20, 22, 24, 25, 27], [29, 31, 33, 34, 36], [35, 37, 39, 40, 42]]) >>> from scipy import misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = gaussian_filter(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) output = _ni_support._get_output(output, input) orders = _ni_support._normalize_sequence(order, input.ndim) sigmas = _ni_support._normalize_sequence(sigma, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii]) for ii in range(len(axes)) if sigmas[ii] > 1e-15] if len(axes) > 0: for axis, sigma, order, mode in axes: gaussian_filter1d(input, sigma, axis, order, output, mode, cval, truncate) input = output else: output[...] = input[...] return output @_ni_docstrings.docfiller def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0): """Calculate a Prewitt filter. 
Parameters ---------- %(input)s %(axis)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.prewitt(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output = _ni_support._get_output(output, input) modes = _ni_support._normalize_sequence(mode, input.ndim) correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,) return output @_ni_docstrings.docfiller def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0): """Calculate a Sobel filter. Parameters ---------- %(input)s %(axis)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.sobel(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output = _ni_support._get_output(output, input) modes = _ni_support._normalize_sequence(mode, input.ndim) correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0) return output @_ni_docstrings.docfiller def generic_laplace(input, derivative2, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """ N-dimensional Laplace filter using a provided second derivative function. Parameters ---------- %(input)s derivative2 : callable Callable with the following signature:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: modes = _ni_support._normalize_sequence(mode, len(axes)) derivative2(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords) for ii in range(1, len(axes)): tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords) output += tmp else: output[...] = input[...] return output @_ni_docstrings.docfiller def laplace(input, output=None, mode="reflect", cval=0.0): """N-dimensional Laplace filter based on approximate second derivatives. 
Parameters ---------- %(input)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.laplace(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ def derivative2(input, axis, output, mode, cval): return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) return generic_laplace(input, derivative2, output, mode, cval) @_ni_docstrings.docfiller def gaussian_laplace(input, sigma, output=None, mode="reflect", cval=0.0, **kwargs): """Multidimensional Laplace filter using gaussian second derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s Extra keyword arguments will be passed to gaussian_filter(). Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> ascent = misc.ascent() >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> result = ndimage.gaussian_laplace(ascent, sigma=1) >>> ax1.imshow(result) >>> result = ndimage.gaussian_laplace(ascent, sigma=3) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) def derivative2(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 2 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_laplace(input, derivative2, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs) @_ni_docstrings.docfiller def generic_gradient_magnitude(input, derivative, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """Gradient magnitude using a provided gradient function. Parameters ---------- %(input)s derivative : callable Callable with the following signature:: derivative(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. `derivative` can assume that `input` and `output` are ndarrays. Note that the output from `derivative` is modified inplace; be careful to copy important inputs before returning them. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: modes = _ni_support._normalize_sequence(mode, len(axes)) derivative(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords) numpy.multiply(output, output, output) for ii in range(1, len(axes)): tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords) numpy.multiply(tmp, tmp, tmp) output += tmp # This allows the sqrt to work with a different default casting numpy.sqrt(output, output, casting='unsafe') else: output[...] = input[...] return output @_ni_docstrings.docfiller def gaussian_gradient_magnitude(input, sigma, output=None, mode="reflect", cval=0.0, **kwargs): """Multidimensional gradient magnitude using Gaussian derivatives. 
Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.. %(output)s %(mode_multiple)s %(cval)s Extra keyword arguments will be passed to gaussian_filter(). Returns ------- gaussian_gradient_magnitude : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) def derivative(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 1 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs) def _correlate_or_convolve(input, weights, output, mode, cval, origin, convolution): input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) weights = numpy.asarray(weights, dtype=numpy.float64) wshape = [ii for ii in weights.shape if ii > 0] if len(wshape) != input.ndim: raise RuntimeError('filter weights array has incorrect shape.') if convolution: weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] for ii in range(len(origins)): origins[ii] = -origins[ii] if not weights.shape[ii] & 1: origins[ii] -= 1 for origin, lenw in zip(origins, wshape): if _invalid_origin(origin, lenw): raise ValueError('Invalid origin; origin must satisfy ' '-(weights.shape[k] // 2) <= origin[k] <= ' '(weights.shape[k]-1) // 2') if not weights.flags.contiguous: weights = weights.copy() output = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate(input, weights, output, mode, cval, origins) return output @_ni_docstrings.docfiller def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0): """ Multi-dimensional correlation. The array is correlated with the given kernel. Parameters ---------- %(input)s weights : ndarray array of weights, same number of dimensions as input %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s See Also -------- convolve : Convolve an image with a kernel. """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, False) @_ni_docstrings.docfiller def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0): """ Multidimensional convolution. The array is convolved with the given kernel. Parameters ---------- %(input)s weights : array_like Array of weights, same number of dimensions as input %(output)s %(mode_multiple)s cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 %(origin_multiple)s Returns ------- result : ndarray The result of convolution of `input` with `weights`. See Also -------- correlate : Correlate an image with a kernel. 
Notes ----- Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where W is the `weights` kernel, j is the n-D spatial index over :math:`W`, I is the `input` and k is the coordinate of the center of W, specified by `origin` in the input parameters. Examples -------- Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, because in this case borders (i.e. where the `weights` kernel, centered on any one value, extends beyond an edge of `input`) are treated as zeros. >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) >>> from scipy import ndimage >>> ndimage.convolve(a, k, mode='constant', cval=0.0) array([[11, 10, 7, 4], [10, 3, 11, 11], [15, 12, 14, 7], [12, 3, 7, 0]]) Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` with 1.0's (and then extracting only the original region of the result). >>> ndimage.convolve(a, k, mode='constant', cval=1.0) array([[13, 11, 8, 7], [11, 3, 11, 14], [16, 12, 14, 10], [15, 6, 10, 5]]) With ``mode='reflect'`` (the default), outer values are reflected at the edge of `input` to fill in missing values. >>> b = np.array([[2, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]]) >>> ndimage.convolve(b, k, mode='reflect') array([[5, 0, 0], [3, 0, 0], [1, 0, 0]]) This includes diagonally at the corners. >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) >>> ndimage.convolve(b, k) array([[4, 2, 0], [3, 2, 0], [1, 1, 0]]) With ``mode='nearest'``, the single nearest value in to an edge in `input` is repeated as many times as needed to match the overlapping `weights`. >>> c = np.array([[2, 0, 1], ... [1, 0, 0], ... [0, 0, 0]]) >>> k = np.array([[0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0]]) >>> ndimage.convolve(c, k, mode='nearest') array([[7, 0, 3], [5, 0, 2], [3, 0, 1]]) """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, True) @_ni_docstrings.docfiller def uniform_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional uniform filter along the given axis. The lines of the array along the given axis are filtered with a uniform filter of given size. Parameters ---------- %(input)s size : int length of uniform filter %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Examples -------- >>> from scipy.ndimage import uniform_filter1d >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([4, 3, 4, 1, 4, 6, 6, 3]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, origin) return output @_ni_docstrings.docfiller def uniform_filter(input, size=3, output=None, mode="reflect", cval=0.0, origin=0): """Multi-dimensional uniform filter. Parameters ---------- %(input)s size : int or sequence of ints, optional The sizes of the uniform filter are given for each axis as a sequence, or as a single number, in which case the size is equal for all axes. %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- uniform_filter : ndarray Filtered array. Has the same shape as `input`. 
Notes ----- The multi-dimensional filter is implemented as a sequence of one-dimensional uniform filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.uniform_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) output = _ni_support._get_output(output, input) sizes = _ni_support._normalize_sequence(size, input.ndim) origins = _ni_support._normalize_sequence(origin, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if len(axes) > 0: for axis, size, origin, mode in axes: uniform_filter1d(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] return output @_ni_docstrings.docfiller def minimum_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional minimum filter along the given axis. The lines of the array along the given axis are filtered with a minimum filter of given size. Parameters ---------- %(input)s size : int length along which to calculate 1D minimum %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Notes ----- This function implements the MINLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being the `input` length, regardless of filter size. References ---------- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html Examples -------- >>> from scipy.ndimage import minimum_filter1d >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([2, 0, 0, 0, 1, 1, 0, 0]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 1) return output @_ni_docstrings.docfiller def maximum_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a one-dimensional maximum filter along the given axis. The lines of the array along the given axis are filtered with a maximum filter of given size. Parameters ---------- %(input)s size : int Length along which to calculate the 1-D maximum. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- maximum1d : ndarray, None Maximum-filtered array with same shape as input. None if `output` is not None Notes ----- This function implements the MAXLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being the `input` length, regardless of filter size. References ---------- .. 
[1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html Examples -------- >>> from scipy.ndimage import maximum_filter1d >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([8, 8, 8, 4, 9, 9, 9, 9]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 0) return output def _min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, minimum): if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3) if structure is None: if footprint is None: if size is None: raise RuntimeError("no footprint provided") separable = True else: footprint = numpy.asarray(footprint, dtype=bool) if not footprint.any(): raise ValueError("All-zero footprint is not supported.") if footprint.all(): size = footprint.shape footprint = None separable = True else: separable = False else: structure = numpy.asarray(structure, dtype=numpy.float64) separable = False if footprint is None: footprint = numpy.ones(structure.shape, bool) else: footprint = numpy.asarray(footprint, dtype=bool) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) origins = _ni_support._normalize_sequence(origin, input.ndim) if separable: sizes = _ni_support._normalize_sequence(size, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if minimum: filter_ = minimum_filter1d else: filter_ = maximum_filter1d if len(axes) > 0: for axis, size, origin, mode in axes: filter_(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] else: fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() if structure is not None: if len(structure.shape) != input.ndim: raise RuntimeError('structure array has incorrect shape') if not structure.flags.contiguous: structure = structure.copy() mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter(input, footprint, structure, output, mode, cval, origins, minimum) return output @_ni_docstrings.docfiller def minimum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multi-dimensional minimum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- minimum_filter : ndarray Filtered array. Has the same shape as `input`. 
Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.minimum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 1) @_ni_docstrings.docfiller def maximum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multi-dimensional maximum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- maximum_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.maximum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 0) @_ni_docstrings.docfiller def _rank_filter(input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, operation='rank'): if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() filter_size = numpy.where(footprint, 1, 0).sum() if operation == 'median': rank = filter_size // 2 elif operation == 'percentile': percentile = rank if percentile < 0.0: percentile += 100.0 if percentile < 0 or percentile > 100: raise RuntimeError('invalid percentile') if percentile == 100.0: rank = filter_size - 1 else: rank = int(float(filter_size) * percentile / 100.0) if rank < 0: rank += filter_size if rank < 0 or rank >= filter_size: raise RuntimeError('rank not within filter footprint size') if rank == 0: return minimum_filter(input, None, footprint, output, mode, cval, origins) elif rank == filter_size - 1: return maximum_filter(input, None, footprint, output, mode, cval, origins) else: output = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins) return output @_ni_docstrings.docfiller def rank_filter(input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multi-dimensional rank filter. 
Parameters ---------- %(input)s rank : int The rank parameter may be less then zero, i.e., rank = -1 indicates the largest element. %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- rank_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.rank_filter(ascent, rank=42, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ rank = operator.index(rank) return _rank_filter(input, rank, size, footprint, output, mode, cval, origin, 'rank') @_ni_docstrings.docfiller def median_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """ Calculate a multidimensional median filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- median_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.median_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, 0, size, footprint, output, mode, cval, origin, 'median') @_ni_docstrings.docfiller def percentile_filter(input, percentile, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multi-dimensional percentile filter. Parameters ---------- %(input)s percentile : scalar The percentile parameter may be less then zero, i.e., percentile = -20 equals percentile = 80 %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- percentile_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, percentile, size, footprint, output, mode, cval, origin, 'percentile') @_ni_docstrings.docfiller def generic_filter1d(input, function, filter_size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0, extra_arguments=(), extra_keywords=None): """Calculate a one-dimensional filter along the given axis. `generic_filter1d` iterates over the lines of the array, calling the given function at each line. The arguments of the line are the input line, and the output line. The input and output lines are 1D double arrays. The input line is extended appropriately according to the filter size and origin. The output line must be modified in-place with the result. Parameters ---------- %(input)s function : {callable, scipy.LowLevelCallable} Function to apply along given axis. filter_size : scalar Length of the filter. 
%(axis)s %(output)s %(mode)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s Notes ----- This function also accepts low-level callback functions with one of the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int function(double *input_line, npy_intp input_length, double *output_line, npy_intp output_length, void *user_data) int function(double *input_line, intptr_t input_length, double *output_line, intptr_t output_length, void *user_data) The calling function iterates over the lines of the input and output arrays, calling the callback function at each line. The current line is extended according to the border conditions set by the calling function, and the result is copied into the array that is passed through ``input_line``. The length of the input line (after extension) is passed through ``input_length``. The callback function should apply the filter and store the result in the array passed through ``output_line``. The length of the output line is passed through ``output_length``. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) if filter_size < 1: raise RuntimeError('invalid filter size') axis = _ni_support._check_axis(axis, input.ndim) if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= filter_size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter1d(input, function, filter_size, axis, output, mode, cval, origin, extra_arguments, extra_keywords) return output @_ni_docstrings.docfiller def generic_filter(input, function, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, extra_arguments=(), extra_keywords=None): """Calculate a multi-dimensional filter using the given function. At each element the provided function is called. The input values within the filter footprint at that element are passed to the function as a 1D array of double values. Parameters ---------- %(input)s function : {callable, scipy.LowLevelCallable} Function to apply at each element. %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s %(extra_arguments)s %(extra_keywords)s Notes ----- This function also accepts low-level callback functions with one of the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int callback(double *buffer, npy_intp filter_size, double *return_value, void *user_data) int callback(double *buffer, intptr_t filter_size, double *return_value, void *user_data) The calling function iterates over the elements of the input and output arrays, calling the callback function at each element. The elements within the footprint of the filter at the current element are passed through the ``buffer`` parameter, and the number of elements within the footprint through ``filter_size``. The calculated value is returned in ``return_value``. 
``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. """ if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() output = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter(input, function, footprint, output, mode, cval, origins, extra_arguments, extra_keywords) return output
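# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library source above or the test
# module below). The docstrings of `generic_filter` and `generic_filter1d`
# describe the callback contract only in prose, so this minimal example shows
# both entry points with plain Python callables. The array contents and the
# names `local_range` / `running_mean` are assumptions made for the
# demonstration; production code would typically wrap the callbacks in
# `scipy.LowLevelCallable` for speed.
import numpy as np
from scipy import ndimage

data = np.arange(25, dtype=float).reshape(5, 5)


def local_range(buffer):
    # `buffer` is the flat 1-D double array of values inside the footprint
    # at the current element; the return value becomes the output element.
    return buffer.max() - buffer.min()


filtered = ndimage.generic_filter(data, local_range, size=3)


def running_mean(input_line, output_line):
    # The input line is extended by filter_size - 1 samples (here 2), and the
    # result must be written into `output_line` in place.
    output_line[:] = (input_line[:-2] + input_line[1:-1] + input_line[2:]) / 3.0


smoothed = ndimage.generic_filter1d(data, running_mean, filter_size=3, axis=-1)
# ---------------------------------------------------------------------------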
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import sys from decimal import Decimal from itertools import product import warnings import pytest from pytest import raises as assert_raises from numpy.testing import ( assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_, assert_warns, assert_array_less) from scipy._lib._numpy_compat import suppress_warnings from numpy import array, arange import numpy as np from scipy.ndimage.filters import correlate1d from scipy.optimize import fmin from scipy import signal from scipy.signal import ( correlate, convolve, convolve2d, fftconvolve, choose_conv_method, hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, sosfilt_zi, tf2zpk, BadCoefficients) from scipy.signal.windows import hann from scipy.signal.signaltools import _filtfilt_gust if sys.version_info.major >= 3 and sys.version_info.minor >= 5: from math import gcd else: from fractions import gcd class _TestConvolve(object): def test_basic(self): a = [3, 4, 5, 6, 5, 4] b = [1, 2, 3] c = convolve(a, b) assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) def test_same(self): a = [3, 4, 5] b = [1, 2, 3, 4] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 34])) def test_same_eq(self): a = [3, 4, 5] b = [1, 2, 3] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 22])) def test_complex(self): x = array([1 + 1j, 2 + 1j, 3 + 1j]) y = array([1 + 1j, 2 + 1j]) z = convolve(x, y) assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) def test_zero_rank(self): a = 1289 b = 4567 c = convolve(a, b) assert_equal(c, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) c = convolve(a, b) assert_equal(c, a * b) def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve(a, b) d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) assert_array_equal(c, d) def test_input_swapping(self): small = arange(8).reshape(2, 2, 2) big = 1j * arange(27).reshape(3, 3, 3) big += arange(27)[::-1].reshape(3, 3, 3) out_array = array( [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) assert_array_equal(convolve(small, big, 'full'), out_array) assert_array_equal(convolve(big, small, 'full'), out_array) assert_array_equal(convolve(small, big, 'same'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'same'), out_array[0:3, 0:3, 0:3]) assert_array_equal(convolve(small, big, 'valid'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'valid'), out_array[1:3, 1:3, 1:3]) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, convolve, a, b, mode='spam') assert_raises(ValueError, convolve, 
a, b, mode='eggs', method='fft') assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') class TestConvolve(_TestConvolve): def test_valid_mode2(self): # See gh-5897 a = [1, 2, 3, 6, 5, 3] b = [2, 3, 4, 5, 3, 4, 2, 2, 1] expected = [70, 78, 73, 65] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) a = [1 + 5j, 2 - 1j, 3 + 0j] b = [2 - 3j, 1 + 0j] expected = [2 - 3j, 8 - 10j] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) def test_same_mode(self): a = [1, 2, 3, 3, 1, 2] b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] c = convolve(a, b, 'same') d = array([57, 61, 63, 57, 45, 36]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) def test_convolve_method(self, n=100): types = sum([t for _, t in np.sctypes.items()], []) types = {np.dtype(t).name for t in types} # These types include 'bool' and all precisions (int8, float32, etc) # The removed types throw errors in correlate or fftconvolve for dtype in ['complex256', 'complex192', 'float128', 'float96', 'str', 'void', 'bytes', 'object', 'unicode', 'string']: if dtype in types: types.remove(dtype) args = [(t1, t2, mode) for t1 in types for t2 in types for mode in ['valid', 'full', 'same']] # These are random arrays, which means test is much stronger than # convolving testing by convolving two np.ones arrays np.random.seed(42) array_types = {'i': np.random.choice([0, 1], size=n), 'f': np.random.randn(n)} array_types['b'] = array_types['u'] = array_types['i'] array_types['c'] = array_types['f'] + 0.5j*array_types['f'] for t1, t2, mode in args: x1 = array_types[np.dtype(t1).kind].astype(t1) x2 = array_types[np.dtype(t2).kind].astype(t2) results = {key: convolve(x1, x2, method=key, mode=mode) for key in ['fft', 'direct']} assert_equal(results['fft'].dtype, results['direct'].dtype) if 'bool' in t1 and 'bool' in t2: assert_equal(choose_conv_method(x1, x2), 'direct') continue # Found by experiment. Found approx smallest value for (rtol, atol) # threshold to have tests pass. if any([t in {'complex64', 'float32'} for t in [t1, t2]]): kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} elif 'float16' in [t1, t2]: # atol is default for np.allclose kwargs = {'rtol': 1e-3, 'atol': 1e-8} else: # defaults for np.allclose (different from assert_allclose) kwargs = {'rtol': 1e-5, 'atol': 1e-8} assert_allclose(results['fft'], results['direct'], **kwargs) def test_convolve_method_large_input(self): # This is really a test that convolving two large integers goes to the # direct method even if they're in the fft method. 
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: z = np.array([2**n], dtype=np.int64) fft = convolve(z, z, method='fft') direct = convolve(z, z, method='direct') # this is the case when integer precision gets to us # issue #6076 has more detail, hopefully more tests after resolved if n < 50: assert_equal(fft, direct) assert_equal(fft, 2**(2*n)) assert_equal(direct, 2**(2*n)) def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, convolve, [1], 2, method='direct') assert_raises(ValueError, convolve, 1, [2], method='direct') assert_raises(ValueError, convolve, [1], 2, method='fft') assert_raises(ValueError, convolve, 1, [2], method='fft') assert_raises(ValueError, convolve, [1], [[2]]) assert_raises(ValueError, convolve, [3], 2) class _TestConvolve2d(object): def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) e = convolve2d(a, b) assert_array_equal(e, d) def test_valid_mode(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = [[1, 2, 3], [3, 4, 5]] h = array([[62, 80, 98, 116, 134]]) g = convolve2d(e, f, 'valid') assert_array_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_valid_mode_complx(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) g = convolve2d(e, f, 'valid') assert_array_almost_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_fillvalue(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] fillval = 1 c = convolve2d(a, b, 'full', 'fill', fillval) d = array([[24, 26, 31, 34, 32], [28, 40, 62, 64, 52], [32, 46, 67, 62, 48]]) assert_array_equal(c, d) def test_fillvalue_deprecations(self): # Deprecated 2017-07, scipy version 1.0.0 with suppress_warnings() as sup: sup.filter(np.ComplexWarning, "Casting complex values to real") r = sup.record(DeprecationWarning, "could not cast `fillvalue`") convolve2d([[1]], [[1, 2]], fillvalue=1j) assert_(len(r) == 1) warnings.filterwarnings( "error", message="could not cast `fillvalue`", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=1j) with suppress_warnings(): warnings.filterwarnings( "always", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) warnings.filterwarnings( "error", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) def test_fillvalue_empty(self): # Check that fillvalue being empty raises an error: assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], fillvalue=[]) def test_wrap_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'wrap') d = array([[80, 80, 74, 80, 80], [68, 68, 62, 68, 68], [80, 80, 74, 80, 80]]) assert_array_equal(c, d) def test_sym_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'symm') d = array([[34, 30, 44, 62, 66], [52, 48, 62, 80, 84], [82, 78, 92, 110, 114]]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) class TestConvolve2d(_TestConvolve2d): def test_same_mode(self): e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] g = convolve2d(e, f, 'same') h = array([[22, 28, 34], [80, 98, 116]]) assert_array_equal(g, h) def test_valid_mode2(self): # See gh-5897 e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] expected = [[62, 80, 98, 116, 134]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] expected = [[27 - 1j, 46. + 2j]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) # See gh-5897 out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) def test_consistency_convolve_funcs(self): # Compare np.convolve, signal.convolve, signal.convolve2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.convolve(a, b, mode=mode), signal.convolve(a, b, mode=mode)) assert_almost_equal(np.squeeze( signal.convolve2d([a], [b], mode=mode)), signal.convolve(a, b, mode=mode)) def test_invalid_dims(self): assert_raises(ValueError, convolve2d, 3, 4) assert_raises(ValueError, convolve2d, [3], [4]) assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) class TestFFTConvolve(object): @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_real_axes(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_complex(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_complex_axes(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_real_same(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_real_same_axes(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = 
fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same_axes(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real_same_mode(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) if axes == '': out = fftconvolve(a, b, 'same') else: out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) if axes == '': out = fftconvolve(b, a, 'same') else: out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) def test_real_same_mode_axes(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected_1 = np.tile(expected_1, [2, 1]) expected_2 = np.tile(expected_2, [2, 1]) out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_real(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1]]) def test_valid_mode_real_axes(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_complex(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_valid_mode_complex_axes(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) def test_empty(self): # Regression test for #1745: crashes with 0-length input. assert_(fftconvolve([], []).size == 0) assert_(fftconvolve([5, 6], []).size == 0) assert_(fftconvolve([], [7]).size == 0) def test_zero_rank(self): a = array(4967) b = array(3920) out = fftconvolve(a, b) assert_equal(out, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) out = fftconvolve(a, b) assert_equal(out, a * b) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_random_data(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') if axes == '': out = fftconvolve(a, b, 'full') else: out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_random_data_axes(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [[1, 4], [4, 1], [1, -1], [-1, 1], [-4, 4], [4, -4], [-4, -1], [-1, -4]]) def test_random_data_multidim_axes(self, axes): np.random.seed(1234) a = np.random.rand(123, 222) + 1j * np.random.rand(123, 222) b = np.random.rand(132, 111) + 1j * np.random.rand(132, 111) expected = convolve2d(a, b, 'full') a = a[:, :, None, None, None] b = b[:, :, None, None, None] expected = expected[:, :, None, None, None] a = np.rollaxis(a.swapaxes(0, 2), 1, 5) b = np.rollaxis(b.swapaxes(0, 2), 1, 5) expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5) # use 1 for dimension 2 in a and 3 in b to test broadcasting a = np.tile(a, [2, 1, 3, 1, 1]) b = np.tile(b, [2, 1, 1, 4, 1]) expected = np.tile(expected, [2, 1, 3, 4, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.slow @pytest.mark.parametrize( 'n', list(range(1, 100)) + list(range(1000, 1500)) + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) def test_many_sizes(self, n): a = np.random.rand(n) + 1j * np.random.rand(n) b = np.random.rand(n) + 1j * np.random.rand(n) expected = np.convolve(a, b, 'full') out = fftconvolve(a, b, 'full') assert_allclose(out, expected, atol=1e-10) out = fftconvolve(a, b, 'full', axes=[0]) assert_allclose(out, expected, atol=1e-10) def test_invalid_shapes(self): a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) with assert_raises(ValueError, match="For 'valid' mode, one must be at least " "as 
large as the other in every dimension"): fftconvolve(a, b, mode='valid') def test_invalid_shapes_axes(self): a = np.zeros([5, 6, 2, 1]) b = np.zeros([5, 6, 3, 1]) with assert_raises(ValueError, match=r"incompatible shapes for in1 and in2:" r" \(5L?, 6L?, 2L?, 1L?\) and" r" \(5L?, 6L?, 3L?, 1L?\)"): fftconvolve(a, b, axes=[0, 1]) @pytest.mark.parametrize('a,b', [([1], 2), (1, [2]), ([3], [[2]])]) def test_mismatched_dims(self, a, b): with assert_raises(ValueError, match="in1 and in2 should have the same" " dimensionality"): fftconvolve(a, b) def test_invalid_flags(self): with assert_raises(ValueError, match="acceptable mode flags are 'valid'," " 'same', or 'full'"): fftconvolve([1], [2], mode='chips') with assert_raises(ValueError, match="when provided, axes cannot be empty"): fftconvolve([1], [2], axes=[]) with assert_raises(ValueError, match="when given, axes values must be a scalar" " or vector"): fftconvolve([1], [2], axes=[[1, 2], [3, 4]]) with assert_raises(ValueError, match="when given, axes values must be integers"): fftconvolve([1], [2], axes=[1., 2., 3., 4.]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[1]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[-2]) with assert_raises(ValueError, match="all axes must be unique"): fftconvolve([1], [2], axes=[0, 0]) class TestMedFilt(object): def test_basic(self): f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] d = signal.medfilt(f, [7, 3]) e = signal.medfilt2d(np.array(f, float), [7, 3]) assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]) assert_array_equal(d, e) def test_none(self): # Ticket #1124. Ensure this does not segfault. signal.medfilt(None) # Expand on this test to avoid a regression with possible contiguous # numpy arrays that have odd strides. The stride value below gets # us into wrong memory if used (but it does not need to be used) dummy = np.arange(10, dtype=np.float64) a = dummy[5:6] a.strides = 16 assert_(signal.medfilt(a, 1) == 5.) 
def test_refcounting(self): # Check a refcounting-related crash a = Decimal(123) x = np.array([a, a], dtype=object) if hasattr(sys, 'getrefcount'): n = 2 * sys.getrefcount(a) else: n = 10 # Shouldn't segfault: for j in range(n): signal.medfilt(x) if hasattr(sys, 'getrefcount'): assert_(sys.getrefcount(a) < n) assert_equal(x, [a, a]) class TestWiener(object): def test_basic(self): g = array([[5, 6, 4, 3], [3, 5, 6, 2], [2, 3, 5, 6], [1, 6, 9, 7]], 'd') h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) assert_array_almost_equal(signal.wiener(g), h, decimal=6) assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) class TestResample(object): def test_basic(self): # Some basic tests # Regression test for issue #3603. # window.shape must equal to sig.shape[0] sig = np.arange(128) num = 256 win = signal.get_window(('kaiser', 8.0), 160) assert_raises(ValueError, signal.resample, sig, num, window=win) # Other degenerate conditions assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) assert_raises(ValueError, signal.resample_poly, sig, 1, 0) # test for issue #6505 - should not modify window.shape when axis ≠ 0 sig2 = np.tile(np.arange(160), (2,1)) signal.resample(sig2, num, axis=-1, window=win) assert_(win.shape == (160,)) def test_fft(self): # Test FFT-based resampling self._test_data(method='fft') def test_polyphase(self): # Test polyphase resampling self._test_data(method='polyphase') def test_polyphase_extfilter(self): # Test external specification of downsampling filter self._test_data(method='polyphase', ext=True) def test_mutable_window(self): # Test that a mutable window is not modified impulse = np.zeros(3) window = np.random.RandomState(0).randn(2) window_orig = window.copy() signal.resample_poly(impulse, 5, 1, window=window) assert_array_equal(window, window_orig) def test_output_float32(self): # Test that float32 inputs yield a float32 output x = np.arange(10, dtype=np.float32) h = np.array([1,1,1], dtype=np.float32) y = signal.resample_poly(x, 1, 2, window=h) assert_(y.dtype == np.float32) def _test_data(self, method, ext=False): # Test resampling of sinusoids and random noise (1-sec) rate = 100 rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] # Sinusoids, windowed to avoid edge artifacts t = np.arange(rate) / float(rate) freqs = np.array((1., 10., 40.))[:, np.newaxis] x = np.sin(2 * np.pi * freqs * t) * hann(rate) for rate_to in rates_to: t_to = np.arange(rate_to) / float(rate_to) y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) if method == 'fft': y_resamps = signal.resample(x, rate_to, axis=-1) else: if ext and rate_to != rate: # Match default window design g = gcd(rate_to, rate) up = rate_to // g down = rate // g max_rate = max(up, down) f_c = 1. / max_rate half_len = 10 * max_rate window = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0)) polyargs = {'window': window} else: polyargs = {} y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, **polyargs) for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): if freq >= 0.5 * rate_to: y_to.fill(0.) 
# mostly low-passed away assert_allclose(y_resamp, y_to, atol=1e-3) else: assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=(corr, rate, rate_to)) # Random data rng = np.random.RandomState(0) x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind for rate_to in rates_to: # random data t_to = np.arange(rate_to) / float(rate_to) y_to = np.interp(t_to, t, x) if method == 'fft': y_resamp = signal.resample(x, rate_to) else: y_resamp = signal.resample_poly(x, rate_to, rate) assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=corr) # More tests of fft method (Master 0.18.1 fails these) if method == 'fft': x1 = np.array([1.+0.j,0.+0.j]) y1_test = signal.resample(x1,4) y1_true = np.array([1.+0.j,0.5+0.j,0.+0.j,0.5+0.j]) # upsampling a complex array assert_allclose(y1_test, y1_true, atol=1e-12) x2 = np.array([1.,0.5,0.,0.5]) y2_test = signal.resample(x2,2) # downsampling a real array y2_true = np.array([1.,0.]) assert_allclose(y2_test, y2_true, atol=1e-12) def test_poly_vs_filtfilt(self): # Check that up=1.0 gives same answer as filtfilt + slicing random_state = np.random.RandomState(17) try_types = (int, np.float32, np.complex64, float, complex) size = 10000 down_factors = [2, 11, 79] for dtype in try_types: x = random_state.randn(size).astype(dtype) if dtype in (np.complex64, np.complex128): x += 1j * random_state.randn(size) # resample_poly assumes zeros outside of signl, whereas filtfilt # can only constant-pad. Make them equivalent: x[0] = 0 x[-1] = 0 for down in down_factors: h = signal.firwin(31, 1. / down, window='hamming') yf = filtfilt(h, 1.0, x, padtype='constant')[::down] # Need to pass convolved version of filter to resample_poly, # since filtfilt does forward and backward, but resample_poly # only goes forward hc = convolve(h, h[::-1]) y = signal.resample_poly(x, 1, down, window=hc) assert_allclose(yf, y, atol=1e-7, rtol=1e-7) def test_correlate1d(self): for down in [2, 4]: for nx in range(1, 40, down): for nweights in (32, 33): x = np.random.random((nx,)) weights = np.random.random((nweights,)) y_g = correlate1d(x, weights[::-1], mode='constant') y_s = signal.resample_poly(x, up=1, down=down, window=weights) assert_allclose(y_g[::down], y_s) class TestCSpline1DEval(object): def test_basic(self): y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) x = arange(len(y)) dx = x[1] - x[0] cj = signal.cspline1d(y) x2 = arange(len(y) * 10.0) / 10.0 y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) # make sure interpolated values are on knot points assert_array_almost_equal(y2[::10], y, decimal=5) def test_complex(self): # create some smoothly varying complex signal to interpolate x = np.arange(2) y = np.zeros(x.shape, dtype=np.complex64) T = 10.0 f = 1.0 / T y = np.exp(2.0J * np.pi * f * x) # get the cspline transform cy = signal.cspline1d(y) # determine new test x value and interpolate xnew = np.array([0.5]) ynew = signal.cspline1d_eval(cy, xnew) assert_equal(ynew.dtype, y.dtype) class TestOrderFilt(object): def test_basic(self): assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), [2, 3, 2]) class _TestLinearFilter(object): def generate(self, shape): x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) return self.convert_dtype(x) def convert_dtype(self, arr): if self.dtype == np.dtype('O'): arr = np.asarray(arr) out = np.empty(arr.shape, self.dtype) iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'], [['readonly'],['writeonly']]) for x, y 
in iter: y[...] = self.type(x[()]) return out else: return np.array(arr, self.dtype, copy=False) def test_rank_1_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, -0.5]) y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_IIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([0.5, -0.5]) zi = self.convert_dtype([1, 2]) y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) zf_r = self.convert_dtype([13, -10]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_1_FIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 1, 1]) a = self.convert_dtype([1]) zi = self.convert_dtype([1, 1]) y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) zf_r = self.convert_dtype([9, 5]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_0(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]]) y = lfilter(b, a, x, axis=0) assert_array_almost_equal(y_r2_a0, y) def test_rank_2_IIR_axis_1(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]]) y = lfilter(b, a, x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank_2_IIR_axis_0_init_cond(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((4,1))) y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], [19, -17, 19]]) zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] y, zf = lfilter(b, a, x, axis=1, zi=zi) assert_array_almost_equal(y_r2_a0_1, y) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_1_init_cond(self): x = self.generate((4,3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((1,3))) y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5, 3, 1]]) zf_r = self.convert_dtype([[-23, -23, -23]]) y, zf = lfilter(b, a, x, axis=0, zi=zi) assert_array_almost_equal(y_r2_a0_0, y) assert_array_almost_equal(zf, zf_r) def test_rank_3_IIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_IIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 1 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_3_FIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) 
for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_FIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 2 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1, 1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_zi_pseudobroadcast(self): x = self.generate((4, 5, 20)) b,a = signal.butter(8, 0.2, output='ba') b = self.convert_dtype(b) a = self.convert_dtype(a) zi_size = b.shape[0] - 1 # lfilter requires x.ndim == zi.ndim exactly. However, zi can have # length 1 dimensions. zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) y_full, zf_full = lfilter(b, a, x, zi=zi_full) y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) assert_array_almost_equal(y_sing, y_full) assert_array_almost_equal(zf_full, zf_sing) # lfilter does not prepend ones assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) def test_scalar_a(self): # a can be a scalar. x = self.generate(6) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) y = lfilter(b, a[0], x) assert_array_almost_equal(y, y_r) def test_zi_some_singleton_dims(self): # lfilter doesn't really broadcast (no prepending of 1's). But does # do singleton expansion if x and zi have the same ndim. This was # broken only if a subset of the axes were singletons (gh-4681). 
x = self.convert_dtype(np.zeros((3,2,5), 'l')) b = self.convert_dtype(np.ones(5, 'l')) a = self.convert_dtype(np.array([1,0,0])) zi = np.ones((3,1,4), 'l') zi[1,:,:] *= 2 zi[2,:,:] *= 3 zi = self.convert_dtype(zi) zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) y_expected = np.zeros((3,2,5), 'l') y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] y_expected = self.convert_dtype(y_expected) # IIR y_iir, zf_iir = lfilter(b, a, x, -1, zi) assert_array_almost_equal(y_iir, y_expected) assert_array_almost_equal(zf_iir, zf_expected) # FIR y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) assert_array_almost_equal(y_fir, y_expected) assert_array_almost_equal(zf_fir, zf_expected) def base_bad_size_zi(self, b, a, x, axis, zi): b = self.convert_dtype(b) a = self.convert_dtype(a) x = self.convert_dtype(x) zi = self.convert_dtype(zi) assert_raises(ValueError, lfilter, b, a, x, axis, zi) def test_bad_size_zi(self): # rank 1 x1 = np.arange(6) self.base_bad_size_zi([1], [1], x1, -1, [1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) # rank 2 x2 = np.arange(12).reshape((4,3)) # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) self.base_bad_size_zi([1], [1], x2, 0, [0]) # for each of these there are 5 cases tested (in this order): # 1. not deep enough, right # elements # 2. too deep, right # elements # 3. right depth, right # elements, transposed # 4. right depth, too few elements # 5. 
right depth, too many elements self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) self.base_bad_size_zi([1], [1], x2, 1, [0]) self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) def test_empty_zi(self): # Regression test for #880: empty array for zi crashes. 
x = self.generate((5,)) a = self.convert_dtype([1]) b = self.convert_dtype([1]) zi = self.convert_dtype([]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, x) assert_equal(zf.dtype, self.dtype) assert_equal(zf.size, 0) def test_lfiltic_bad_zi(self): # Regression test for #3699: bad initial conditions a = self.convert_dtype([1]) b = self.convert_dtype([1]) # "y" sets the datatype of zi, so it truncates if int zi = lfiltic(b, a, [1., 0]) zi_1 = lfiltic(b, a, [1, 0]) zi_2 = lfiltic(b, a, [True, False]) assert_array_equal(zi, zi_1) assert_array_equal(zi, zi_2) def test_short_x_FIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([7, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_short_x_IIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1, 1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([-67, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_do_not_modify_a_b_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) b0 = b.copy() a = self.convert_dtype([0.5, -0.5]) a0 = a.copy() y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) def test_do_not_modify_a_b_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, 1]) b0 = b.copy() a = self.convert_dtype([2]) a0 = a.copy() y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) class TestLinearFilterFloat32(_TestLinearFilter): dtype = np.dtype('f') class TestLinearFilterFloat64(_TestLinearFilter): dtype = np.dtype('d') class TestLinearFilterFloatExtended(_TestLinearFilter): dtype = np.dtype('g') class TestLinearFilterComplex64(_TestLinearFilter): dtype = np.dtype('F') class TestLinearFilterComplex128(_TestLinearFilter): dtype = np.dtype('D') class TestLinearFilterComplexExtended(_TestLinearFilter): dtype = np.dtype('G') class TestLinearFilterDecimal(_TestLinearFilter): dtype = np.dtype('O') def type(self, x): return Decimal(str(x)) class TestLinearFilterObject(_TestLinearFilter): dtype = np.dtype('O') type = float def test_lfilter_bad_object(): # lfilter: object arrays with non-numeric objects raise TypeError. # Regression test for ticket #1452. 
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) with assert_raises(ValueError, match='common type'): lfilter([1.], [1., 1.], ['a', 'b', 'c']) def test_lfilter_notimplemented_input(): # Should not crash, gh-7991 assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) @pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, np.uint, int, np.ulonglong, np.ulonglong, np.float32, np.float64, np.longdouble, Decimal]) class TestCorrelateReal(object): def _setup_rank1(self, dt): a = np.linspace(0, 3, 4).astype(dt) b = np.linspace(1, 2, 2).astype(dt) y_r = np.array([0, 2, 5, 8, 3]).astype(dt) return a, b, y_r def equal_tolerance(self, res_dt): # default value of keyword decimal = 6 try: dt_info = np.finfo(res_dt) if hasattr(dt_info, 'resolution'): decimal = int(-0.5*np.log10(dt_info.resolution)) except Exception: pass return decimal def equal_tolerance_fft(self, res_dt): # FFT implementations convert longdouble arguments down to # double so don't expect better precision, see gh-9520 if res_dt == np.longdouble: return self.equal_tolerance(np.double) else: return self.equal_tolerance(res_dt) def test_method(self, dt): if dt == Decimal: method = choose_conv_method([Decimal(4)], [Decimal(3)]) assert_equal(method, 'direct') else: a, b, y_r = self._setup_rank3(dt) y_fft = correlate(a, b, method='fft') y_direct = correlate(a, b, method='direct') assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype)) assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype)) assert_equal(y_fft.dtype, dt) assert_equal(y_direct.dtype, dt) def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r[1:4]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[1:4][::-1]) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r[:-1]) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) def _setup_rank3(self, dt): a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( dt) b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( dt) y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], [46., 432., 1062., 1840., 2672., 1698., 864., 266.], [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], [202., 664., 1290., 1984., 2688., 1590., 712., 150.], [114., 344., 642., 960., 1280., 726., 296., 38.]], [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], [134., 920., 2166., 3680., 5280., 3306., 1640., 474.], [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], [[22., 214., 528., 916., 1332., 846., 430., 132.], [86., 484., 1098., 1832., 2600., 1602., 772., 206.], [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], [230., 692., 1290., 1928., 2568., 1458., 596., 78.], [126., 354., 636., 924., 1212., 654., 234., 0.]]], dtype=dt) return a, b, y_r def test_rank3_valid(self, 
dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1]) assert_equal(y.dtype, dt) def test_rank3_same(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "same") assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2]) assert_equal(y.dtype, dt) def test_rank3_all(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b) assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) class TestCorrelate(object): # Tests that don't depend on dtype def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'}) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, correlate, a, b, mode='spam') assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft') assert_raises(ValueError, correlate, a, b, mode='ham', method='direct') assert_raises(ValueError, correlate, a, b, mode='full', method='bacon') assert_raises(ValueError, correlate, a, b, mode='same', method='bacon') def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, correlate, [1], 2, method='direct') assert_raises(ValueError, correlate, 1, [2], method='direct') assert_raises(ValueError, correlate, [1], 2, method='fft') assert_raises(ValueError, correlate, 1, [2], method='fft') assert_raises(ValueError, correlate, [1], [[2]]) assert_raises(ValueError, correlate, [3], 2) def test_numpy_fastpath(self): a = [1, 2, 3] b = [4, 5] assert_allclose(correlate(a, b, mode='same'), [5, 14, 23]) a = [1, 2, 3] b = [4, 5, 6] assert_allclose(correlate(a, b, mode='same'), [17, 32, 23]) assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12]) assert_allclose(correlate(a, b, mode='valid'), [32]) @pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble]) class TestCorrelateComplex(object): # The decimal precision to be used for comparing results. # This value will be passed as the 'decimal' keyword argument of # assert_array_almost_equal(). # Since correlate may chose to use FFT method which converts # longdoubles to doubles internally don't expect better precision # for longdouble than for double (see gh-9520). 
def decimal(self, dt): if dt == np.clongdouble: dt = np.cdouble return int(2 * np.finfo(dt).precision / 3) def _setup_rank1(self, dt, mode): np.random.seed(9) a = np.random.randn(10).astype(dt) a += 1j * np.random.randn(10).astype(dt) b = np.random.randn(8).astype(dt) b += 1j * np.random.randn(8).astype(dt) y_r = (correlate(a.real, b.real, mode=mode) + correlate(a.imag, b.imag, mode=mode)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + correlate(a.imag, b.real, mode=mode)) return a, b, y_r def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt, 'valid') y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt, 'same') y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt, 'full') y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_swap_full(self, dt): d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) y = correlate(d, k) assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) def test_swap_same(self, dt): d = [0.+0.j, 1.+1.j, 2.+2.j] k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] y = correlate(d, k, mode="same") assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) def test_rank3(self, dt): a = np.random.randn(10, 8, 6).astype(dt) a += 1j * np.random.randn(10, 8, 6).astype(dt) b = np.random.randn(8, 6, 4).astype(dt) b += 1j * np.random.randn(8, 6, 4).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) def test_rank0(self, dt): a = np.array(np.random.randn()).astype(dt) a += 1j * np.array(np.random.randn()).astype(dt) b = np.array(np.random.randn()).astype(dt) b += 1j * np.array(np.random.randn()).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) assert_equal(correlate([1], [2j]), correlate(1, 2j)) assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) assert_equal(correlate([3j], [4]), correlate(3j, 4)) class TestCorrelate2d(object): def test_consistency_correlate_funcs(self): # Compare np.correlate, signal.correlate, signal.correlate2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.correlate(a, b, mode=mode), signal.correlate(a, b, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], mode=mode)), signal.correlate(a, b, mode=mode)) # See gh-5897 if mode == 'valid': assert_almost_equal(np.correlate(b, a, mode=mode), signal.correlate(b, a, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], mode=mode)), signal.correlate(b, a, mode=mode)) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) def test_complex_input(self): assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) class TestLFilterZI(object): def test_basic(self): a = np.array([1.0, -1.0, 0.5]) b = np.array([1.0, 0.0, 2.0]) zi_expected = np.array([5.0, -1.0]) zi = lfilter_zi(b, a) assert_array_almost_equal(zi, zi_expected) def test_scale_invariance(self): # Regression test. There was a bug in which b was not correctly # rescaled when a[0] was nonzero. b = np.array([2, 8, 5]) a = np.array([1, 1, 8]) zi1 = lfilter_zi(b, a) zi2 = lfilter_zi(2*b, 2*a) assert_allclose(zi2, zi1, rtol=1e-12) class TestFiltFilt(object): filtfilt_kind = 'tf' def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None): if self.filtfilt_kind == 'tf': b, a = zpk2tf(*zpk) return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) elif self.filtfilt_kind == 'sos': sos = zpk2sos(*zpk) return sosfiltfilt(sos, x, axis, padtype, padlen) def test_basic(self): zpk = tf2zpk([1, 2, 3], [1, 2, 3]) out = self.filtfilt(zpk, np.arange(12)) assert_allclose(out, arange(12), atol=1e-11) def test_sine(self): rate = 2000 t = np.linspace(0, 1.0, rate + 1) # A signal with low frequency and a high frequency. xlow = np.sin(5 * 2 * np.pi * t) xhigh = np.sin(250 * 2 * np.pi * t) x = xlow + xhigh zpk = butter(8, 0.125, output='zpk') # r is the magnitude of the largest pole. r = np.abs(zpk[1]).max() eps = 1e-5 # n estimates the number of steps for the # transient to decay by a factor of eps. n = int(np.ceil(np.log(eps) / np.log(r))) # High order lowpass filter... y = self.filtfilt(zpk, x, padlen=n) # Result should be just xlow. err = np.abs(y - xlow).max() assert_(err < 1e-4) # A 2D case. x2d = np.vstack([xlow, xlow + xhigh]) y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) assert_equal(y2d.shape, x2d.shape) err = np.abs(y2d - xlow).max() assert_(err < 1e-4) # Use the previous result to check the use of the axis keyword. # (Regression test for ticket #1620) y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) assert_equal(y2d, y2dt.T) def test_axis(self): # Test the 'axis' keyword on a 3D array. x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) zpk = butter(3, 0.125, output='zpk') y0 = self.filtfilt(zpk, x, padlen=0, axis=0) y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) assert_array_equal(y0, np.swapaxes(y1, 0, 1)) y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) assert_array_equal(y0, np.swapaxes(y2, 0, 2)) def test_acoeff(self): if self.filtfilt_kind != 'tf': return # only necessary for TF # test for 'a' coefficient as single number out = signal.filtfilt([.5, .5], 1, np.arange(10)) assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) def test_gust_simple(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The input array has length 2. The exact solution for this case # was computed "by hand". 
x = np.array([1.0, 2.0]) b = np.array([0.5]) a = np.array([1.0, -0.5]) y, z1, z2 = _filtfilt_gust(b, a, x) assert_allclose([z1[0], z2[0]], [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]]) assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1], 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]]) def test_gust_scalars(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The filter coefficients are both scalars, so the filter simply # multiplies its input by b/a. When it is used in filtfilt, the # factor is (b/a)**2. x = np.arange(12) b = 3.0 a = 2.0 y = filtfilt(b, a, x, method="gust") expected = (b/a)**2 * x assert_allclose(y, expected) class TestSOSFiltFilt(TestFiltFilt): filtfilt_kind = 'sos' def test_equivalence(self): """Test equivalence between sosfiltfilt and filtfilt""" x = np.random.RandomState(0).randn(1000) for order in range(1, 6): zpk = signal.butter(order, 0.35, output='zpk') b, a = zpk2tf(*zpk) sos = zpk2sos(*zpk) y = filtfilt(b, a, x) y_sos = sosfiltfilt(sos, x) assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order) def filtfilt_gust_opt(b, a, x): """ An alternative implementation of filtfilt with Gustafsson edges. This function computes the same result as `scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays are accepted. The problem is solved using `fmin` from `scipy.optimize`. `_filtfilt_gust` is significanly faster than this implementation. """ def filtfilt_gust_opt_func(ics, b, a, x): """Objective function used in filtfilt_gust_opt.""" m = max(len(a), len(b)) - 1 z0f = ics[:m] z0b = ics[m:] y_f = lfilter(b, a, x, zi=z0f)[0] y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1] y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y_bf = lfilter(b, a, y_b, zi=z0f)[0] value = np.sum((y_fb - y_bf)**2) return value m = max(len(a), len(b)) - 1 zi = lfilter_zi(b, a) ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi)) result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x), xtol=1e-10, ftol=1e-12, maxfun=10000, maxiter=10000, full_output=True, disp=False) opt, fopt, niter, funcalls, warnflag = result if warnflag > 0: raise RuntimeError("minimization failed in filtfilt_gust_opt: " "warnflag=%d" % warnflag) z0f = opt[:m] z0b = opt[m:] # Apply the forward-backward filter using the computed initial # conditions. y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y = lfilter(b, a, y_b, zi=z0f)[0] return y, z0f, z0b def check_filtfilt_gust(b, a, shape, axis, irlen=None): # Generate x, the data to be filtered. np.random.seed(123) x = np.random.randn(*shape) # Apply filtfilt to x. This is the main calculation to be checked. y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) # Also call the private function so we can test the ICs. yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) # filtfilt_gust_opt is an independent implementation that gives the # expected result, but it only handles 1-d arrays, so use some looping # and reshaping shenanigans to create the expected output arrays. 
xx = np.swapaxes(x, axis, -1) out_shape = xx.shape[:-1] yo = np.empty_like(xx) m = max(len(a), len(b)) - 1 zo1 = np.empty(out_shape + (m,)) zo2 = np.empty(out_shape + (m,)) for indx in product(*[range(d) for d in out_shape]): yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) yo = np.swapaxes(yo, -1, axis) zo1 = np.swapaxes(zo1, -1, axis) zo2 = np.swapaxes(zo2, -1, axis) assert_allclose(y, yo, rtol=1e-9, atol=1e-10) assert_allclose(yg, yo, rtol=1e-9, atol=1e-10) assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10) assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10) def test_choose_conv_method(): for mode in ['valid', 'same', 'full']: for ndims in [1, 2]: n, k, true_method = 8, 6, 'direct' x = np.random.randn(*((n,) * ndims)) h = np.random.randn(*((k,) * ndims)) method = choose_conv_method(x, h, mode=mode) assert_equal(method, true_method) method_try, times = choose_conv_method(x, h, mode=mode, measure=True) assert_(method_try in {'fft', 'direct'}) assert_(type(times) is dict) assert_('fft' in times.keys() and 'direct' in times.keys()) n = 10 for not_fft_conv_supp in ["complex256", "complex192"]: if hasattr(np, not_fft_conv_supp): x = np.ones(n, dtype=not_fft_conv_supp) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = np.array([2**51], dtype=np.int64) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = [Decimal(3), Decimal(2)] h = [Decimal(1), Decimal(4)] assert_equal(choose_conv_method(x, h, mode=mode), 'direct') def test_filtfilt_gust(): # Design a filter. z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') # Find the approximate impulse response length of the filter. eps = 1e-10 r = np.max(np.abs(p)) approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) np.random.seed(123) b, a = zpk2tf(z, p, k) for irlen in [None, approx_impulse_len]: signal_len = 5 * approx_impulse_len # 1-d test case check_filtfilt_gust(b, a, (signal_len,), 0, irlen) # 3-d test case; test each axis. for axis in range(3): shape = [2, 2, 2] shape[axis] = signal_len check_filtfilt_gust(b, a, shape, axis, irlen) # Test case with length less than 2*approx_impulse_len. # In this case, `filtfilt_gust` should behave the same as if # `irlen=None` was given. length = 2*approx_impulse_len - 50 check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) class TestDecimate(object): def test_bad_args(self): x = np.arange(12) assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) def test_basic_IIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_basic_FIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_shape(self): # Regression test for ticket #1480. 
z = np.zeros((30, 30)) d0 = signal.decimate(z, 2, axis=0, zero_phase=False) assert_equal(d0.shape, (15, 30)) d1 = signal.decimate(z, 2, axis=1, zero_phase=False) assert_equal(d1.shape, (30, 15)) def test_phaseshift_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=False) def test_zero_phase_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=True) def test_phaseshift_IIR(self): self._test_phaseshift(method='iir', zero_phase=False) def test_zero_phase_IIR(self): self._test_phaseshift(method='iir', zero_phase=True) def _test_phaseshift(self, method, zero_phase): rate = 120 rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 t_tot = int(100) # Need to let antialiasing filters settle t = np.arange(rate*t_tot+1) / float(rate) # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts freqs = np.array(rates_to) * 0.8 / 2 d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) * signal.windows.tukey(t.size, 0.1)) for rate_to in rates_to: q = rate // rate_to t_to = np.arange(rate_to*t_tot+1) / float(rate_to) d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) * signal.windows.tukey(t_to.size, 0.1)) # Set up downsampling filters, match v0.17 defaults if method == 'fir': n = 30 system = signal.dlti(signal.firwin(n + 1, 1. / q, window='hamming'), 1.) elif method == 'iir': n = 8 wc = 0.8*np.pi/q system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) # Calculate expected phase response, as unit complex vector if zero_phase is False: _, h_resps = signal.freqz(system.num, system.den, freqs/rate*2*np.pi) h_resps /= np.abs(h_resps) else: h_resps = np.ones_like(freqs) y_resamps = signal.decimate(d.real, q, n, ftype=system, zero_phase=zero_phase) # Get phase from complex inner product, like CSD h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) h_resamps /= np.abs(h_resamps) subnyq = freqs < 0.5*rate_to # Complex vectors should be aligned, only compare below nyquist assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, atol=1e-3, rtol=1e-3) def test_auto_n(self): # Test that our value of n is a reasonable choice (depends on # the downsampling factor) sfreq = 100. n = 1000 t = np.arange(n) / sfreq # will alias for decimations (>= 15) x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) 
* t) assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) x_out = signal.decimate(x, 30, ftype='fir') assert_array_less(np.linalg.norm(x_out), 0.01) class TestHilbert(object): def test_bad_args(self): x = np.array([1.0 + 0.0j]) assert_raises(ValueError, hilbert, x) x = np.arange(8.0) assert_raises(ValueError, hilbert, x, N=0) def test_hilbert_theoretical(self): # test cases by Ariel Rokem decimal = 14 pi = np.pi t = np.arange(0, 2 * pi, pi / 256) a0 = np.sin(t) a1 = np.cos(t) a2 = np.sin(2 * t) a3 = np.cos(2 * t) a = np.vstack([a0, a1, a2, a3]) h = hilbert(a) h_abs = np.abs(h) h_angle = np.angle(h) h_real = np.real(h) # The real part should be equal to the original signals: assert_almost_equal(h_real, a, decimal) # The absolute value should be one everywhere, for this input: assert_almost_equal(h_abs, np.ones(a.shape), decimal) # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in # the first 256 bins: assert_almost_equal(h_angle[0, :256], np.arange(-pi / 2, pi / 2, pi / 256), decimal) # For the 'slow' cosine - the phase should go from 0 to pi in the # same interval: assert_almost_equal( h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) # The 'fast' sine should make this phase transition in half the time: assert_almost_equal(h_angle[2, :128], np.arange(-pi / 2, pi / 2, pi / 128), decimal) # Ditto for the 'fast' cosine: assert_almost_equal( h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia assert_almost_equal(h[1].imag, a0, decimal) def test_hilbert_axisN(self): # tests for axis and N arguments a = np.arange(18).reshape(3, 6) # test axis aa = hilbert(a, axis=-1) assert_equal(hilbert(a.T, axis=0), aa.T) # test 1d assert_almost_equal(hilbert(a[0]), aa[0], 14) # test N aan = hilbert(a, N=20, axis=-1) assert_equal(aan.shape, [3, 20]) assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) # the next test is just a regression test, # no idea whether numbers make sense a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, 1.000000000000000e+00 - 2.047794505137069j, 1.999999999999999e+00 - 2.244055555687583j, 3.000000000000000e+00 - 1.262750302935009j, 4.000000000000000e+00 - 1.066489252384493j, 5.000000000000000e+00 + 2.918022706971047j, 8.881784197001253e-17 + 3.845658908989067j, -9.444121133484362e-17 + 0.985044202202061j, -1.776356839400251e-16 + 1.332257797702019j, -3.996802888650564e-16 + 0.501905089898885j, 1.332267629550188e-16 + 0.668696078880782j, -1.192678053963799e-16 + 0.235487067862679j, -1.776356839400251e-16 + 0.286439612812121j, 3.108624468950438e-16 + 0.031676888064907j, 1.332267629550188e-16 - 0.019275656884536j, -2.360035624836702e-16 - 0.1652588660287j, 0.000000000000000e+00 - 0.332049855010597j, 3.552713678800501e-16 - 0.403810179797771j, 8.881784197001253e-17 - 0.751023775297729j, 9.444121133484362e-17 - 0.79252210110103j]) assert_almost_equal(aan[0], a0hilb, 14, 'N regression') class TestHilbert2(object): def test_bad_args(self): # x must be real. x = np.array([[1.0 + 0.0j]]) assert_raises(ValueError, hilbert2, x) # x must be rank 2. x = np.arange(24).reshape(2, 3, 4) assert_raises(ValueError, hilbert2, x) # Bad value for N. x = np.arange(16).reshape(4, 4) assert_raises(ValueError, hilbert2, x, N=0) assert_raises(ValueError, hilbert2, x, N=(2, 0)) assert_raises(ValueError, hilbert2, x, N=(2,)) class TestPartialFractionExpansion(object): def test_invresz_one_coefficient_bug(self): # Regression test for issue in gh-4646. 
r = [1] p = [2] k = [0] a_expected = [1.0, 0.0] b_expected = [1.0, -2.0] a_observed, b_observed = invresz(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_distinct_roots(self): # This test was inspired by github issue 2496. r = [3 / 10, -1 / 6, -2 / 15] p = [0, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 7, 10, 0] a_observed, b_observed = invres(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') # With the default tolerance, the rtype does not matter # for this example. for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) # With unrealistically large tolerances, repeated roots may be inferred # and the rtype comes into play. ridiculous_tolerance = 1e10 for rtype in rtypes: a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype) def test_invres_repeated_roots(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 9, 24, 20, 0] rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_bad_rtype(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] assert_raises(ValueError, invres, r, p, k, rtype='median') class TestVectorstrength(object): def test_single_1dperiod(self): events = np.array([.5]) period = 5. targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_single_2dperiod(self): events = np.array([.5]) period = [1, 2, 5.] targ_strength = [1.] * 3 targ_phase = np.array([.5, .25, .1]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_array_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_1dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = 2 targ_strength = 1. targ_phase = .125 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_2dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = [1, 2, ] targ_strength = [1.] * 2 targ_phase = np.array([.25, .125]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_1dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = 1 targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_2dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = [1, .5] targ_strength = [1.] 
* 2 targ_phase = np.array([.1, .2]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_1dperiod(self): events = np.array([.25, .5, .75]) period = 1 targ_strength = 1. / 3. targ_phase = .5 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_2dperiod(self): events = np.array([.25, .5, .75]) period = [1., 1., 1., 1.] targ_strength = [1. / 3.] * 4 targ_phase = np.array([.5, .5, .5, .5]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_opposite_1dperiod(self): events = np.array([0, .25, .5, .75]) period = 1. targ_strength = 0 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) def test_opposite_2dperiod(self): events = np.array([0, .25, .5, .75]) period = [1.] * 10 targ_strength = [0.] * 10 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) def test_2d_events_ValueError(self): events = np.array([[1, 2]]) period = 1. assert_raises(ValueError, vectorstrength, events, period) def test_2d_period_ValueError(self): events = 1. period = np.array([[1]]) assert_raises(ValueError, vectorstrength, events, period) def test_zero_period_ValueError(self): events = 1. period = 0 assert_raises(ValueError, vectorstrength, events, period) def test_negative_period_ValueError(self): events = 1. period = -1 assert_raises(ValueError, vectorstrength, events, period) class TestSOSFilt(object): # For sosfilt we only test a single datatype. Since sosfilt wraps # to lfilter under the hood, it's hopefully good enough to ensure # lfilter is extensively tested. dt = np.float64 # The test_rank* tests are pulled from _TestLinearFilter def test_rank1(self): x = np.linspace(0, 5, 6).astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, -0.5]).astype(self.dt) # Test simple IIR y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) # Test simple FIR b = np.array([1, 1]).astype(self.dt) # NOTE: This was changed (rel. to TestLinear...) 
to add a pole @zero: a = np.array([1, 0]).astype(self.dt) y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) b = [1, 1, 0] a = [1, 0, 0] x = np.ones(8) sos = np.concatenate((b, a)) sos.shape = (1, 6) y = sosfilt(sos, x) assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) def test_rank2(self): shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], dtype=self.dt) y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]], dtype=self.dt) y = sosfilt(tf2sos(b, a), x, axis=0) assert_array_almost_equal(y_r2_a0, y) y = sosfilt(tf2sos(b, a), x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank3(self): shape = (4, 3, 2) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) # Test last axis y = sosfilt(tf2sos(b, a), x) for i in range(x.shape[0]): for j in range(x.shape[1]): assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) def test_initial_conditions(self): b1, a1 = signal.butter(2, 0.25, 'low') b2, a2 = signal.butter(2, 0.75, 'low') b3, a3 = signal.butter(2, 0.75, 'low') b = np.convolve(np.convolve(b1, b2), b3) a = np.convolve(np.convolve(a1, a2), a3) sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) x = np.random.rand(50) # Stopping filtering and continuing y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] assert_allclose(y_true, lfilter(b, a, x)) y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2))) y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] assert_allclose(y_true, y_sos) # Use a step function zi = sosfilt_zi(sos) x = np.ones(8) y, zf = sosfilt(sos, x, zi=zi) assert_allclose(y, np.ones(8)) assert_allclose(zf, zi) # Initial condition shape matching x.shape = (1, 1) + x.shape # 3D assert_raises(ValueError, sosfilt, sos, x, zi=zi) zi_nd = zi.copy() zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) assert_raises(ValueError, sosfilt, sos, x, zi=zi_nd[:, :, :, [0, 1, 1]]) y, zf = sosfilt(sos, x, zi=zi_nd) assert_allclose(y[0, 0], np.ones(8)) assert_allclose(zf[:, 0, 0, :], zi) def test_initial_conditions_3d_axis1(self): # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. # Input array is x. x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) # Design a filter in ZPK format and convert to SOS zpk = signal.butter(6, 0.35, output='zpk') sos = zpk2sos(*zpk) nsections = sos.shape[0] # Filter along this axis. axis = 1 # Initial conditions, all zeros. shp = list(x.shape) shp[axis] = 2 shp = [nsections] + shp z0 = np.zeros(shp) # Apply the filter to x. yf, zf = sosfilt(sos, x, axis=axis, zi=z0) # Apply the filter to x in two stages. y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) # y should equal yf, and z2 should equal zf. 
y = np.concatenate((y1, y2), axis=axis) assert_allclose(y, yf, rtol=1e-10, atol=1e-13) assert_allclose(z2, zf, rtol=1e-10, atol=1e-13) # let's try the "step" initial condition zi = sosfilt_zi(sos) zi.shape = [nsections, 1, 2, 1] zi = zi * x[:, 0:1, :] y = sosfilt(sos, x, axis=axis, zi=zi)[0] # check it against the TF form b, a = zpk2tf(*zpk) zi = lfilter_zi(b, a) zi.shape = [1, zi.size, 1] zi = zi * x[:, 0:1, :] y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0] assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13) def test_bad_zi_shape(self): # The shape of zi is checked before using any values in the # arguments, so np.empty is fine for creating the arguments. x = np.empty((3, 15, 3)) sos = np.empty((4, 6)) zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3) assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1) def test_sosfilt_zi(self): sos = signal.butter(6, 0.2, output='sos') zi = sosfilt_zi(sos) y, zf = sosfilt(sos, np.ones(40), zi=zi) assert_allclose(zf, zi, rtol=1e-13) # Expected steady state value of the step response of this filter: ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) assert_allclose(y, ss, rtol=1e-13) class TestDeconvolve(object): def test_basic(self): # From docstring example original = [0, 1, 0, 0, 1, 1, 0, 0] impulse_response = [2, 1] recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] recovered, remainder = signal.deconvolve(recorded, impulse_response) assert_allclose(recovered, original)
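The filtfilt/sosfiltfilt cases above all drive the same public zero-phase filtering API. A minimal, self-contained sketch of that round trip is shown below; the 4th-order Butterworth low-pass, the noisy step input, and the tolerance are illustrative choices made for this example, not values taken from the test suite.

import numpy as np
from scipy import signal

# Illustrative data: a noisy step (not from the test suite).
rng = np.random.RandomState(0)
x = np.concatenate([np.zeros(200), np.ones(200)]) + 0.1 * rng.randn(400)

# The same low-pass filter in transfer-function and second-order-sections form.
b, a = signal.butter(4, 0.1)
sos = signal.butter(4, 0.1, output='sos')

# Forward-backward (zero-phase) filtering with both representations.
y_tf = signal.filtfilt(b, a, x)
y_sos = signal.sosfiltfilt(sos, x)

# The two paths should agree closely, as TestSOSFiltFilt.test_equivalence asserts.
assert np.allclose(y_tf, y_sos, atol=1e-10)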
Eric89GXL/scipy
scipy/signal/tests/test_signaltools.py
scipy/ndimage/filters.py
from collections import namedtuple import numpy as np from . import distributions __all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes'] LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')) def linregress(x, y=None): """ Calculate a linear least-squares regression for two sets of measurements. Parameters ---------- x, y : array_like Two sets of measurements. Both arrays should have the same length. If only x is given (and y=None), then it must be a two-dimensional array where one dimension has length 2. The two sets of measurements are then found by splitting the array along the length-2 dimension. Returns ------- slope : float slope of the regression line intercept : float intercept of the regression line rvalue : float correlation coefficient pvalue : float two-sided p-value for a hypothesis test whose null hypothesis is that the slope is zero, using Wald Test with t-distribution of the test statistic. stderr : float Standard error of the estimated gradient. See also -------- :func:`scipy.optimize.curve_fit` : Use non-linear least squares to fit a function to data. :func:`scipy.optimize.leastsq` : Minimize the sum of squares of a set of equations. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats Generate some data: >>> np.random.seed(12345678) >>> x = np.random.random(10) >>> y = 1.6*x + np.random.random(10) Perform the linear regression: >>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) >>> print("slope: %f intercept: %f" % (slope, intercept)) slope: 1.944864 intercept: 0.268578 To get coefficient of determination (r_squared): >>> print("r-squared: %f" % r_value**2) r-squared: 0.735498 Plot the data along with the fitted line: >>> plt.plot(x, y, 'o', label='original data') >>> plt.plot(x, intercept + slope*x, 'r', label='fitted line') >>> plt.legend() >>> plt.show() """ TINY = 1.0e-20 if y is None: # x is a (2, N) or (N, 2) shaped array_like x = np.asarray(x) if x.shape[0] == 2: x, y = x elif x.shape[1] == 2: x, y = x.T else: msg = ("If only `x` is given as input, it has to be of shape " "(2, N) or (N, 2), provided shape was %s" % str(x.shape)) raise ValueError(msg) else: x = np.asarray(x) y = np.asarray(y) if x.size == 0 or y.size == 0: raise ValueError("Inputs must not be empty.") n = len(x) xmean = np.mean(x, None) ymean = np.mean(y, None) # average sum of squares: ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat r_num = ssxym r_den = np.sqrt(ssxm * ssym) if r_den == 0.0: r = 0.0 else: r = r_num / r_den # test for numerical error propagation if r > 1.0: r = 1.0 elif r < -1.0: r = -1.0 df = n - 2 slope = r_num / ssxm intercept = ymean - slope*xmean if n == 2: # handle case when only two points are passed in if y[0] == y[1]: prob = 1.0 else: prob = 0.0 sterrest = 0.0 else: t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY))) prob = 2 * distributions.t.sf(np.abs(t), df) sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df) return LinregressResult(slope, intercept, r, prob, sterrest) def theilslopes(y, x=None, alpha=0.95): r""" Computes the Theil-Sen estimator for a set of points (x, y). `theilslopes` implements a method for robust linear regression. It computes the slope as the median of all slopes between paired values. Parameters ---------- y : array_like Dependent variable. x : array_like or None, optional Independent variable. If None, use ``arange(len(y))`` instead. alpha : float, optional Confidence degree between 0 and 1. Default is 95% confidence. 
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are interpreted as "find the 90% confidence interval". Returns ------- medslope : float Theil slope. medintercept : float Intercept of the Theil line, as ``median(y) - medslope*median(x)``. lo_slope : float Lower bound of the confidence interval on `medslope`. up_slope : float Upper bound of the confidence interval on `medslope`. See also -------- siegelslopes : a similar technique using repeated medians Notes ----- The implementation of `theilslopes` follows [1]_. The intercept is not defined in [1]_, and here it is defined as ``median(y) - medslope*median(x)``, which is given in [3]_. Other definitions of the intercept exist in the literature. A confidence interval for the intercept is not given as this question is not addressed in [1]_. References ---------- .. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968. .. [2] H. Theil, "A rank-invariant method of linear and polynomial regression analysis I, II and III", Nederl. Akad. Wetensch., Proc. 53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950. .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed., John Wiley and Sons, New York, pp. 493. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> x = np.linspace(-5, 5, num=150) >>> y = x + np.random.normal(size=x.size) >>> y[11:15] += 10 # add outliers >>> y[-5:] -= 7 Compute the slope, intercept and 90% confidence interval. For comparison, also compute the least-squares fit with `linregress`: >>> res = stats.theilslopes(y, x, 0.90) >>> lsq_res = stats.linregress(x, y) Plot the results. The Theil-Sen regression line is shown in red, with the dashed red lines illustrating the confidence interval of the slope (note that the dashed red lines are not the confidence interval of the regression as the confidence interval of the intercept is not included). The green line shows the least-squares fit for comparison. >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, y, 'b.') >>> ax.plot(x, res[1] + res[0] * x, 'r-') >>> ax.plot(x, res[1] + res[2] * x, 'r--') >>> ax.plot(x, res[1] + res[3] * x, 'r--') >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') >>> plt.show() """ # We copy both x and y so we can use _find_repeats. y = np.array(y).flatten() if x is None: x = np.arange(len(y), dtype=float) else: x = np.array(x, dtype=float).flatten() if len(x) != len(y): raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x))) # Compute sorted slopes only when deltax > 0 deltax = x[:, np.newaxis] - x deltay = y[:, np.newaxis] - y slopes = deltay[deltax > 0] / deltax[deltax > 0] slopes.sort() medslope = np.median(slopes) medinter = np.median(y) - medslope * np.median(x) # Now compute confidence intervals if alpha > 0.5: alpha = 1. - alpha z = distributions.norm.ppf(alpha / 2.) # This implements (2.6) from Sen (1968) _, nxreps = _find_repeats(x) _, nyreps = _find_repeats(y) nt = len(slopes) # N in Sen (1968) ny = len(y) # n in Sen (1968) # Equation 2.6 in Sen (1968): sigsq = 1/18. 
* (ny * (ny-1) * (2*ny+5) - sum(k * (k-1) * (2*k + 5) for k in nxreps) - sum(k * (k-1) * (2*k + 5) for k in nyreps)) # Find the confidence interval indices in `slopes` sigma = np.sqrt(sigsq) Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1) Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0) delta = slopes[[Rl, Ru]] return medslope, medinter, delta[0], delta[1] def _find_repeats(arr): # This function assumes it may clobber its input. if len(arr) == 0: return np.array(0, np.float64), np.array(0, np.intp) # XXX This cast was previously needed for the Fortran implementation, # should we ditch it? arr = np.asarray(arr, np.float64).ravel() arr.sort() # Taken from NumPy 1.9's np.unique. change = np.concatenate(([True], arr[1:] != arr[:-1])) unique = arr[change] change_idx = np.concatenate(np.nonzero(change) + ([arr.size],)) freq = np.diff(change_idx) atleast2 = freq > 1 return unique[atleast2], freq[atleast2] def siegelslopes(y, x=None, method="hierarchical"): r""" Computes the Siegel estimator for a set of points (x, y). `siegelslopes` implements a method for robust linear regression using repeated medians (see [1]_) to fit a line to the points (x, y). The method is robust to outliers with an asymptotic breakdown point of 50%. Parameters ---------- y : array_like Dependent variable. x : array_like or None, optional Independent variable. If None, use ``arange(len(y))`` instead. method : {'hierarchical', 'separate'} If 'hierarchical', estimate the intercept using the estimated slope ``medslope`` (default option). If 'separate', estimate the intercept independent of the estimated slope. See Notes for details. Returns ------- medslope : float Estimate of the slope of the regression line. medintercept : float Estimate of the intercept of the regression line. See also -------- theilslopes : a similar technique without repeated medians Notes ----- With ``n = len(y)``, compute ``m_j`` as the median of the slopes from the point ``(x[j], y[j])`` to all other `n-1` points. ``medslope`` is then the median of all slopes ``m_j``. Two ways are given to estimate the intercept in [1]_ which can be chosen via the parameter ``method``. The hierarchical approach uses the estimated slope ``medslope`` and computes ``medintercept`` as the median of ``y - medslope*x``. The other approach estimates the intercept separately as follows: for each point ``(x[j], y[j])``, compute the intercepts of all the `n-1` lines through the remaining points and take the median ``i_j``. ``medintercept`` is the median of the ``i_j``. The implementation computes `n` times the median of a vector of size `n` which can be slow for large vectors. There are more efficient algorithms (see [2]_) which are not implemented here. References ---------- .. [1] A. Siegel, "Robust Regression Using Repeated Medians", Biometrika, Vol. 69, pp. 242-244, 1982. .. [2] A. Stein and M. Werman, "Finding the repeated median regression line", Proceedings of the Third Annual ACM-SIAM Symposium on Discrete Algorithms, pp. 409-413, 1992. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> x = np.linspace(-5, 5, num=150) >>> y = x + np.random.normal(size=x.size) >>> y[11:15] += 10 # add outliers >>> y[-5:] -= 7 Compute the slope and intercept. For comparison, also compute the least-squares fit with `linregress`: >>> res = stats.siegelslopes(y, x) >>> lsq_res = stats.linregress(x, y) Plot the results. The Siegel regression line is shown in red. The green line shows the least-squares fit for comparison. 
>>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, y, 'b.') >>> ax.plot(x, res[1] + res[0] * x, 'r-') >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') >>> plt.show() """ if method not in ['hierarchical', 'separate']: raise ValueError("method can only be 'hierarchical' or 'separate'") y = np.asarray(y).ravel() if x is None: x = np.arange(len(y), dtype=float) else: x = np.asarray(x, dtype=float).ravel() if len(x) != len(y): raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x))) deltax = x[:, np.newaxis] - x deltay = y[:, np.newaxis] - y slopes, intercepts = [], [] for j in range(len(x)): id_nonzero = deltax[j, :] != 0 slopes_j = deltay[j, id_nonzero] / deltax[j, id_nonzero] medslope_j = np.median(slopes_j) slopes.append(medslope_j) if method == 'separate': z = y*x[j] - y[j]*x medintercept_j = np.median(z[id_nonzero] / deltax[j, id_nonzero]) intercepts.append(medintercept_j) medslope = np.median(np.asarray(slopes)) if method == "separate": medinter = np.median(np.asarray(intercepts)) else: medinter = np.median(y - medslope*x) return medslope, medinter
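As a quick illustration of the robustness property described in the theilslopes and siegelslopes notes above, the sketch below fits the same outlier-contaminated data with all three estimators defined in this module; the seed, the data, and the nominal true slope of 2.0 are made up for the example and are not part of the library code.

import numpy as np
from scipy import stats

# Linear data plus a few gross outliers, in the spirit of the docstring examples.
rng = np.random.RandomState(42)
x = np.linspace(-5, 5, 150)
y = 2.0 * x + 1.0 + rng.normal(size=x.size)
y[11:15] += 10  # inject outliers

lsq = stats.linregress(x, y)                    # least squares is pulled by the outliers
ts_slope, ts_inter, lo, up = stats.theilslopes(y, x, 0.90)
sg_slope, sg_inter = stats.siegelslopes(y, x)   # repeated-medians estimate

# The robust slopes should remain close to 2.0, and [lo, up] is the 90%
# confidence interval around the Theil-Sen slope.
print(lsq.slope, ts_slope, sg_slope, (lo, up))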
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import sys from decimal import Decimal from itertools import product import warnings import pytest from pytest import raises as assert_raises from numpy.testing import ( assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_, assert_warns, assert_array_less) from scipy._lib._numpy_compat import suppress_warnings from numpy import array, arange import numpy as np from scipy.ndimage.filters import correlate1d from scipy.optimize import fmin from scipy import signal from scipy.signal import ( correlate, convolve, convolve2d, fftconvolve, choose_conv_method, hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, sosfilt_zi, tf2zpk, BadCoefficients) from scipy.signal.windows import hann from scipy.signal.signaltools import _filtfilt_gust if sys.version_info.major >= 3 and sys.version_info.minor >= 5: from math import gcd else: from fractions import gcd class _TestConvolve(object): def test_basic(self): a = [3, 4, 5, 6, 5, 4] b = [1, 2, 3] c = convolve(a, b) assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) def test_same(self): a = [3, 4, 5] b = [1, 2, 3, 4] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 34])) def test_same_eq(self): a = [3, 4, 5] b = [1, 2, 3] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 22])) def test_complex(self): x = array([1 + 1j, 2 + 1j, 3 + 1j]) y = array([1 + 1j, 2 + 1j]) z = convolve(x, y) assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) def test_zero_rank(self): a = 1289 b = 4567 c = convolve(a, b) assert_equal(c, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) c = convolve(a, b) assert_equal(c, a * b) def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve(a, b) d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) assert_array_equal(c, d) def test_input_swapping(self): small = arange(8).reshape(2, 2, 2) big = 1j * arange(27).reshape(3, 3, 3) big += arange(27)[::-1].reshape(3, 3, 3) out_array = array( [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) assert_array_equal(convolve(small, big, 'full'), out_array) assert_array_equal(convolve(big, small, 'full'), out_array) assert_array_equal(convolve(small, big, 'same'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'same'), out_array[0:3, 0:3, 0:3]) assert_array_equal(convolve(small, big, 'valid'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'valid'), out_array[1:3, 1:3, 1:3]) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, convolve, a, b, mode='spam') assert_raises(ValueError, convolve, 
a, b, mode='eggs', method='fft') assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') class TestConvolve(_TestConvolve): def test_valid_mode2(self): # See gh-5897 a = [1, 2, 3, 6, 5, 3] b = [2, 3, 4, 5, 3, 4, 2, 2, 1] expected = [70, 78, 73, 65] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) a = [1 + 5j, 2 - 1j, 3 + 0j] b = [2 - 3j, 1 + 0j] expected = [2 - 3j, 8 - 10j] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) def test_same_mode(self): a = [1, 2, 3, 3, 1, 2] b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] c = convolve(a, b, 'same') d = array([57, 61, 63, 57, 45, 36]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) def test_convolve_method(self, n=100): types = sum([t for _, t in np.sctypes.items()], []) types = {np.dtype(t).name for t in types} # These types include 'bool' and all precisions (int8, float32, etc) # The removed types throw errors in correlate or fftconvolve for dtype in ['complex256', 'complex192', 'float128', 'float96', 'str', 'void', 'bytes', 'object', 'unicode', 'string']: if dtype in types: types.remove(dtype) args = [(t1, t2, mode) for t1 in types for t2 in types for mode in ['valid', 'full', 'same']] # These are random arrays, which means test is much stronger than # convolving testing by convolving two np.ones arrays np.random.seed(42) array_types = {'i': np.random.choice([0, 1], size=n), 'f': np.random.randn(n)} array_types['b'] = array_types['u'] = array_types['i'] array_types['c'] = array_types['f'] + 0.5j*array_types['f'] for t1, t2, mode in args: x1 = array_types[np.dtype(t1).kind].astype(t1) x2 = array_types[np.dtype(t2).kind].astype(t2) results = {key: convolve(x1, x2, method=key, mode=mode) for key in ['fft', 'direct']} assert_equal(results['fft'].dtype, results['direct'].dtype) if 'bool' in t1 and 'bool' in t2: assert_equal(choose_conv_method(x1, x2), 'direct') continue # Found by experiment. Found approx smallest value for (rtol, atol) # threshold to have tests pass. if any([t in {'complex64', 'float32'} for t in [t1, t2]]): kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} elif 'float16' in [t1, t2]: # atol is default for np.allclose kwargs = {'rtol': 1e-3, 'atol': 1e-8} else: # defaults for np.allclose (different from assert_allclose) kwargs = {'rtol': 1e-5, 'atol': 1e-8} assert_allclose(results['fft'], results['direct'], **kwargs) def test_convolve_method_large_input(self): # This is really a test that convolving two large integers goes to the # direct method even if they're in the fft method. 
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: z = np.array([2**n], dtype=np.int64) fft = convolve(z, z, method='fft') direct = convolve(z, z, method='direct') # this is the case when integer precision gets to us # issue #6076 has more detail, hopefully more tests after resolved if n < 50: assert_equal(fft, direct) assert_equal(fft, 2**(2*n)) assert_equal(direct, 2**(2*n)) def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, convolve, [1], 2, method='direct') assert_raises(ValueError, convolve, 1, [2], method='direct') assert_raises(ValueError, convolve, [1], 2, method='fft') assert_raises(ValueError, convolve, 1, [2], method='fft') assert_raises(ValueError, convolve, [1], [[2]]) assert_raises(ValueError, convolve, [3], 2) class _TestConvolve2d(object): def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) e = convolve2d(a, b) assert_array_equal(e, d) def test_valid_mode(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = [[1, 2, 3], [3, 4, 5]] h = array([[62, 80, 98, 116, 134]]) g = convolve2d(e, f, 'valid') assert_array_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_valid_mode_complx(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) g = convolve2d(e, f, 'valid') assert_array_almost_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_fillvalue(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] fillval = 1 c = convolve2d(a, b, 'full', 'fill', fillval) d = array([[24, 26, 31, 34, 32], [28, 40, 62, 64, 52], [32, 46, 67, 62, 48]]) assert_array_equal(c, d) def test_fillvalue_deprecations(self): # Deprecated 2017-07, scipy version 1.0.0 with suppress_warnings() as sup: sup.filter(np.ComplexWarning, "Casting complex values to real") r = sup.record(DeprecationWarning, "could not cast `fillvalue`") convolve2d([[1]], [[1, 2]], fillvalue=1j) assert_(len(r) == 1) warnings.filterwarnings( "error", message="could not cast `fillvalue`", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=1j) with suppress_warnings(): warnings.filterwarnings( "always", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) warnings.filterwarnings( "error", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) def test_fillvalue_empty(self): # Check that fillvalue being empty raises an error: assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], fillvalue=[]) def test_wrap_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'wrap') d = array([[80, 80, 74, 80, 80], [68, 68, 62, 68, 68], [80, 80, 74, 80, 80]]) assert_array_equal(c, d) def test_sym_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'symm') d = array([[34, 30, 44, 62, 66], [52, 48, 62, 80, 84], [82, 78, 92, 110, 114]]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) class TestConvolve2d(_TestConvolve2d): def test_same_mode(self): e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] g = convolve2d(e, f, 'same') h = array([[22, 28, 34], [80, 98, 116]]) assert_array_equal(g, h) def test_valid_mode2(self): # See gh-5897 e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] expected = [[62, 80, 98, 116, 134]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] expected = [[27 - 1j, 46. + 2j]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) # See gh-5897 out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) def test_consistency_convolve_funcs(self): # Compare np.convolve, signal.convolve, signal.convolve2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.convolve(a, b, mode=mode), signal.convolve(a, b, mode=mode)) assert_almost_equal(np.squeeze( signal.convolve2d([a], [b], mode=mode)), signal.convolve(a, b, mode=mode)) def test_invalid_dims(self): assert_raises(ValueError, convolve2d, 3, 4) assert_raises(ValueError, convolve2d, [3], [4]) assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) class TestFFTConvolve(object): @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_real_axes(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_complex(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_complex_axes(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_real_same(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_real_same_axes(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = 
fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same_axes(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real_same_mode(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) if axes == '': out = fftconvolve(a, b, 'same') else: out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) if axes == '': out = fftconvolve(b, a, 'same') else: out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) def test_real_same_mode_axes(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected_1 = np.tile(expected_1, [2, 1]) expected_2 = np.tile(expected_2, [2, 1]) out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_real(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1]]) def test_valid_mode_real_axes(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_complex(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_valid_mode_complex_axes(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) def test_empty(self): # Regression test for #1745: crashes with 0-length input. assert_(fftconvolve([], []).size == 0) assert_(fftconvolve([5, 6], []).size == 0) assert_(fftconvolve([], [7]).size == 0) def test_zero_rank(self): a = array(4967) b = array(3920) out = fftconvolve(a, b) assert_equal(out, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) out = fftconvolve(a, b) assert_equal(out, a * b) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_random_data(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') if axes == '': out = fftconvolve(a, b, 'full') else: out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_random_data_axes(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [[1, 4], [4, 1], [1, -1], [-1, 1], [-4, 4], [4, -4], [-4, -1], [-1, -4]]) def test_random_data_multidim_axes(self, axes): np.random.seed(1234) a = np.random.rand(123, 222) + 1j * np.random.rand(123, 222) b = np.random.rand(132, 111) + 1j * np.random.rand(132, 111) expected = convolve2d(a, b, 'full') a = a[:, :, None, None, None] b = b[:, :, None, None, None] expected = expected[:, :, None, None, None] a = np.rollaxis(a.swapaxes(0, 2), 1, 5) b = np.rollaxis(b.swapaxes(0, 2), 1, 5) expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5) # use 1 for dimension 2 in a and 3 in b to test broadcasting a = np.tile(a, [2, 1, 3, 1, 1]) b = np.tile(b, [2, 1, 1, 4, 1]) expected = np.tile(expected, [2, 1, 3, 4, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.slow @pytest.mark.parametrize( 'n', list(range(1, 100)) + list(range(1000, 1500)) + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) def test_many_sizes(self, n): a = np.random.rand(n) + 1j * np.random.rand(n) b = np.random.rand(n) + 1j * np.random.rand(n) expected = np.convolve(a, b, 'full') out = fftconvolve(a, b, 'full') assert_allclose(out, expected, atol=1e-10) out = fftconvolve(a, b, 'full', axes=[0]) assert_allclose(out, expected, atol=1e-10) def test_invalid_shapes(self): a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) with assert_raises(ValueError, match="For 'valid' mode, one must be at least " "as 
large as the other in every dimension"): fftconvolve(a, b, mode='valid') def test_invalid_shapes_axes(self): a = np.zeros([5, 6, 2, 1]) b = np.zeros([5, 6, 3, 1]) with assert_raises(ValueError, match=r"incompatible shapes for in1 and in2:" r" \(5L?, 6L?, 2L?, 1L?\) and" r" \(5L?, 6L?, 3L?, 1L?\)"): fftconvolve(a, b, axes=[0, 1]) @pytest.mark.parametrize('a,b', [([1], 2), (1, [2]), ([3], [[2]])]) def test_mismatched_dims(self, a, b): with assert_raises(ValueError, match="in1 and in2 should have the same" " dimensionality"): fftconvolve(a, b) def test_invalid_flags(self): with assert_raises(ValueError, match="acceptable mode flags are 'valid'," " 'same', or 'full'"): fftconvolve([1], [2], mode='chips') with assert_raises(ValueError, match="when provided, axes cannot be empty"): fftconvolve([1], [2], axes=[]) with assert_raises(ValueError, match="when given, axes values must be a scalar" " or vector"): fftconvolve([1], [2], axes=[[1, 2], [3, 4]]) with assert_raises(ValueError, match="when given, axes values must be integers"): fftconvolve([1], [2], axes=[1., 2., 3., 4.]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[1]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[-2]) with assert_raises(ValueError, match="all axes must be unique"): fftconvolve([1], [2], axes=[0, 0]) class TestMedFilt(object): def test_basic(self): f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] d = signal.medfilt(f, [7, 3]) e = signal.medfilt2d(np.array(f, float), [7, 3]) assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]) assert_array_equal(d, e) def test_none(self): # Ticket #1124. Ensure this does not segfault. signal.medfilt(None) # Expand on this test to avoid a regression with possible contiguous # numpy arrays that have odd strides. The stride value below gets # us into wrong memory if used (but it does not need to be used) dummy = np.arange(10, dtype=np.float64) a = dummy[5:6] a.strides = 16 assert_(signal.medfilt(a, 1) == 5.) 
def test_refcounting(self): # Check a refcounting-related crash a = Decimal(123) x = np.array([a, a], dtype=object) if hasattr(sys, 'getrefcount'): n = 2 * sys.getrefcount(a) else: n = 10 # Shouldn't segfault: for j in range(n): signal.medfilt(x) if hasattr(sys, 'getrefcount'): assert_(sys.getrefcount(a) < n) assert_equal(x, [a, a]) class TestWiener(object): def test_basic(self): g = array([[5, 6, 4, 3], [3, 5, 6, 2], [2, 3, 5, 6], [1, 6, 9, 7]], 'd') h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) assert_array_almost_equal(signal.wiener(g), h, decimal=6) assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) class TestResample(object): def test_basic(self): # Some basic tests # Regression test for issue #3603. # window.shape must equal to sig.shape[0] sig = np.arange(128) num = 256 win = signal.get_window(('kaiser', 8.0), 160) assert_raises(ValueError, signal.resample, sig, num, window=win) # Other degenerate conditions assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) assert_raises(ValueError, signal.resample_poly, sig, 1, 0) # test for issue #6505 - should not modify window.shape when axis ≠ 0 sig2 = np.tile(np.arange(160), (2,1)) signal.resample(sig2, num, axis=-1, window=win) assert_(win.shape == (160,)) def test_fft(self): # Test FFT-based resampling self._test_data(method='fft') def test_polyphase(self): # Test polyphase resampling self._test_data(method='polyphase') def test_polyphase_extfilter(self): # Test external specification of downsampling filter self._test_data(method='polyphase', ext=True) def test_mutable_window(self): # Test that a mutable window is not modified impulse = np.zeros(3) window = np.random.RandomState(0).randn(2) window_orig = window.copy() signal.resample_poly(impulse, 5, 1, window=window) assert_array_equal(window, window_orig) def test_output_float32(self): # Test that float32 inputs yield a float32 output x = np.arange(10, dtype=np.float32) h = np.array([1,1,1], dtype=np.float32) y = signal.resample_poly(x, 1, 2, window=h) assert_(y.dtype == np.float32) def _test_data(self, method, ext=False): # Test resampling of sinusoids and random noise (1-sec) rate = 100 rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] # Sinusoids, windowed to avoid edge artifacts t = np.arange(rate) / float(rate) freqs = np.array((1., 10., 40.))[:, np.newaxis] x = np.sin(2 * np.pi * freqs * t) * hann(rate) for rate_to in rates_to: t_to = np.arange(rate_to) / float(rate_to) y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) if method == 'fft': y_resamps = signal.resample(x, rate_to, axis=-1) else: if ext and rate_to != rate: # Match default window design g = gcd(rate_to, rate) up = rate_to // g down = rate // g max_rate = max(up, down) f_c = 1. / max_rate half_len = 10 * max_rate window = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0)) polyargs = {'window': window} else: polyargs = {} y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, **polyargs) for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): if freq >= 0.5 * rate_to: y_to.fill(0.) 
# mostly low-passed away assert_allclose(y_resamp, y_to, atol=1e-3) else: assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=(corr, rate, rate_to)) # Random data rng = np.random.RandomState(0) x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind for rate_to in rates_to: # random data t_to = np.arange(rate_to) / float(rate_to) y_to = np.interp(t_to, t, x) if method == 'fft': y_resamp = signal.resample(x, rate_to) else: y_resamp = signal.resample_poly(x, rate_to, rate) assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=corr) # More tests of fft method (Master 0.18.1 fails these) if method == 'fft': x1 = np.array([1.+0.j,0.+0.j]) y1_test = signal.resample(x1,4) y1_true = np.array([1.+0.j,0.5+0.j,0.+0.j,0.5+0.j]) # upsampling a complex array assert_allclose(y1_test, y1_true, atol=1e-12) x2 = np.array([1.,0.5,0.,0.5]) y2_test = signal.resample(x2,2) # downsampling a real array y2_true = np.array([1.,0.]) assert_allclose(y2_test, y2_true, atol=1e-12) def test_poly_vs_filtfilt(self): # Check that up=1.0 gives same answer as filtfilt + slicing random_state = np.random.RandomState(17) try_types = (int, np.float32, np.complex64, float, complex) size = 10000 down_factors = [2, 11, 79] for dtype in try_types: x = random_state.randn(size).astype(dtype) if dtype in (np.complex64, np.complex128): x += 1j * random_state.randn(size) # resample_poly assumes zeros outside of signl, whereas filtfilt # can only constant-pad. Make them equivalent: x[0] = 0 x[-1] = 0 for down in down_factors: h = signal.firwin(31, 1. / down, window='hamming') yf = filtfilt(h, 1.0, x, padtype='constant')[::down] # Need to pass convolved version of filter to resample_poly, # since filtfilt does forward and backward, but resample_poly # only goes forward hc = convolve(h, h[::-1]) y = signal.resample_poly(x, 1, down, window=hc) assert_allclose(yf, y, atol=1e-7, rtol=1e-7) def test_correlate1d(self): for down in [2, 4]: for nx in range(1, 40, down): for nweights in (32, 33): x = np.random.random((nx,)) weights = np.random.random((nweights,)) y_g = correlate1d(x, weights[::-1], mode='constant') y_s = signal.resample_poly(x, up=1, down=down, window=weights) assert_allclose(y_g[::down], y_s) class TestCSpline1DEval(object): def test_basic(self): y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) x = arange(len(y)) dx = x[1] - x[0] cj = signal.cspline1d(y) x2 = arange(len(y) * 10.0) / 10.0 y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) # make sure interpolated values are on knot points assert_array_almost_equal(y2[::10], y, decimal=5) def test_complex(self): # create some smoothly varying complex signal to interpolate x = np.arange(2) y = np.zeros(x.shape, dtype=np.complex64) T = 10.0 f = 1.0 / T y = np.exp(2.0J * np.pi * f * x) # get the cspline transform cy = signal.cspline1d(y) # determine new test x value and interpolate xnew = np.array([0.5]) ynew = signal.cspline1d_eval(cy, xnew) assert_equal(ynew.dtype, y.dtype) class TestOrderFilt(object): def test_basic(self): assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), [2, 3, 2]) class _TestLinearFilter(object): def generate(self, shape): x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) return self.convert_dtype(x) def convert_dtype(self, arr): if self.dtype == np.dtype('O'): arr = np.asarray(arr) out = np.empty(arr.shape, self.dtype) iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'], [['readonly'],['writeonly']]) for x, y 
in iter: y[...] = self.type(x[()]) return out else: return np.array(arr, self.dtype, copy=False) def test_rank_1_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, -0.5]) y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_IIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([0.5, -0.5]) zi = self.convert_dtype([1, 2]) y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) zf_r = self.convert_dtype([13, -10]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_1_FIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 1, 1]) a = self.convert_dtype([1]) zi = self.convert_dtype([1, 1]) y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) zf_r = self.convert_dtype([9, 5]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_0(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]]) y = lfilter(b, a, x, axis=0) assert_array_almost_equal(y_r2_a0, y) def test_rank_2_IIR_axis_1(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]]) y = lfilter(b, a, x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank_2_IIR_axis_0_init_cond(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((4,1))) y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], [19, -17, 19]]) zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] y, zf = lfilter(b, a, x, axis=1, zi=zi) assert_array_almost_equal(y_r2_a0_1, y) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_1_init_cond(self): x = self.generate((4,3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((1,3))) y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5, 3, 1]]) zf_r = self.convert_dtype([[-23, -23, -23]]) y, zf = lfilter(b, a, x, axis=0, zi=zi) assert_array_almost_equal(y_r2_a0_0, y) assert_array_almost_equal(zf, zf_r) def test_rank_3_IIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_IIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 1 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_3_FIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) 
for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_FIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 2 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1, 1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_zi_pseudobroadcast(self): x = self.generate((4, 5, 20)) b,a = signal.butter(8, 0.2, output='ba') b = self.convert_dtype(b) a = self.convert_dtype(a) zi_size = b.shape[0] - 1 # lfilter requires x.ndim == zi.ndim exactly. However, zi can have # length 1 dimensions. zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) y_full, zf_full = lfilter(b, a, x, zi=zi_full) y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) assert_array_almost_equal(y_sing, y_full) assert_array_almost_equal(zf_full, zf_sing) # lfilter does not prepend ones assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) def test_scalar_a(self): # a can be a scalar. x = self.generate(6) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) y = lfilter(b, a[0], x) assert_array_almost_equal(y, y_r) def test_zi_some_singleton_dims(self): # lfilter doesn't really broadcast (no prepending of 1's). But does # do singleton expansion if x and zi have the same ndim. This was # broken only if a subset of the axes were singletons (gh-4681). 
x = self.convert_dtype(np.zeros((3,2,5), 'l')) b = self.convert_dtype(np.ones(5, 'l')) a = self.convert_dtype(np.array([1,0,0])) zi = np.ones((3,1,4), 'l') zi[1,:,:] *= 2 zi[2,:,:] *= 3 zi = self.convert_dtype(zi) zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) y_expected = np.zeros((3,2,5), 'l') y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] y_expected = self.convert_dtype(y_expected) # IIR y_iir, zf_iir = lfilter(b, a, x, -1, zi) assert_array_almost_equal(y_iir, y_expected) assert_array_almost_equal(zf_iir, zf_expected) # FIR y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) assert_array_almost_equal(y_fir, y_expected) assert_array_almost_equal(zf_fir, zf_expected) def base_bad_size_zi(self, b, a, x, axis, zi): b = self.convert_dtype(b) a = self.convert_dtype(a) x = self.convert_dtype(x) zi = self.convert_dtype(zi) assert_raises(ValueError, lfilter, b, a, x, axis, zi) def test_bad_size_zi(self): # rank 1 x1 = np.arange(6) self.base_bad_size_zi([1], [1], x1, -1, [1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) # rank 2 x2 = np.arange(12).reshape((4,3)) # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) self.base_bad_size_zi([1], [1], x2, 0, [0]) # for each of these there are 5 cases tested (in this order): # 1. not deep enough, right # elements # 2. too deep, right # elements # 3. right depth, right # elements, transposed # 4. right depth, too few elements # 5. 
right depth, too many elements self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) self.base_bad_size_zi([1], [1], x2, 1, [0]) self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) def test_empty_zi(self): # Regression test for #880: empty array for zi crashes. 
x = self.generate((5,)) a = self.convert_dtype([1]) b = self.convert_dtype([1]) zi = self.convert_dtype([]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, x) assert_equal(zf.dtype, self.dtype) assert_equal(zf.size, 0) def test_lfiltic_bad_zi(self): # Regression test for #3699: bad initial conditions a = self.convert_dtype([1]) b = self.convert_dtype([1]) # "y" sets the datatype of zi, so it truncates if int zi = lfiltic(b, a, [1., 0]) zi_1 = lfiltic(b, a, [1, 0]) zi_2 = lfiltic(b, a, [True, False]) assert_array_equal(zi, zi_1) assert_array_equal(zi, zi_2) def test_short_x_FIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([7, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_short_x_IIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1, 1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([-67, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_do_not_modify_a_b_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) b0 = b.copy() a = self.convert_dtype([0.5, -0.5]) a0 = a.copy() y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) def test_do_not_modify_a_b_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, 1]) b0 = b.copy() a = self.convert_dtype([2]) a0 = a.copy() y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) class TestLinearFilterFloat32(_TestLinearFilter): dtype = np.dtype('f') class TestLinearFilterFloat64(_TestLinearFilter): dtype = np.dtype('d') class TestLinearFilterFloatExtended(_TestLinearFilter): dtype = np.dtype('g') class TestLinearFilterComplex64(_TestLinearFilter): dtype = np.dtype('F') class TestLinearFilterComplex128(_TestLinearFilter): dtype = np.dtype('D') class TestLinearFilterComplexExtended(_TestLinearFilter): dtype = np.dtype('G') class TestLinearFilterDecimal(_TestLinearFilter): dtype = np.dtype('O') def type(self, x): return Decimal(str(x)) class TestLinearFilterObject(_TestLinearFilter): dtype = np.dtype('O') type = float def test_lfilter_bad_object(): # lfilter: object arrays with non-numeric objects raise TypeError. # Regression test for ticket #1452. 
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) with assert_raises(ValueError, match='common type'): lfilter([1.], [1., 1.], ['a', 'b', 'c']) def test_lfilter_notimplemented_input(): # Should not crash, gh-7991 assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) @pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, np.uint, int, np.ulonglong, np.ulonglong, np.float32, np.float64, np.longdouble, Decimal]) class TestCorrelateReal(object): def _setup_rank1(self, dt): a = np.linspace(0, 3, 4).astype(dt) b = np.linspace(1, 2, 2).astype(dt) y_r = np.array([0, 2, 5, 8, 3]).astype(dt) return a, b, y_r def equal_tolerance(self, res_dt): # default value of keyword decimal = 6 try: dt_info = np.finfo(res_dt) if hasattr(dt_info, 'resolution'): decimal = int(-0.5*np.log10(dt_info.resolution)) except Exception: pass return decimal def equal_tolerance_fft(self, res_dt): # FFT implementations convert longdouble arguments down to # double so don't expect better precision, see gh-9520 if res_dt == np.longdouble: return self.equal_tolerance(np.double) else: return self.equal_tolerance(res_dt) def test_method(self, dt): if dt == Decimal: method = choose_conv_method([Decimal(4)], [Decimal(3)]) assert_equal(method, 'direct') else: a, b, y_r = self._setup_rank3(dt) y_fft = correlate(a, b, method='fft') y_direct = correlate(a, b, method='direct') assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype)) assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype)) assert_equal(y_fft.dtype, dt) assert_equal(y_direct.dtype, dt) def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r[1:4]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[1:4][::-1]) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r[:-1]) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) def _setup_rank3(self, dt): a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( dt) b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( dt) y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], [46., 432., 1062., 1840., 2672., 1698., 864., 266.], [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], [202., 664., 1290., 1984., 2688., 1590., 712., 150.], [114., 344., 642., 960., 1280., 726., 296., 38.]], [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], [134., 920., 2166., 3680., 5280., 3306., 1640., 474.], [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], [[22., 214., 528., 916., 1332., 846., 430., 132.], [86., 484., 1098., 1832., 2600., 1602., 772., 206.], [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], [230., 692., 1290., 1928., 2568., 1458., 596., 78.], [126., 354., 636., 924., 1212., 654., 234., 0.]]], dtype=dt) return a, b, y_r def test_rank3_valid(self, 
dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1]) assert_equal(y.dtype, dt) def test_rank3_same(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "same") assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2]) assert_equal(y.dtype, dt) def test_rank3_all(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b) assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) class TestCorrelate(object): # Tests that don't depend on dtype def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'}) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, correlate, a, b, mode='spam') assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft') assert_raises(ValueError, correlate, a, b, mode='ham', method='direct') assert_raises(ValueError, correlate, a, b, mode='full', method='bacon') assert_raises(ValueError, correlate, a, b, mode='same', method='bacon') def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, correlate, [1], 2, method='direct') assert_raises(ValueError, correlate, 1, [2], method='direct') assert_raises(ValueError, correlate, [1], 2, method='fft') assert_raises(ValueError, correlate, 1, [2], method='fft') assert_raises(ValueError, correlate, [1], [[2]]) assert_raises(ValueError, correlate, [3], 2) def test_numpy_fastpath(self): a = [1, 2, 3] b = [4, 5] assert_allclose(correlate(a, b, mode='same'), [5, 14, 23]) a = [1, 2, 3] b = [4, 5, 6] assert_allclose(correlate(a, b, mode='same'), [17, 32, 23]) assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12]) assert_allclose(correlate(a, b, mode='valid'), [32]) @pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble]) class TestCorrelateComplex(object): # The decimal precision to be used for comparing results. # This value will be passed as the 'decimal' keyword argument of # assert_array_almost_equal(). # Since correlate may chose to use FFT method which converts # longdoubles to doubles internally don't expect better precision # for longdouble than for double (see gh-9520). 
def decimal(self, dt): if dt == np.clongdouble: dt = np.cdouble return int(2 * np.finfo(dt).precision / 3) def _setup_rank1(self, dt, mode): np.random.seed(9) a = np.random.randn(10).astype(dt) a += 1j * np.random.randn(10).astype(dt) b = np.random.randn(8).astype(dt) b += 1j * np.random.randn(8).astype(dt) y_r = (correlate(a.real, b.real, mode=mode) + correlate(a.imag, b.imag, mode=mode)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + correlate(a.imag, b.real, mode=mode)) return a, b, y_r def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt, 'valid') y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt, 'same') y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt, 'full') y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_swap_full(self, dt): d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) y = correlate(d, k) assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) def test_swap_same(self, dt): d = [0.+0.j, 1.+1.j, 2.+2.j] k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] y = correlate(d, k, mode="same") assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) def test_rank3(self, dt): a = np.random.randn(10, 8, 6).astype(dt) a += 1j * np.random.randn(10, 8, 6).astype(dt) b = np.random.randn(8, 6, 4).astype(dt) b += 1j * np.random.randn(8, 6, 4).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) def test_rank0(self, dt): a = np.array(np.random.randn()).astype(dt) a += 1j * np.array(np.random.randn()).astype(dt) b = np.array(np.random.randn()).astype(dt) b += 1j * np.array(np.random.randn()).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) assert_equal(correlate([1], [2j]), correlate(1, 2j)) assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) assert_equal(correlate([3j], [4]), correlate(3j, 4)) class TestCorrelate2d(object): def test_consistency_correlate_funcs(self): # Compare np.correlate, signal.correlate, signal.correlate2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.correlate(a, b, mode=mode), signal.correlate(a, b, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], mode=mode)), signal.correlate(a, b, mode=mode)) # See gh-5897 if mode == 'valid': assert_almost_equal(np.correlate(b, a, mode=mode), signal.correlate(b, a, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], mode=mode)), signal.correlate(b, a, mode=mode)) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) def test_complex_input(self): assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) class TestLFilterZI(object): def test_basic(self): a = np.array([1.0, -1.0, 0.5]) b = np.array([1.0, 0.0, 2.0]) zi_expected = np.array([5.0, -1.0]) zi = lfilter_zi(b, a) assert_array_almost_equal(zi, zi_expected) def test_scale_invariance(self): # Regression test. There was a bug in which b was not correctly # rescaled when a[0] was nonzero. b = np.array([2, 8, 5]) a = np.array([1, 1, 8]) zi1 = lfilter_zi(b, a) zi2 = lfilter_zi(2*b, 2*a) assert_allclose(zi2, zi1, rtol=1e-12) class TestFiltFilt(object): filtfilt_kind = 'tf' def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None): if self.filtfilt_kind == 'tf': b, a = zpk2tf(*zpk) return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) elif self.filtfilt_kind == 'sos': sos = zpk2sos(*zpk) return sosfiltfilt(sos, x, axis, padtype, padlen) def test_basic(self): zpk = tf2zpk([1, 2, 3], [1, 2, 3]) out = self.filtfilt(zpk, np.arange(12)) assert_allclose(out, arange(12), atol=1e-11) def test_sine(self): rate = 2000 t = np.linspace(0, 1.0, rate + 1) # A signal with low frequency and a high frequency. xlow = np.sin(5 * 2 * np.pi * t) xhigh = np.sin(250 * 2 * np.pi * t) x = xlow + xhigh zpk = butter(8, 0.125, output='zpk') # r is the magnitude of the largest pole. r = np.abs(zpk[1]).max() eps = 1e-5 # n estimates the number of steps for the # transient to decay by a factor of eps. n = int(np.ceil(np.log(eps) / np.log(r))) # High order lowpass filter... y = self.filtfilt(zpk, x, padlen=n) # Result should be just xlow. err = np.abs(y - xlow).max() assert_(err < 1e-4) # A 2D case. x2d = np.vstack([xlow, xlow + xhigh]) y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) assert_equal(y2d.shape, x2d.shape) err = np.abs(y2d - xlow).max() assert_(err < 1e-4) # Use the previous result to check the use of the axis keyword. # (Regression test for ticket #1620) y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) assert_equal(y2d, y2dt.T) def test_axis(self): # Test the 'axis' keyword on a 3D array. x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) zpk = butter(3, 0.125, output='zpk') y0 = self.filtfilt(zpk, x, padlen=0, axis=0) y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) assert_array_equal(y0, np.swapaxes(y1, 0, 1)) y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) assert_array_equal(y0, np.swapaxes(y2, 0, 2)) def test_acoeff(self): if self.filtfilt_kind != 'tf': return # only necessary for TF # test for 'a' coefficient as single number out = signal.filtfilt([.5, .5], 1, np.arange(10)) assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) def test_gust_simple(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The input array has length 2. The exact solution for this case # was computed "by hand". 
x = np.array([1.0, 2.0]) b = np.array([0.5]) a = np.array([1.0, -0.5]) y, z1, z2 = _filtfilt_gust(b, a, x) assert_allclose([z1[0], z2[0]], [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]]) assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1], 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]]) def test_gust_scalars(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The filter coefficients are both scalars, so the filter simply # multiplies its input by b/a. When it is used in filtfilt, the # factor is (b/a)**2. x = np.arange(12) b = 3.0 a = 2.0 y = filtfilt(b, a, x, method="gust") expected = (b/a)**2 * x assert_allclose(y, expected) class TestSOSFiltFilt(TestFiltFilt): filtfilt_kind = 'sos' def test_equivalence(self): """Test equivalence between sosfiltfilt and filtfilt""" x = np.random.RandomState(0).randn(1000) for order in range(1, 6): zpk = signal.butter(order, 0.35, output='zpk') b, a = zpk2tf(*zpk) sos = zpk2sos(*zpk) y = filtfilt(b, a, x) y_sos = sosfiltfilt(sos, x) assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order) def filtfilt_gust_opt(b, a, x): """ An alternative implementation of filtfilt with Gustafsson edges. This function computes the same result as `scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays are accepted. The problem is solved using `fmin` from `scipy.optimize`. `_filtfilt_gust` is significanly faster than this implementation. """ def filtfilt_gust_opt_func(ics, b, a, x): """Objective function used in filtfilt_gust_opt.""" m = max(len(a), len(b)) - 1 z0f = ics[:m] z0b = ics[m:] y_f = lfilter(b, a, x, zi=z0f)[0] y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1] y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y_bf = lfilter(b, a, y_b, zi=z0f)[0] value = np.sum((y_fb - y_bf)**2) return value m = max(len(a), len(b)) - 1 zi = lfilter_zi(b, a) ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi)) result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x), xtol=1e-10, ftol=1e-12, maxfun=10000, maxiter=10000, full_output=True, disp=False) opt, fopt, niter, funcalls, warnflag = result if warnflag > 0: raise RuntimeError("minimization failed in filtfilt_gust_opt: " "warnflag=%d" % warnflag) z0f = opt[:m] z0b = opt[m:] # Apply the forward-backward filter using the computed initial # conditions. y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y = lfilter(b, a, y_b, zi=z0f)[0] return y, z0f, z0b def check_filtfilt_gust(b, a, shape, axis, irlen=None): # Generate x, the data to be filtered. np.random.seed(123) x = np.random.randn(*shape) # Apply filtfilt to x. This is the main calculation to be checked. y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) # Also call the private function so we can test the ICs. yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) # filtfilt_gust_opt is an independent implementation that gives the # expected result, but it only handles 1-d arrays, so use some looping # and reshaping shenanigans to create the expected output arrays. 
xx = np.swapaxes(x, axis, -1) out_shape = xx.shape[:-1] yo = np.empty_like(xx) m = max(len(a), len(b)) - 1 zo1 = np.empty(out_shape + (m,)) zo2 = np.empty(out_shape + (m,)) for indx in product(*[range(d) for d in out_shape]): yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) yo = np.swapaxes(yo, -1, axis) zo1 = np.swapaxes(zo1, -1, axis) zo2 = np.swapaxes(zo2, -1, axis) assert_allclose(y, yo, rtol=1e-9, atol=1e-10) assert_allclose(yg, yo, rtol=1e-9, atol=1e-10) assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10) assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10) def test_choose_conv_method(): for mode in ['valid', 'same', 'full']: for ndims in [1, 2]: n, k, true_method = 8, 6, 'direct' x = np.random.randn(*((n,) * ndims)) h = np.random.randn(*((k,) * ndims)) method = choose_conv_method(x, h, mode=mode) assert_equal(method, true_method) method_try, times = choose_conv_method(x, h, mode=mode, measure=True) assert_(method_try in {'fft', 'direct'}) assert_(type(times) is dict) assert_('fft' in times.keys() and 'direct' in times.keys()) n = 10 for not_fft_conv_supp in ["complex256", "complex192"]: if hasattr(np, not_fft_conv_supp): x = np.ones(n, dtype=not_fft_conv_supp) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = np.array([2**51], dtype=np.int64) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = [Decimal(3), Decimal(2)] h = [Decimal(1), Decimal(4)] assert_equal(choose_conv_method(x, h, mode=mode), 'direct') def test_filtfilt_gust(): # Design a filter. z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') # Find the approximate impulse response length of the filter. eps = 1e-10 r = np.max(np.abs(p)) approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) np.random.seed(123) b, a = zpk2tf(z, p, k) for irlen in [None, approx_impulse_len]: signal_len = 5 * approx_impulse_len # 1-d test case check_filtfilt_gust(b, a, (signal_len,), 0, irlen) # 3-d test case; test each axis. for axis in range(3): shape = [2, 2, 2] shape[axis] = signal_len check_filtfilt_gust(b, a, shape, axis, irlen) # Test case with length less than 2*approx_impulse_len. # In this case, `filtfilt_gust` should behave the same as if # `irlen=None` was given. length = 2*approx_impulse_len - 50 check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) class TestDecimate(object): def test_bad_args(self): x = np.arange(12) assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) def test_basic_IIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_basic_FIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_shape(self): # Regression test for ticket #1480. 
z = np.zeros((30, 30)) d0 = signal.decimate(z, 2, axis=0, zero_phase=False) assert_equal(d0.shape, (15, 30)) d1 = signal.decimate(z, 2, axis=1, zero_phase=False) assert_equal(d1.shape, (30, 15)) def test_phaseshift_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=False) def test_zero_phase_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=True) def test_phaseshift_IIR(self): self._test_phaseshift(method='iir', zero_phase=False) def test_zero_phase_IIR(self): self._test_phaseshift(method='iir', zero_phase=True) def _test_phaseshift(self, method, zero_phase): rate = 120 rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 t_tot = int(100) # Need to let antialiasing filters settle t = np.arange(rate*t_tot+1) / float(rate) # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts freqs = np.array(rates_to) * 0.8 / 2 d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) * signal.windows.tukey(t.size, 0.1)) for rate_to in rates_to: q = rate // rate_to t_to = np.arange(rate_to*t_tot+1) / float(rate_to) d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) * signal.windows.tukey(t_to.size, 0.1)) # Set up downsampling filters, match v0.17 defaults if method == 'fir': n = 30 system = signal.dlti(signal.firwin(n + 1, 1. / q, window='hamming'), 1.) elif method == 'iir': n = 8 wc = 0.8*np.pi/q system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) # Calculate expected phase response, as unit complex vector if zero_phase is False: _, h_resps = signal.freqz(system.num, system.den, freqs/rate*2*np.pi) h_resps /= np.abs(h_resps) else: h_resps = np.ones_like(freqs) y_resamps = signal.decimate(d.real, q, n, ftype=system, zero_phase=zero_phase) # Get phase from complex inner product, like CSD h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) h_resamps /= np.abs(h_resamps) subnyq = freqs < 0.5*rate_to # Complex vectors should be aligned, only compare below nyquist assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, atol=1e-3, rtol=1e-3) def test_auto_n(self): # Test that our value of n is a reasonable choice (depends on # the downsampling factor) sfreq = 100. n = 1000 t = np.arange(n) / sfreq # will alias for decimations (>= 15) x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) 
* t) assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) x_out = signal.decimate(x, 30, ftype='fir') assert_array_less(np.linalg.norm(x_out), 0.01) class TestHilbert(object): def test_bad_args(self): x = np.array([1.0 + 0.0j]) assert_raises(ValueError, hilbert, x) x = np.arange(8.0) assert_raises(ValueError, hilbert, x, N=0) def test_hilbert_theoretical(self): # test cases by Ariel Rokem decimal = 14 pi = np.pi t = np.arange(0, 2 * pi, pi / 256) a0 = np.sin(t) a1 = np.cos(t) a2 = np.sin(2 * t) a3 = np.cos(2 * t) a = np.vstack([a0, a1, a2, a3]) h = hilbert(a) h_abs = np.abs(h) h_angle = np.angle(h) h_real = np.real(h) # The real part should be equal to the original signals: assert_almost_equal(h_real, a, decimal) # The absolute value should be one everywhere, for this input: assert_almost_equal(h_abs, np.ones(a.shape), decimal) # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in # the first 256 bins: assert_almost_equal(h_angle[0, :256], np.arange(-pi / 2, pi / 2, pi / 256), decimal) # For the 'slow' cosine - the phase should go from 0 to pi in the # same interval: assert_almost_equal( h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) # The 'fast' sine should make this phase transition in half the time: assert_almost_equal(h_angle[2, :128], np.arange(-pi / 2, pi / 2, pi / 128), decimal) # Ditto for the 'fast' cosine: assert_almost_equal( h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia assert_almost_equal(h[1].imag, a0, decimal) def test_hilbert_axisN(self): # tests for axis and N arguments a = np.arange(18).reshape(3, 6) # test axis aa = hilbert(a, axis=-1) assert_equal(hilbert(a.T, axis=0), aa.T) # test 1d assert_almost_equal(hilbert(a[0]), aa[0], 14) # test N aan = hilbert(a, N=20, axis=-1) assert_equal(aan.shape, [3, 20]) assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) # the next test is just a regression test, # no idea whether numbers make sense a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, 1.000000000000000e+00 - 2.047794505137069j, 1.999999999999999e+00 - 2.244055555687583j, 3.000000000000000e+00 - 1.262750302935009j, 4.000000000000000e+00 - 1.066489252384493j, 5.000000000000000e+00 + 2.918022706971047j, 8.881784197001253e-17 + 3.845658908989067j, -9.444121133484362e-17 + 0.985044202202061j, -1.776356839400251e-16 + 1.332257797702019j, -3.996802888650564e-16 + 0.501905089898885j, 1.332267629550188e-16 + 0.668696078880782j, -1.192678053963799e-16 + 0.235487067862679j, -1.776356839400251e-16 + 0.286439612812121j, 3.108624468950438e-16 + 0.031676888064907j, 1.332267629550188e-16 - 0.019275656884536j, -2.360035624836702e-16 - 0.1652588660287j, 0.000000000000000e+00 - 0.332049855010597j, 3.552713678800501e-16 - 0.403810179797771j, 8.881784197001253e-17 - 0.751023775297729j, 9.444121133484362e-17 - 0.79252210110103j]) assert_almost_equal(aan[0], a0hilb, 14, 'N regression') class TestHilbert2(object): def test_bad_args(self): # x must be real. x = np.array([[1.0 + 0.0j]]) assert_raises(ValueError, hilbert2, x) # x must be rank 2. x = np.arange(24).reshape(2, 3, 4) assert_raises(ValueError, hilbert2, x) # Bad value for N. x = np.arange(16).reshape(4, 4) assert_raises(ValueError, hilbert2, x, N=0) assert_raises(ValueError, hilbert2, x, N=(2, 0)) assert_raises(ValueError, hilbert2, x, N=(2,)) class TestPartialFractionExpansion(object): def test_invresz_one_coefficient_bug(self): # Regression test for issue in gh-4646. 
r = [1] p = [2] k = [0] a_expected = [1.0, 0.0] b_expected = [1.0, -2.0] a_observed, b_observed = invresz(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_distinct_roots(self): # This test was inspired by github issue 2496. r = [3 / 10, -1 / 6, -2 / 15] p = [0, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 7, 10, 0] a_observed, b_observed = invres(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') # With the default tolerance, the rtype does not matter # for this example. for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) # With unrealistically large tolerances, repeated roots may be inferred # and the rtype comes into play. ridiculous_tolerance = 1e10 for rtype in rtypes: a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype) def test_invres_repeated_roots(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 9, 24, 20, 0] rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_bad_rtype(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] assert_raises(ValueError, invres, r, p, k, rtype='median') class TestVectorstrength(object): def test_single_1dperiod(self): events = np.array([.5]) period = 5. targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_single_2dperiod(self): events = np.array([.5]) period = [1, 2, 5.] targ_strength = [1.] * 3 targ_phase = np.array([.5, .25, .1]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_array_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_1dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = 2 targ_strength = 1. targ_phase = .125 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_2dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = [1, 2, ] targ_strength = [1.] * 2 targ_phase = np.array([.25, .125]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_1dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = 1 targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_2dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = [1, .5] targ_strength = [1.] 
* 2 targ_phase = np.array([.1, .2]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_1dperiod(self): events = np.array([.25, .5, .75]) period = 1 targ_strength = 1. / 3. targ_phase = .5 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_2dperiod(self): events = np.array([.25, .5, .75]) period = [1., 1., 1., 1.] targ_strength = [1. / 3.] * 4 targ_phase = np.array([.5, .5, .5, .5]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_opposite_1dperiod(self): events = np.array([0, .25, .5, .75]) period = 1. targ_strength = 0 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) def test_opposite_2dperiod(self): events = np.array([0, .25, .5, .75]) period = [1.] * 10 targ_strength = [0.] * 10 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) def test_2d_events_ValueError(self): events = np.array([[1, 2]]) period = 1. assert_raises(ValueError, vectorstrength, events, period) def test_2d_period_ValueError(self): events = 1. period = np.array([[1]]) assert_raises(ValueError, vectorstrength, events, period) def test_zero_period_ValueError(self): events = 1. period = 0 assert_raises(ValueError, vectorstrength, events, period) def test_negative_period_ValueError(self): events = 1. period = -1 assert_raises(ValueError, vectorstrength, events, period) class TestSOSFilt(object): # For sosfilt we only test a single datatype. Since sosfilt wraps # to lfilter under the hood, it's hopefully good enough to ensure # lfilter is extensively tested. dt = np.float64 # The test_rank* tests are pulled from _TestLinearFilter def test_rank1(self): x = np.linspace(0, 5, 6).astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, -0.5]).astype(self.dt) # Test simple IIR y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) # Test simple FIR b = np.array([1, 1]).astype(self.dt) # NOTE: This was changed (rel. to TestLinear...) 
to add a pole @zero: a = np.array([1, 0]).astype(self.dt) y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) b = [1, 1, 0] a = [1, 0, 0] x = np.ones(8) sos = np.concatenate((b, a)) sos.shape = (1, 6) y = sosfilt(sos, x) assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) def test_rank2(self): shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], dtype=self.dt) y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]], dtype=self.dt) y = sosfilt(tf2sos(b, a), x, axis=0) assert_array_almost_equal(y_r2_a0, y) y = sosfilt(tf2sos(b, a), x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank3(self): shape = (4, 3, 2) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) # Test last axis y = sosfilt(tf2sos(b, a), x) for i in range(x.shape[0]): for j in range(x.shape[1]): assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) def test_initial_conditions(self): b1, a1 = signal.butter(2, 0.25, 'low') b2, a2 = signal.butter(2, 0.75, 'low') b3, a3 = signal.butter(2, 0.75, 'low') b = np.convolve(np.convolve(b1, b2), b3) a = np.convolve(np.convolve(a1, a2), a3) sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) x = np.random.rand(50) # Stopping filtering and continuing y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] assert_allclose(y_true, lfilter(b, a, x)) y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2))) y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] assert_allclose(y_true, y_sos) # Use a step function zi = sosfilt_zi(sos) x = np.ones(8) y, zf = sosfilt(sos, x, zi=zi) assert_allclose(y, np.ones(8)) assert_allclose(zf, zi) # Initial condition shape matching x.shape = (1, 1) + x.shape # 3D assert_raises(ValueError, sosfilt, sos, x, zi=zi) zi_nd = zi.copy() zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) assert_raises(ValueError, sosfilt, sos, x, zi=zi_nd[:, :, :, [0, 1, 1]]) y, zf = sosfilt(sos, x, zi=zi_nd) assert_allclose(y[0, 0], np.ones(8)) assert_allclose(zf[:, 0, 0, :], zi) def test_initial_conditions_3d_axis1(self): # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. # Input array is x. x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) # Design a filter in ZPK format and convert to SOS zpk = signal.butter(6, 0.35, output='zpk') sos = zpk2sos(*zpk) nsections = sos.shape[0] # Filter along this axis. axis = 1 # Initial conditions, all zeros. shp = list(x.shape) shp[axis] = 2 shp = [nsections] + shp z0 = np.zeros(shp) # Apply the filter to x. yf, zf = sosfilt(sos, x, axis=axis, zi=z0) # Apply the filter to x in two stages. y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) # y should equal yf, and z2 should equal zf. 
        y = np.concatenate((y1, y2), axis=axis)
        assert_allclose(y, yf, rtol=1e-10, atol=1e-13)
        assert_allclose(z2, zf, rtol=1e-10, atol=1e-13)

        # let's try the "step" initial condition
        zi = sosfilt_zi(sos)
        zi.shape = [nsections, 1, 2, 1]
        zi = zi * x[:, 0:1, :]
        y = sosfilt(sos, x, axis=axis, zi=zi)[0]

        # check it against the TF form
        b, a = zpk2tf(*zpk)
        zi = lfilter_zi(b, a)
        zi.shape = [1, zi.size, 1]
        zi = zi * x[:, 0:1, :]
        y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0]
        assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13)

    def test_bad_zi_shape(self):
        # The shape of zi is checked before using any values in the
        # arguments, so np.empty is fine for creating the arguments.
        x = np.empty((3, 15, 3))
        sos = np.empty((4, 6))
        zi = np.empty((4, 3, 3, 2))  # Correct shape is (4, 3, 2, 3)
        assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1)

    def test_sosfilt_zi(self):
        sos = signal.butter(6, 0.2, output='sos')
        zi = sosfilt_zi(sos)

        y, zf = sosfilt(sos, np.ones(40), zi=zi)
        assert_allclose(zf, zi, rtol=1e-13)

        # Expected steady state value of the step response of this filter:
        ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1))
        assert_allclose(y, ss, rtol=1e-13)


class TestDeconvolve(object):

    def test_basic(self):
        # From docstring example
        original = [0, 1, 0, 0, 1, 1, 0, 0]
        impulse_response = [2, 1]
        recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0]
        recovered, remainder = signal.deconvolve(recorded, impulse_response)
        assert_allclose(recovered, original)
repo_name: Eric89GXL/scipy
test_path: scipy/signal/tests/test_signaltools.py
code_path: scipy/stats/_stats_mstats_common.py
from __future__ import division, print_function, absolute_import

import numpy as np
from pytest import raises as assert_raises

from scipy.sparse.linalg import utils


def test_make_system_bad_shape():
    assert_raises(ValueError, utils.make_system,
                  np.zeros((5,3)), None, np.zeros(4), np.zeros(4))
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import sys from decimal import Decimal from itertools import product import warnings import pytest from pytest import raises as assert_raises from numpy.testing import ( assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_, assert_warns, assert_array_less) from scipy._lib._numpy_compat import suppress_warnings from numpy import array, arange import numpy as np from scipy.ndimage.filters import correlate1d from scipy.optimize import fmin from scipy import signal from scipy.signal import ( correlate, convolve, convolve2d, fftconvolve, choose_conv_method, hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, sosfilt_zi, tf2zpk, BadCoefficients) from scipy.signal.windows import hann from scipy.signal.signaltools import _filtfilt_gust if sys.version_info.major >= 3 and sys.version_info.minor >= 5: from math import gcd else: from fractions import gcd class _TestConvolve(object): def test_basic(self): a = [3, 4, 5, 6, 5, 4] b = [1, 2, 3] c = convolve(a, b) assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) def test_same(self): a = [3, 4, 5] b = [1, 2, 3, 4] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 34])) def test_same_eq(self): a = [3, 4, 5] b = [1, 2, 3] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 22])) def test_complex(self): x = array([1 + 1j, 2 + 1j, 3 + 1j]) y = array([1 + 1j, 2 + 1j]) z = convolve(x, y) assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) def test_zero_rank(self): a = 1289 b = 4567 c = convolve(a, b) assert_equal(c, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) c = convolve(a, b) assert_equal(c, a * b) def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve(a, b) d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) assert_array_equal(c, d) def test_input_swapping(self): small = arange(8).reshape(2, 2, 2) big = 1j * arange(27).reshape(3, 3, 3) big += arange(27)[::-1].reshape(3, 3, 3) out_array = array( [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) assert_array_equal(convolve(small, big, 'full'), out_array) assert_array_equal(convolve(big, small, 'full'), out_array) assert_array_equal(convolve(small, big, 'same'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'same'), out_array[0:3, 0:3, 0:3]) assert_array_equal(convolve(small, big, 'valid'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'valid'), out_array[1:3, 1:3, 1:3]) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, convolve, a, b, mode='spam') assert_raises(ValueError, convolve, 
a, b, mode='eggs', method='fft') assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') class TestConvolve(_TestConvolve): def test_valid_mode2(self): # See gh-5897 a = [1, 2, 3, 6, 5, 3] b = [2, 3, 4, 5, 3, 4, 2, 2, 1] expected = [70, 78, 73, 65] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) a = [1 + 5j, 2 - 1j, 3 + 0j] b = [2 - 3j, 1 + 0j] expected = [2 - 3j, 8 - 10j] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) def test_same_mode(self): a = [1, 2, 3, 3, 1, 2] b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] c = convolve(a, b, 'same') d = array([57, 61, 63, 57, 45, 36]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) def test_convolve_method(self, n=100): types = sum([t for _, t in np.sctypes.items()], []) types = {np.dtype(t).name for t in types} # These types include 'bool' and all precisions (int8, float32, etc) # The removed types throw errors in correlate or fftconvolve for dtype in ['complex256', 'complex192', 'float128', 'float96', 'str', 'void', 'bytes', 'object', 'unicode', 'string']: if dtype in types: types.remove(dtype) args = [(t1, t2, mode) for t1 in types for t2 in types for mode in ['valid', 'full', 'same']] # These are random arrays, which means test is much stronger than # convolving testing by convolving two np.ones arrays np.random.seed(42) array_types = {'i': np.random.choice([0, 1], size=n), 'f': np.random.randn(n)} array_types['b'] = array_types['u'] = array_types['i'] array_types['c'] = array_types['f'] + 0.5j*array_types['f'] for t1, t2, mode in args: x1 = array_types[np.dtype(t1).kind].astype(t1) x2 = array_types[np.dtype(t2).kind].astype(t2) results = {key: convolve(x1, x2, method=key, mode=mode) for key in ['fft', 'direct']} assert_equal(results['fft'].dtype, results['direct'].dtype) if 'bool' in t1 and 'bool' in t2: assert_equal(choose_conv_method(x1, x2), 'direct') continue # Found by experiment. Found approx smallest value for (rtol, atol) # threshold to have tests pass. if any([t in {'complex64', 'float32'} for t in [t1, t2]]): kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} elif 'float16' in [t1, t2]: # atol is default for np.allclose kwargs = {'rtol': 1e-3, 'atol': 1e-8} else: # defaults for np.allclose (different from assert_allclose) kwargs = {'rtol': 1e-5, 'atol': 1e-8} assert_allclose(results['fft'], results['direct'], **kwargs) def test_convolve_method_large_input(self): # This is really a test that convolving two large integers goes to the # direct method even if they're in the fft method. 
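        # Note: FFT-based convolution runs in double precision, whose rounding
        # error grows with the magnitude of the values, so for very large
        # integers the FFT result can stop matching the exact direct
        # computation. Exact equality is therefore only asserted for the
        # smaller exponents (n < 50) below.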
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: z = np.array([2**n], dtype=np.int64) fft = convolve(z, z, method='fft') direct = convolve(z, z, method='direct') # this is the case when integer precision gets to us # issue #6076 has more detail, hopefully more tests after resolved if n < 50: assert_equal(fft, direct) assert_equal(fft, 2**(2*n)) assert_equal(direct, 2**(2*n)) def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, convolve, [1], 2, method='direct') assert_raises(ValueError, convolve, 1, [2], method='direct') assert_raises(ValueError, convolve, [1], 2, method='fft') assert_raises(ValueError, convolve, 1, [2], method='fft') assert_raises(ValueError, convolve, [1], [[2]]) assert_raises(ValueError, convolve, [3], 2) class _TestConvolve2d(object): def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) e = convolve2d(a, b) assert_array_equal(e, d) def test_valid_mode(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = [[1, 2, 3], [3, 4, 5]] h = array([[62, 80, 98, 116, 134]]) g = convolve2d(e, f, 'valid') assert_array_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_valid_mode_complx(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) g = convolve2d(e, f, 'valid') assert_array_almost_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_fillvalue(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] fillval = 1 c = convolve2d(a, b, 'full', 'fill', fillval) d = array([[24, 26, 31, 34, 32], [28, 40, 62, 64, 52], [32, 46, 67, 62, 48]]) assert_array_equal(c, d) def test_fillvalue_deprecations(self): # Deprecated 2017-07, scipy version 1.0.0 with suppress_warnings() as sup: sup.filter(np.ComplexWarning, "Casting complex values to real") r = sup.record(DeprecationWarning, "could not cast `fillvalue`") convolve2d([[1]], [[1, 2]], fillvalue=1j) assert_(len(r) == 1) warnings.filterwarnings( "error", message="could not cast `fillvalue`", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=1j) with suppress_warnings(): warnings.filterwarnings( "always", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) warnings.filterwarnings( "error", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) def test_fillvalue_empty(self): # Check that fillvalue being empty raises an error: assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], fillvalue=[]) def test_wrap_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'wrap') d = array([[80, 80, 74, 80, 80], [68, 68, 62, 68, 68], [80, 80, 74, 80, 80]]) assert_array_equal(c, d) def test_sym_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'symm') d = array([[34, 30, 44, 62, 66], [52, 48, 62, 80, 84], [82, 78, 92, 110, 114]]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) class TestConvolve2d(_TestConvolve2d): def test_same_mode(self): e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] g = convolve2d(e, f, 'same') h = array([[22, 28, 34], [80, 98, 116]]) assert_array_equal(g, h) def test_valid_mode2(self): # See gh-5897 e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] expected = [[62, 80, 98, 116, 134]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] expected = [[27 - 1j, 46. + 2j]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) # See gh-5897 out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) def test_consistency_convolve_funcs(self): # Compare np.convolve, signal.convolve, signal.convolve2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.convolve(a, b, mode=mode), signal.convolve(a, b, mode=mode)) assert_almost_equal(np.squeeze( signal.convolve2d([a], [b], mode=mode)), signal.convolve(a, b, mode=mode)) def test_invalid_dims(self): assert_raises(ValueError, convolve2d, 3, 4) assert_raises(ValueError, convolve2d, [3], [4]) assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) class TestFFTConvolve(object): @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_real_axes(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_complex(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_complex_axes(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_real_same(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_real_same_axes(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = 
fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same_axes(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real_same_mode(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) if axes == '': out = fftconvolve(a, b, 'same') else: out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) if axes == '': out = fftconvolve(b, a, 'same') else: out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) def test_real_same_mode_axes(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected_1 = np.tile(expected_1, [2, 1]) expected_2 = np.tile(expected_2, [2, 1]) out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_real(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1]]) def test_valid_mode_real_axes(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_complex(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_valid_mode_complex_axes(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) def test_empty(self): # Regression test for #1745: crashes with 0-length input. assert_(fftconvolve([], []).size == 0) assert_(fftconvolve([5, 6], []).size == 0) assert_(fftconvolve([], [7]).size == 0) def test_zero_rank(self): a = array(4967) b = array(3920) out = fftconvolve(a, b) assert_equal(out, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) out = fftconvolve(a, b) assert_equal(out, a * b) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_random_data(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') if axes == '': out = fftconvolve(a, b, 'full') else: out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_random_data_axes(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [[1, 4], [4, 1], [1, -1], [-1, 1], [-4, 4], [4, -4], [-4, -1], [-1, -4]]) def test_random_data_multidim_axes(self, axes): np.random.seed(1234) a = np.random.rand(123, 222) + 1j * np.random.rand(123, 222) b = np.random.rand(132, 111) + 1j * np.random.rand(132, 111) expected = convolve2d(a, b, 'full') a = a[:, :, None, None, None] b = b[:, :, None, None, None] expected = expected[:, :, None, None, None] a = np.rollaxis(a.swapaxes(0, 2), 1, 5) b = np.rollaxis(b.swapaxes(0, 2), 1, 5) expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5) # use 1 for dimension 2 in a and 3 in b to test broadcasting a = np.tile(a, [2, 1, 3, 1, 1]) b = np.tile(b, [2, 1, 1, 4, 1]) expected = np.tile(expected, [2, 1, 3, 4, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.slow @pytest.mark.parametrize( 'n', list(range(1, 100)) + list(range(1000, 1500)) + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) def test_many_sizes(self, n): a = np.random.rand(n) + 1j * np.random.rand(n) b = np.random.rand(n) + 1j * np.random.rand(n) expected = np.convolve(a, b, 'full') out = fftconvolve(a, b, 'full') assert_allclose(out, expected, atol=1e-10) out = fftconvolve(a, b, 'full', axes=[0]) assert_allclose(out, expected, atol=1e-10) def test_invalid_shapes(self): a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) with assert_raises(ValueError, match="For 'valid' mode, one must be at least " "as 
large as the other in every dimension"): fftconvolve(a, b, mode='valid') def test_invalid_shapes_axes(self): a = np.zeros([5, 6, 2, 1]) b = np.zeros([5, 6, 3, 1]) with assert_raises(ValueError, match=r"incompatible shapes for in1 and in2:" r" \(5L?, 6L?, 2L?, 1L?\) and" r" \(5L?, 6L?, 3L?, 1L?\)"): fftconvolve(a, b, axes=[0, 1]) @pytest.mark.parametrize('a,b', [([1], 2), (1, [2]), ([3], [[2]])]) def test_mismatched_dims(self, a, b): with assert_raises(ValueError, match="in1 and in2 should have the same" " dimensionality"): fftconvolve(a, b) def test_invalid_flags(self): with assert_raises(ValueError, match="acceptable mode flags are 'valid'," " 'same', or 'full'"): fftconvolve([1], [2], mode='chips') with assert_raises(ValueError, match="when provided, axes cannot be empty"): fftconvolve([1], [2], axes=[]) with assert_raises(ValueError, match="when given, axes values must be a scalar" " or vector"): fftconvolve([1], [2], axes=[[1, 2], [3, 4]]) with assert_raises(ValueError, match="when given, axes values must be integers"): fftconvolve([1], [2], axes=[1., 2., 3., 4.]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[1]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[-2]) with assert_raises(ValueError, match="all axes must be unique"): fftconvolve([1], [2], axes=[0, 0]) class TestMedFilt(object): def test_basic(self): f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] d = signal.medfilt(f, [7, 3]) e = signal.medfilt2d(np.array(f, float), [7, 3]) assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]) assert_array_equal(d, e) def test_none(self): # Ticket #1124. Ensure this does not segfault. signal.medfilt(None) # Expand on this test to avoid a regression with possible contiguous # numpy arrays that have odd strides. The stride value below gets # us into wrong memory if used (but it does not need to be used) dummy = np.arange(10, dtype=np.float64) a = dummy[5:6] a.strides = 16 assert_(signal.medfilt(a, 1) == 5.) 
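        # (A kernel size of 1 makes the median filter act as the identity, so
        # the single retained element 5.0 is the expected result regardless of
        # the artificial stride set above.)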
def test_refcounting(self): # Check a refcounting-related crash a = Decimal(123) x = np.array([a, a], dtype=object) if hasattr(sys, 'getrefcount'): n = 2 * sys.getrefcount(a) else: n = 10 # Shouldn't segfault: for j in range(n): signal.medfilt(x) if hasattr(sys, 'getrefcount'): assert_(sys.getrefcount(a) < n) assert_equal(x, [a, a]) class TestWiener(object): def test_basic(self): g = array([[5, 6, 4, 3], [3, 5, 6, 2], [2, 3, 5, 6], [1, 6, 9, 7]], 'd') h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) assert_array_almost_equal(signal.wiener(g), h, decimal=6) assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) class TestResample(object): def test_basic(self): # Some basic tests # Regression test for issue #3603. # window.shape must equal to sig.shape[0] sig = np.arange(128) num = 256 win = signal.get_window(('kaiser', 8.0), 160) assert_raises(ValueError, signal.resample, sig, num, window=win) # Other degenerate conditions assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) assert_raises(ValueError, signal.resample_poly, sig, 1, 0) # test for issue #6505 - should not modify window.shape when axis ≠ 0 sig2 = np.tile(np.arange(160), (2,1)) signal.resample(sig2, num, axis=-1, window=win) assert_(win.shape == (160,)) def test_fft(self): # Test FFT-based resampling self._test_data(method='fft') def test_polyphase(self): # Test polyphase resampling self._test_data(method='polyphase') def test_polyphase_extfilter(self): # Test external specification of downsampling filter self._test_data(method='polyphase', ext=True) def test_mutable_window(self): # Test that a mutable window is not modified impulse = np.zeros(3) window = np.random.RandomState(0).randn(2) window_orig = window.copy() signal.resample_poly(impulse, 5, 1, window=window) assert_array_equal(window, window_orig) def test_output_float32(self): # Test that float32 inputs yield a float32 output x = np.arange(10, dtype=np.float32) h = np.array([1,1,1], dtype=np.float32) y = signal.resample_poly(x, 1, 2, window=h) assert_(y.dtype == np.float32) def _test_data(self, method, ext=False): # Test resampling of sinusoids and random noise (1-sec) rate = 100 rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] # Sinusoids, windowed to avoid edge artifacts t = np.arange(rate) / float(rate) freqs = np.array((1., 10., 40.))[:, np.newaxis] x = np.sin(2 * np.pi * freqs * t) * hann(rate) for rate_to in rates_to: t_to = np.arange(rate_to) / float(rate_to) y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) if method == 'fft': y_resamps = signal.resample(x, rate_to, axis=-1) else: if ext and rate_to != rate: # Match default window design g = gcd(rate_to, rate) up = rate_to // g down = rate // g max_rate = max(up, down) f_c = 1. / max_rate half_len = 10 * max_rate window = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0)) polyargs = {'window': window} else: polyargs = {} y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, **polyargs) for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): if freq >= 0.5 * rate_to: y_to.fill(0.) 
# mostly low-passed away assert_allclose(y_resamp, y_to, atol=1e-3) else: assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=(corr, rate, rate_to)) # Random data rng = np.random.RandomState(0) x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind for rate_to in rates_to: # random data t_to = np.arange(rate_to) / float(rate_to) y_to = np.interp(t_to, t, x) if method == 'fft': y_resamp = signal.resample(x, rate_to) else: y_resamp = signal.resample_poly(x, rate_to, rate) assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=corr) # More tests of fft method (Master 0.18.1 fails these) if method == 'fft': x1 = np.array([1.+0.j,0.+0.j]) y1_test = signal.resample(x1,4) y1_true = np.array([1.+0.j,0.5+0.j,0.+0.j,0.5+0.j]) # upsampling a complex array assert_allclose(y1_test, y1_true, atol=1e-12) x2 = np.array([1.,0.5,0.,0.5]) y2_test = signal.resample(x2,2) # downsampling a real array y2_true = np.array([1.,0.]) assert_allclose(y2_test, y2_true, atol=1e-12) def test_poly_vs_filtfilt(self): # Check that up=1.0 gives same answer as filtfilt + slicing random_state = np.random.RandomState(17) try_types = (int, np.float32, np.complex64, float, complex) size = 10000 down_factors = [2, 11, 79] for dtype in try_types: x = random_state.randn(size).astype(dtype) if dtype in (np.complex64, np.complex128): x += 1j * random_state.randn(size) # resample_poly assumes zeros outside of signl, whereas filtfilt # can only constant-pad. Make them equivalent: x[0] = 0 x[-1] = 0 for down in down_factors: h = signal.firwin(31, 1. / down, window='hamming') yf = filtfilt(h, 1.0, x, padtype='constant')[::down] # Need to pass convolved version of filter to resample_poly, # since filtfilt does forward and backward, but resample_poly # only goes forward hc = convolve(h, h[::-1]) y = signal.resample_poly(x, 1, down, window=hc) assert_allclose(yf, y, atol=1e-7, rtol=1e-7) def test_correlate1d(self): for down in [2, 4]: for nx in range(1, 40, down): for nweights in (32, 33): x = np.random.random((nx,)) weights = np.random.random((nweights,)) y_g = correlate1d(x, weights[::-1], mode='constant') y_s = signal.resample_poly(x, up=1, down=down, window=weights) assert_allclose(y_g[::down], y_s) class TestCSpline1DEval(object): def test_basic(self): y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) x = arange(len(y)) dx = x[1] - x[0] cj = signal.cspline1d(y) x2 = arange(len(y) * 10.0) / 10.0 y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) # make sure interpolated values are on knot points assert_array_almost_equal(y2[::10], y, decimal=5) def test_complex(self): # create some smoothly varying complex signal to interpolate x = np.arange(2) y = np.zeros(x.shape, dtype=np.complex64) T = 10.0 f = 1.0 / T y = np.exp(2.0J * np.pi * f * x) # get the cspline transform cy = signal.cspline1d(y) # determine new test x value and interpolate xnew = np.array([0.5]) ynew = signal.cspline1d_eval(cy, xnew) assert_equal(ynew.dtype, y.dtype) class TestOrderFilt(object): def test_basic(self): assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), [2, 3, 2]) class _TestLinearFilter(object): def generate(self, shape): x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) return self.convert_dtype(x) def convert_dtype(self, arr): if self.dtype == np.dtype('O'): arr = np.asarray(arr) out = np.empty(arr.shape, self.dtype) iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'], [['readonly'],['writeonly']]) for x, y 
in iter: y[...] = self.type(x[()]) return out else: return np.array(arr, self.dtype, copy=False) def test_rank_1_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, -0.5]) y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_IIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([0.5, -0.5]) zi = self.convert_dtype([1, 2]) y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) zf_r = self.convert_dtype([13, -10]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_1_FIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 1, 1]) a = self.convert_dtype([1]) zi = self.convert_dtype([1, 1]) y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) zf_r = self.convert_dtype([9, 5]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_0(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]]) y = lfilter(b, a, x, axis=0) assert_array_almost_equal(y_r2_a0, y) def test_rank_2_IIR_axis_1(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]]) y = lfilter(b, a, x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank_2_IIR_axis_0_init_cond(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((4,1))) y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], [19, -17, 19]]) zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] y, zf = lfilter(b, a, x, axis=1, zi=zi) assert_array_almost_equal(y_r2_a0_1, y) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_1_init_cond(self): x = self.generate((4,3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((1,3))) y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5, 3, 1]]) zf_r = self.convert_dtype([[-23, -23, -23]]) y, zf = lfilter(b, a, x, axis=0, zi=zi) assert_array_almost_equal(y_r2_a0_0, y) assert_array_almost_equal(zf, zf_r) def test_rank_3_IIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_IIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 1 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_3_FIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) 
for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_FIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 2 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1, 1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_zi_pseudobroadcast(self): x = self.generate((4, 5, 20)) b,a = signal.butter(8, 0.2, output='ba') b = self.convert_dtype(b) a = self.convert_dtype(a) zi_size = b.shape[0] - 1 # lfilter requires x.ndim == zi.ndim exactly. However, zi can have # length 1 dimensions. zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) y_full, zf_full = lfilter(b, a, x, zi=zi_full) y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) assert_array_almost_equal(y_sing, y_full) assert_array_almost_equal(zf_full, zf_sing) # lfilter does not prepend ones assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) def test_scalar_a(self): # a can be a scalar. x = self.generate(6) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) y = lfilter(b, a[0], x) assert_array_almost_equal(y, y_r) def test_zi_some_singleton_dims(self): # lfilter doesn't really broadcast (no prepending of 1's). But does # do singleton expansion if x and zi have the same ndim. This was # broken only if a subset of the axes were singletons (gh-4681). 
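        # The setup below uses a zero input so the output consists only of the
        # contribution from the initial conditions; each channel's zi is scaled
        # differently (1, 2, 3), which makes any silent mis-broadcasting show
        # up directly in the comparison against y_expected.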
x = self.convert_dtype(np.zeros((3,2,5), 'l')) b = self.convert_dtype(np.ones(5, 'l')) a = self.convert_dtype(np.array([1,0,0])) zi = np.ones((3,1,4), 'l') zi[1,:,:] *= 2 zi[2,:,:] *= 3 zi = self.convert_dtype(zi) zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) y_expected = np.zeros((3,2,5), 'l') y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] y_expected = self.convert_dtype(y_expected) # IIR y_iir, zf_iir = lfilter(b, a, x, -1, zi) assert_array_almost_equal(y_iir, y_expected) assert_array_almost_equal(zf_iir, zf_expected) # FIR y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) assert_array_almost_equal(y_fir, y_expected) assert_array_almost_equal(zf_fir, zf_expected) def base_bad_size_zi(self, b, a, x, axis, zi): b = self.convert_dtype(b) a = self.convert_dtype(a) x = self.convert_dtype(x) zi = self.convert_dtype(zi) assert_raises(ValueError, lfilter, b, a, x, axis, zi) def test_bad_size_zi(self): # rank 1 x1 = np.arange(6) self.base_bad_size_zi([1], [1], x1, -1, [1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) # rank 2 x2 = np.arange(12).reshape((4,3)) # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) self.base_bad_size_zi([1], [1], x2, 0, [0]) # for each of these there are 5 cases tested (in this order): # 1. not deep enough, right # elements # 2. too deep, right # elements # 3. right depth, right # elements, transposed # 4. right depth, too few elements # 5. 
right depth, too many elements self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) self.base_bad_size_zi([1], [1], x2, 1, [0]) self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) def test_empty_zi(self): # Regression test for #880: empty array for zi crashes. 
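        # With len(b) == len(a) == 1 the filter has order zero, so an empty zi
        # is the correct size; lfilter should return y equal to x and an empty
        # zf of the working dtype rather than crashing.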
x = self.generate((5,)) a = self.convert_dtype([1]) b = self.convert_dtype([1]) zi = self.convert_dtype([]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, x) assert_equal(zf.dtype, self.dtype) assert_equal(zf.size, 0) def test_lfiltic_bad_zi(self): # Regression test for #3699: bad initial conditions a = self.convert_dtype([1]) b = self.convert_dtype([1]) # "y" sets the datatype of zi, so it truncates if int zi = lfiltic(b, a, [1., 0]) zi_1 = lfiltic(b, a, [1, 0]) zi_2 = lfiltic(b, a, [True, False]) assert_array_equal(zi, zi_1) assert_array_equal(zi, zi_2) def test_short_x_FIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([7, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_short_x_IIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1, 1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([-67, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_do_not_modify_a_b_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) b0 = b.copy() a = self.convert_dtype([0.5, -0.5]) a0 = a.copy() y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) def test_do_not_modify_a_b_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, 1]) b0 = b.copy() a = self.convert_dtype([2]) a0 = a.copy() y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) class TestLinearFilterFloat32(_TestLinearFilter): dtype = np.dtype('f') class TestLinearFilterFloat64(_TestLinearFilter): dtype = np.dtype('d') class TestLinearFilterFloatExtended(_TestLinearFilter): dtype = np.dtype('g') class TestLinearFilterComplex64(_TestLinearFilter): dtype = np.dtype('F') class TestLinearFilterComplex128(_TestLinearFilter): dtype = np.dtype('D') class TestLinearFilterComplexExtended(_TestLinearFilter): dtype = np.dtype('G') class TestLinearFilterDecimal(_TestLinearFilter): dtype = np.dtype('O') def type(self, x): return Decimal(str(x)) class TestLinearFilterObject(_TestLinearFilter): dtype = np.dtype('O') type = float def test_lfilter_bad_object(): # lfilter: object arrays with non-numeric objects raise TypeError. # Regression test for ticket #1452. 
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) with assert_raises(ValueError, match='common type'): lfilter([1.], [1., 1.], ['a', 'b', 'c']) def test_lfilter_notimplemented_input(): # Should not crash, gh-7991 assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) @pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, np.uint, int, np.ulonglong, np.ulonglong, np.float32, np.float64, np.longdouble, Decimal]) class TestCorrelateReal(object): def _setup_rank1(self, dt): a = np.linspace(0, 3, 4).astype(dt) b = np.linspace(1, 2, 2).astype(dt) y_r = np.array([0, 2, 5, 8, 3]).astype(dt) return a, b, y_r def equal_tolerance(self, res_dt): # default value of keyword decimal = 6 try: dt_info = np.finfo(res_dt) if hasattr(dt_info, 'resolution'): decimal = int(-0.5*np.log10(dt_info.resolution)) except Exception: pass return decimal def equal_tolerance_fft(self, res_dt): # FFT implementations convert longdouble arguments down to # double so don't expect better precision, see gh-9520 if res_dt == np.longdouble: return self.equal_tolerance(np.double) else: return self.equal_tolerance(res_dt) def test_method(self, dt): if dt == Decimal: method = choose_conv_method([Decimal(4)], [Decimal(3)]) assert_equal(method, 'direct') else: a, b, y_r = self._setup_rank3(dt) y_fft = correlate(a, b, method='fft') y_direct = correlate(a, b, method='direct') assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype)) assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype)) assert_equal(y_fft.dtype, dt) assert_equal(y_direct.dtype, dt) def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r[1:4]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[1:4][::-1]) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r[:-1]) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) def _setup_rank3(self, dt): a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( dt) b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( dt) y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], [46., 432., 1062., 1840., 2672., 1698., 864., 266.], [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], [202., 664., 1290., 1984., 2688., 1590., 712., 150.], [114., 344., 642., 960., 1280., 726., 296., 38.]], [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], [134., 920., 2166., 3680., 5280., 3306., 1640., 474.], [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], [[22., 214., 528., 916., 1332., 846., 430., 132.], [86., 484., 1098., 1832., 2600., 1602., 772., 206.], [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], [230., 692., 1290., 1928., 2568., 1458., 596., 78.], [126., 354., 636., 924., 1212., 654., 234., 0.]]], dtype=dt) return a, b, y_r def test_rank3_valid(self, 
dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1]) assert_equal(y.dtype, dt) def test_rank3_same(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "same") assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2]) assert_equal(y.dtype, dt) def test_rank3_all(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b) assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) class TestCorrelate(object): # Tests that don't depend on dtype def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'}) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, correlate, a, b, mode='spam') assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft') assert_raises(ValueError, correlate, a, b, mode='ham', method='direct') assert_raises(ValueError, correlate, a, b, mode='full', method='bacon') assert_raises(ValueError, correlate, a, b, mode='same', method='bacon') def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, correlate, [1], 2, method='direct') assert_raises(ValueError, correlate, 1, [2], method='direct') assert_raises(ValueError, correlate, [1], 2, method='fft') assert_raises(ValueError, correlate, 1, [2], method='fft') assert_raises(ValueError, correlate, [1], [[2]]) assert_raises(ValueError, correlate, [3], 2) def test_numpy_fastpath(self): a = [1, 2, 3] b = [4, 5] assert_allclose(correlate(a, b, mode='same'), [5, 14, 23]) a = [1, 2, 3] b = [4, 5, 6] assert_allclose(correlate(a, b, mode='same'), [17, 32, 23]) assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12]) assert_allclose(correlate(a, b, mode='valid'), [32]) @pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble]) class TestCorrelateComplex(object): # The decimal precision to be used for comparing results. # This value will be passed as the 'decimal' keyword argument of # assert_array_almost_equal(). # Since correlate may chose to use FFT method which converts # longdoubles to doubles internally don't expect better precision # for longdouble than for double (see gh-9520). 
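    # The helper below converts the dtype's precision (in decimal digits) into
    # a deliberately loose comparison tolerance, roughly two thirds of the
    # available digits, so that accumulated rounding error in the FFT path is
    # unlikely to cause spurious failures.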
def decimal(self, dt): if dt == np.clongdouble: dt = np.cdouble return int(2 * np.finfo(dt).precision / 3) def _setup_rank1(self, dt, mode): np.random.seed(9) a = np.random.randn(10).astype(dt) a += 1j * np.random.randn(10).astype(dt) b = np.random.randn(8).astype(dt) b += 1j * np.random.randn(8).astype(dt) y_r = (correlate(a.real, b.real, mode=mode) + correlate(a.imag, b.imag, mode=mode)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + correlate(a.imag, b.real, mode=mode)) return a, b, y_r def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt, 'valid') y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt, 'same') y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt, 'full') y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_swap_full(self, dt): d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) y = correlate(d, k) assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) def test_swap_same(self, dt): d = [0.+0.j, 1.+1.j, 2.+2.j] k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] y = correlate(d, k, mode="same") assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) def test_rank3(self, dt): a = np.random.randn(10, 8, 6).astype(dt) a += 1j * np.random.randn(10, 8, 6).astype(dt) b = np.random.randn(8, 6, 4).astype(dt) b += 1j * np.random.randn(8, 6, 4).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) def test_rank0(self, dt): a = np.array(np.random.randn()).astype(dt) a += 1j * np.array(np.random.randn()).astype(dt) b = np.array(np.random.randn()).astype(dt) b += 1j * np.array(np.random.randn()).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) assert_equal(correlate([1], [2j]), correlate(1, 2j)) assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) assert_equal(correlate([3j], [4]), correlate(3j, 4)) class TestCorrelate2d(object): def test_consistency_correlate_funcs(self): # Compare np.correlate, signal.correlate, signal.correlate2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.correlate(a, b, mode=mode), signal.correlate(a, b, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], mode=mode)), signal.correlate(a, b, mode=mode)) # See gh-5897 if mode == 'valid': assert_almost_equal(np.correlate(b, a, mode=mode), signal.correlate(b, a, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], mode=mode)), signal.correlate(b, a, mode=mode)) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) def test_complex_input(self): assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) class TestLFilterZI(object): def test_basic(self): a = np.array([1.0, -1.0, 0.5]) b = np.array([1.0, 0.0, 2.0]) zi_expected = np.array([5.0, -1.0]) zi = lfilter_zi(b, a) assert_array_almost_equal(zi, zi_expected) def test_scale_invariance(self): # Regression test. There was a bug in which b was not correctly # rescaled when a[0] was nonzero. b = np.array([2, 8, 5]) a = np.array([1, 1, 8]) zi1 = lfilter_zi(b, a) zi2 = lfilter_zi(2*b, 2*a) assert_allclose(zi2, zi1, rtol=1e-12) class TestFiltFilt(object): filtfilt_kind = 'tf' def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None): if self.filtfilt_kind == 'tf': b, a = zpk2tf(*zpk) return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) elif self.filtfilt_kind == 'sos': sos = zpk2sos(*zpk) return sosfiltfilt(sos, x, axis, padtype, padlen) def test_basic(self): zpk = tf2zpk([1, 2, 3], [1, 2, 3]) out = self.filtfilt(zpk, np.arange(12)) assert_allclose(out, arange(12), atol=1e-11) def test_sine(self): rate = 2000 t = np.linspace(0, 1.0, rate + 1) # A signal with low frequency and a high frequency. xlow = np.sin(5 * 2 * np.pi * t) xhigh = np.sin(250 * 2 * np.pi * t) x = xlow + xhigh zpk = butter(8, 0.125, output='zpk') # r is the magnitude of the largest pole. r = np.abs(zpk[1]).max() eps = 1e-5 # n estimates the number of steps for the # transient to decay by a factor of eps. n = int(np.ceil(np.log(eps) / np.log(r))) # High order lowpass filter... y = self.filtfilt(zpk, x, padlen=n) # Result should be just xlow. err = np.abs(y - xlow).max() assert_(err < 1e-4) # A 2D case. x2d = np.vstack([xlow, xlow + xhigh]) y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) assert_equal(y2d.shape, x2d.shape) err = np.abs(y2d - xlow).max() assert_(err < 1e-4) # Use the previous result to check the use of the axis keyword. # (Regression test for ticket #1620) y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) assert_equal(y2d, y2dt.T) def test_axis(self): # Test the 'axis' keyword on a 3D array. x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) zpk = butter(3, 0.125, output='zpk') y0 = self.filtfilt(zpk, x, padlen=0, axis=0) y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) assert_array_equal(y0, np.swapaxes(y1, 0, 1)) y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) assert_array_equal(y0, np.swapaxes(y2, 0, 2)) def test_acoeff(self): if self.filtfilt_kind != 'tf': return # only necessary for TF # test for 'a' coefficient as single number out = signal.filtfilt([.5, .5], 1, np.arange(10)) assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) def test_gust_simple(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The input array has length 2. The exact solution for this case # was computed "by hand". 
x = np.array([1.0, 2.0]) b = np.array([0.5]) a = np.array([1.0, -0.5]) y, z1, z2 = _filtfilt_gust(b, a, x) assert_allclose([z1[0], z2[0]], [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]]) assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1], 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]]) def test_gust_scalars(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The filter coefficients are both scalars, so the filter simply # multiplies its input by b/a. When it is used in filtfilt, the # factor is (b/a)**2. x = np.arange(12) b = 3.0 a = 2.0 y = filtfilt(b, a, x, method="gust") expected = (b/a)**2 * x assert_allclose(y, expected) class TestSOSFiltFilt(TestFiltFilt): filtfilt_kind = 'sos' def test_equivalence(self): """Test equivalence between sosfiltfilt and filtfilt""" x = np.random.RandomState(0).randn(1000) for order in range(1, 6): zpk = signal.butter(order, 0.35, output='zpk') b, a = zpk2tf(*zpk) sos = zpk2sos(*zpk) y = filtfilt(b, a, x) y_sos = sosfiltfilt(sos, x) assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order) def filtfilt_gust_opt(b, a, x): """ An alternative implementation of filtfilt with Gustafsson edges. This function computes the same result as `scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays are accepted. The problem is solved using `fmin` from `scipy.optimize`. `_filtfilt_gust` is significanly faster than this implementation. """ def filtfilt_gust_opt_func(ics, b, a, x): """Objective function used in filtfilt_gust_opt.""" m = max(len(a), len(b)) - 1 z0f = ics[:m] z0b = ics[m:] y_f = lfilter(b, a, x, zi=z0f)[0] y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1] y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y_bf = lfilter(b, a, y_b, zi=z0f)[0] value = np.sum((y_fb - y_bf)**2) return value m = max(len(a), len(b)) - 1 zi = lfilter_zi(b, a) ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi)) result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x), xtol=1e-10, ftol=1e-12, maxfun=10000, maxiter=10000, full_output=True, disp=False) opt, fopt, niter, funcalls, warnflag = result if warnflag > 0: raise RuntimeError("minimization failed in filtfilt_gust_opt: " "warnflag=%d" % warnflag) z0f = opt[:m] z0b = opt[m:] # Apply the forward-backward filter using the computed initial # conditions. y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y = lfilter(b, a, y_b, zi=z0f)[0] return y, z0f, z0b def check_filtfilt_gust(b, a, shape, axis, irlen=None): # Generate x, the data to be filtered. np.random.seed(123) x = np.random.randn(*shape) # Apply filtfilt to x. This is the main calculation to be checked. y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) # Also call the private function so we can test the ICs. yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) # filtfilt_gust_opt is an independent implementation that gives the # expected result, but it only handles 1-d arrays, so use some looping # and reshaping shenanigans to create the expected output arrays. 
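    # Move the filtered axis to the end, run the 1-d reference implementation
    # on every remaining index, then swap the axis back so the reference
    # results line up with the outputs of filtfilt and _filtfilt_gust.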
xx = np.swapaxes(x, axis, -1) out_shape = xx.shape[:-1] yo = np.empty_like(xx) m = max(len(a), len(b)) - 1 zo1 = np.empty(out_shape + (m,)) zo2 = np.empty(out_shape + (m,)) for indx in product(*[range(d) for d in out_shape]): yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) yo = np.swapaxes(yo, -1, axis) zo1 = np.swapaxes(zo1, -1, axis) zo2 = np.swapaxes(zo2, -1, axis) assert_allclose(y, yo, rtol=1e-9, atol=1e-10) assert_allclose(yg, yo, rtol=1e-9, atol=1e-10) assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10) assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10) def test_choose_conv_method(): for mode in ['valid', 'same', 'full']: for ndims in [1, 2]: n, k, true_method = 8, 6, 'direct' x = np.random.randn(*((n,) * ndims)) h = np.random.randn(*((k,) * ndims)) method = choose_conv_method(x, h, mode=mode) assert_equal(method, true_method) method_try, times = choose_conv_method(x, h, mode=mode, measure=True) assert_(method_try in {'fft', 'direct'}) assert_(type(times) is dict) assert_('fft' in times.keys() and 'direct' in times.keys()) n = 10 for not_fft_conv_supp in ["complex256", "complex192"]: if hasattr(np, not_fft_conv_supp): x = np.ones(n, dtype=not_fft_conv_supp) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = np.array([2**51], dtype=np.int64) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = [Decimal(3), Decimal(2)] h = [Decimal(1), Decimal(4)] assert_equal(choose_conv_method(x, h, mode=mode), 'direct') def test_filtfilt_gust(): # Design a filter. z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') # Find the approximate impulse response length of the filter. eps = 1e-10 r = np.max(np.abs(p)) approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) np.random.seed(123) b, a = zpk2tf(z, p, k) for irlen in [None, approx_impulse_len]: signal_len = 5 * approx_impulse_len # 1-d test case check_filtfilt_gust(b, a, (signal_len,), 0, irlen) # 3-d test case; test each axis. for axis in range(3): shape = [2, 2, 2] shape[axis] = signal_len check_filtfilt_gust(b, a, shape, axis, irlen) # Test case with length less than 2*approx_impulse_len. # In this case, `filtfilt_gust` should behave the same as if # `irlen=None` was given. length = 2*approx_impulse_len - 50 check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) class TestDecimate(object): def test_bad_args(self): x = np.arange(12) assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) def test_basic_IIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_basic_FIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_shape(self): # Regression test for ticket #1480. 
z = np.zeros((30, 30)) d0 = signal.decimate(z, 2, axis=0, zero_phase=False) assert_equal(d0.shape, (15, 30)) d1 = signal.decimate(z, 2, axis=1, zero_phase=False) assert_equal(d1.shape, (30, 15)) def test_phaseshift_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=False) def test_zero_phase_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=True) def test_phaseshift_IIR(self): self._test_phaseshift(method='iir', zero_phase=False) def test_zero_phase_IIR(self): self._test_phaseshift(method='iir', zero_phase=True) def _test_phaseshift(self, method, zero_phase): rate = 120 rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 t_tot = int(100) # Need to let antialiasing filters settle t = np.arange(rate*t_tot+1) / float(rate) # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts freqs = np.array(rates_to) * 0.8 / 2 d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) * signal.windows.tukey(t.size, 0.1)) for rate_to in rates_to: q = rate // rate_to t_to = np.arange(rate_to*t_tot+1) / float(rate_to) d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) * signal.windows.tukey(t_to.size, 0.1)) # Set up downsampling filters, match v0.17 defaults if method == 'fir': n = 30 system = signal.dlti(signal.firwin(n + 1, 1. / q, window='hamming'), 1.) elif method == 'iir': n = 8 wc = 0.8*np.pi/q system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) # Calculate expected phase response, as unit complex vector if zero_phase is False: _, h_resps = signal.freqz(system.num, system.den, freqs/rate*2*np.pi) h_resps /= np.abs(h_resps) else: h_resps = np.ones_like(freqs) y_resamps = signal.decimate(d.real, q, n, ftype=system, zero_phase=zero_phase) # Get phase from complex inner product, like CSD h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) h_resamps /= np.abs(h_resamps) subnyq = freqs < 0.5*rate_to # Complex vectors should be aligned, only compare below nyquist assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, atol=1e-3, rtol=1e-3) def test_auto_n(self): # Test that our value of n is a reasonable choice (depends on # the downsampling factor) sfreq = 100. n = 1000 t = np.arange(n) / sfreq # will alias for decimations (>= 15) x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) 
* t) assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) x_out = signal.decimate(x, 30, ftype='fir') assert_array_less(np.linalg.norm(x_out), 0.01) class TestHilbert(object): def test_bad_args(self): x = np.array([1.0 + 0.0j]) assert_raises(ValueError, hilbert, x) x = np.arange(8.0) assert_raises(ValueError, hilbert, x, N=0) def test_hilbert_theoretical(self): # test cases by Ariel Rokem decimal = 14 pi = np.pi t = np.arange(0, 2 * pi, pi / 256) a0 = np.sin(t) a1 = np.cos(t) a2 = np.sin(2 * t) a3 = np.cos(2 * t) a = np.vstack([a0, a1, a2, a3]) h = hilbert(a) h_abs = np.abs(h) h_angle = np.angle(h) h_real = np.real(h) # The real part should be equal to the original signals: assert_almost_equal(h_real, a, decimal) # The absolute value should be one everywhere, for this input: assert_almost_equal(h_abs, np.ones(a.shape), decimal) # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in # the first 256 bins: assert_almost_equal(h_angle[0, :256], np.arange(-pi / 2, pi / 2, pi / 256), decimal) # For the 'slow' cosine - the phase should go from 0 to pi in the # same interval: assert_almost_equal( h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) # The 'fast' sine should make this phase transition in half the time: assert_almost_equal(h_angle[2, :128], np.arange(-pi / 2, pi / 2, pi / 128), decimal) # Ditto for the 'fast' cosine: assert_almost_equal( h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia assert_almost_equal(h[1].imag, a0, decimal) def test_hilbert_axisN(self): # tests for axis and N arguments a = np.arange(18).reshape(3, 6) # test axis aa = hilbert(a, axis=-1) assert_equal(hilbert(a.T, axis=0), aa.T) # test 1d assert_almost_equal(hilbert(a[0]), aa[0], 14) # test N aan = hilbert(a, N=20, axis=-1) assert_equal(aan.shape, [3, 20]) assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) # the next test is just a regression test, # no idea whether numbers make sense a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, 1.000000000000000e+00 - 2.047794505137069j, 1.999999999999999e+00 - 2.244055555687583j, 3.000000000000000e+00 - 1.262750302935009j, 4.000000000000000e+00 - 1.066489252384493j, 5.000000000000000e+00 + 2.918022706971047j, 8.881784197001253e-17 + 3.845658908989067j, -9.444121133484362e-17 + 0.985044202202061j, -1.776356839400251e-16 + 1.332257797702019j, -3.996802888650564e-16 + 0.501905089898885j, 1.332267629550188e-16 + 0.668696078880782j, -1.192678053963799e-16 + 0.235487067862679j, -1.776356839400251e-16 + 0.286439612812121j, 3.108624468950438e-16 + 0.031676888064907j, 1.332267629550188e-16 - 0.019275656884536j, -2.360035624836702e-16 - 0.1652588660287j, 0.000000000000000e+00 - 0.332049855010597j, 3.552713678800501e-16 - 0.403810179797771j, 8.881784197001253e-17 - 0.751023775297729j, 9.444121133484362e-17 - 0.79252210110103j]) assert_almost_equal(aan[0], a0hilb, 14, 'N regression') class TestHilbert2(object): def test_bad_args(self): # x must be real. x = np.array([[1.0 + 0.0j]]) assert_raises(ValueError, hilbert2, x) # x must be rank 2. x = np.arange(24).reshape(2, 3, 4) assert_raises(ValueError, hilbert2, x) # Bad value for N. x = np.arange(16).reshape(4, 4) assert_raises(ValueError, hilbert2, x, N=0) assert_raises(ValueError, hilbert2, x, N=(2, 0)) assert_raises(ValueError, hilbert2, x, N=(2,)) class TestPartialFractionExpansion(object): def test_invresz_one_coefficient_bug(self): # Regression test for issue in gh-4646. 
r = [1] p = [2] k = [0] a_expected = [1.0, 0.0] b_expected = [1.0, -2.0] a_observed, b_observed = invresz(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_distinct_roots(self): # This test was inspired by github issue 2496. r = [3 / 10, -1 / 6, -2 / 15] p = [0, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 7, 10, 0] a_observed, b_observed = invres(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') # With the default tolerance, the rtype does not matter # for this example. for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) # With unrealistically large tolerances, repeated roots may be inferred # and the rtype comes into play. ridiculous_tolerance = 1e10 for rtype in rtypes: a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype) def test_invres_repeated_roots(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 9, 24, 20, 0] rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_bad_rtype(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] assert_raises(ValueError, invres, r, p, k, rtype='median') class TestVectorstrength(object): def test_single_1dperiod(self): events = np.array([.5]) period = 5. targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_single_2dperiod(self): events = np.array([.5]) period = [1, 2, 5.] targ_strength = [1.] * 3 targ_phase = np.array([.5, .25, .1]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_array_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_1dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = 2 targ_strength = 1. targ_phase = .125 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_2dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = [1, 2, ] targ_strength = [1.] * 2 targ_phase = np.array([.25, .125]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_1dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = 1 targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_2dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = [1, .5] targ_strength = [1.] 
* 2 targ_phase = np.array([.1, .2]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_1dperiod(self): events = np.array([.25, .5, .75]) period = 1 targ_strength = 1. / 3. targ_phase = .5 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_2dperiod(self): events = np.array([.25, .5, .75]) period = [1., 1., 1., 1.] targ_strength = [1. / 3.] * 4 targ_phase = np.array([.5, .5, .5, .5]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_opposite_1dperiod(self): events = np.array([0, .25, .5, .75]) period = 1. targ_strength = 0 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) def test_opposite_2dperiod(self): events = np.array([0, .25, .5, .75]) period = [1.] * 10 targ_strength = [0.] * 10 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) def test_2d_events_ValueError(self): events = np.array([[1, 2]]) period = 1. assert_raises(ValueError, vectorstrength, events, period) def test_2d_period_ValueError(self): events = 1. period = np.array([[1]]) assert_raises(ValueError, vectorstrength, events, period) def test_zero_period_ValueError(self): events = 1. period = 0 assert_raises(ValueError, vectorstrength, events, period) def test_negative_period_ValueError(self): events = 1. period = -1 assert_raises(ValueError, vectorstrength, events, period) class TestSOSFilt(object): # For sosfilt we only test a single datatype. Since sosfilt wraps # to lfilter under the hood, it's hopefully good enough to ensure # lfilter is extensively tested. dt = np.float64 # The test_rank* tests are pulled from _TestLinearFilter def test_rank1(self): x = np.linspace(0, 5, 6).astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, -0.5]).astype(self.dt) # Test simple IIR y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) # Test simple FIR b = np.array([1, 1]).astype(self.dt) # NOTE: This was changed (rel. to TestLinear...) 
to add a pole @zero: a = np.array([1, 0]).astype(self.dt) y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) b = [1, 1, 0] a = [1, 0, 0] x = np.ones(8) sos = np.concatenate((b, a)) sos.shape = (1, 6) y = sosfilt(sos, x) assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) def test_rank2(self): shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], dtype=self.dt) y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]], dtype=self.dt) y = sosfilt(tf2sos(b, a), x, axis=0) assert_array_almost_equal(y_r2_a0, y) y = sosfilt(tf2sos(b, a), x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank3(self): shape = (4, 3, 2) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) # Test last axis y = sosfilt(tf2sos(b, a), x) for i in range(x.shape[0]): for j in range(x.shape[1]): assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) def test_initial_conditions(self): b1, a1 = signal.butter(2, 0.25, 'low') b2, a2 = signal.butter(2, 0.75, 'low') b3, a3 = signal.butter(2, 0.75, 'low') b = np.convolve(np.convolve(b1, b2), b3) a = np.convolve(np.convolve(a1, a2), a3) sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) x = np.random.rand(50) # Stopping filtering and continuing y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] assert_allclose(y_true, lfilter(b, a, x)) y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2))) y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] assert_allclose(y_true, y_sos) # Use a step function zi = sosfilt_zi(sos) x = np.ones(8) y, zf = sosfilt(sos, x, zi=zi) assert_allclose(y, np.ones(8)) assert_allclose(zf, zi) # Initial condition shape matching x.shape = (1, 1) + x.shape # 3D assert_raises(ValueError, sosfilt, sos, x, zi=zi) zi_nd = zi.copy() zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) assert_raises(ValueError, sosfilt, sos, x, zi=zi_nd[:, :, :, [0, 1, 1]]) y, zf = sosfilt(sos, x, zi=zi_nd) assert_allclose(y[0, 0], np.ones(8)) assert_allclose(zf[:, 0, 0, :], zi) def test_initial_conditions_3d_axis1(self): # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. # Input array is x. x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) # Design a filter in ZPK format and convert to SOS zpk = signal.butter(6, 0.35, output='zpk') sos = zpk2sos(*zpk) nsections = sos.shape[0] # Filter along this axis. axis = 1 # Initial conditions, all zeros. shp = list(x.shape) shp[axis] = 2 shp = [nsections] + shp z0 = np.zeros(shp) # Apply the filter to x. yf, zf = sosfilt(sos, x, axis=axis, zi=z0) # Apply the filter to x in two stages. y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) # y should equal yf, and z2 should equal zf. 
y = np.concatenate((y1, y2), axis=axis) assert_allclose(y, yf, rtol=1e-10, atol=1e-13) assert_allclose(z2, zf, rtol=1e-10, atol=1e-13) # let's try the "step" initial condition zi = sosfilt_zi(sos) zi.shape = [nsections, 1, 2, 1] zi = zi * x[:, 0:1, :] y = sosfilt(sos, x, axis=axis, zi=zi)[0] # check it against the TF form b, a = zpk2tf(*zpk) zi = lfilter_zi(b, a) zi.shape = [1, zi.size, 1] zi = zi * x[:, 0:1, :] y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0] assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13) def test_bad_zi_shape(self): # The shape of zi is checked before using any values in the # arguments, so np.empty is fine for creating the arguments. x = np.empty((3, 15, 3)) sos = np.empty((4, 6)) zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3) assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1) def test_sosfilt_zi(self): sos = signal.butter(6, 0.2, output='sos') zi = sosfilt_zi(sos) y, zf = sosfilt(sos, np.ones(40), zi=zi) assert_allclose(zf, zi, rtol=1e-13) # Expected steady state value of the step response of this filter: ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) assert_allclose(y, ss, rtol=1e-13) class TestDeconvolve(object): def test_basic(self): # From docstring example original = [0, 1, 0, 0, 1, 1, 0, 0] impulse_response = [2, 1] recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] recovered, remainder = signal.deconvolve(recorded, impulse_response) assert_allclose(recovered, original)
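The test chunk above exercises `scipy.signal.filtfilt` with Gustafsson's edge handling (`method="gust"`, optional `irlen`) alongside the default padded behaviour. Below is a minimal, self-contained usage sketch; it is not part of the dataset row, and the filter order, cutoff, signal, and `irlen=100` are illustrative choices rather than values taken from the tests.

```python
import numpy as np
from scipy import signal

# Design a low-pass Butterworth filter (order and cutoff are arbitrary here).
b, a = signal.butter(4, 0.125)

# A noisy test signal.
rng = np.random.RandomState(0)
x = np.sin(2 * np.pi * 0.01 * np.arange(500)) + 0.1 * rng.randn(500)

# Default zero-phase filtering extends (pads) the signal at the edges.
y_pad = signal.filtfilt(b, a, x)

# Gustafsson's method instead picks initial conditions so the forward-backward
# and backward-forward passes agree; `irlen` optionally bounds the impulse
# response length used in that computation.
y_gust = signal.filtfilt(b, a, x, method="gust", irlen=100)

# The two results differ mainly near the edges of the signal.
print(np.max(np.abs(y_pad - y_gust)))
```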
Eric89GXL/scipy
scipy/signal/tests/test_signaltools.py
scipy/sparse/linalg/isolve/tests/test_utils.py
"""Tests for _sketches.py.""" from __future__ import division, print_function, absolute_import import numpy as np from scipy.linalg import clarkson_woodruff_transform from numpy.testing import assert_ def make_random_dense_gaussian_matrix(n_rows, n_columns, mu=0, sigma=0.01): """ Make some random data with Gaussian distributed values """ np.random.seed(142352345) res = np.random.normal(mu, sigma, n_rows*n_columns) return np.reshape(res, (n_rows, n_columns)) class TestClarksonWoodruffTransform(object): """ Testing the Clarkson Woodruff Transform """ # Big dense matrix dimensions n_matrix_rows = 2000 n_matrix_columns = 100 # Sketch matrix dimensions n_sketch_rows = 100 # Error threshold threshold = 0.1 dense_big_matrix = make_random_dense_gaussian_matrix(n_matrix_rows, n_matrix_columns) def test_sketch_dimensions(self): sketch = clarkson_woodruff_transform(self.dense_big_matrix, self.n_sketch_rows) assert_(sketch.shape == (self.n_sketch_rows, self.dense_big_matrix.shape[1])) def test_sketch_rows_norm(self): # Given the probabilistic nature of the sketches # we run the 'test' multiple times and check that # we pass all/almost all the tries n_errors = 0 seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431, 1302443994, 1521083269, 1501189312, 1126232505, 1533465685] for seed_ in seeds: sketch = clarkson_woodruff_transform(self.dense_big_matrix, self.n_sketch_rows, seed_) # We could use other norms (like L2) err = np.linalg.norm(self.dense_big_matrix) - np.linalg.norm(sketch) if err > self.threshold: n_errors += 1 assert_(n_errors == 0)
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import sys from decimal import Decimal from itertools import product import warnings import pytest from pytest import raises as assert_raises from numpy.testing import ( assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_, assert_warns, assert_array_less) from scipy._lib._numpy_compat import suppress_warnings from numpy import array, arange import numpy as np from scipy.ndimage.filters import correlate1d from scipy.optimize import fmin from scipy import signal from scipy.signal import ( correlate, convolve, convolve2d, fftconvolve, choose_conv_method, hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, sosfilt_zi, tf2zpk, BadCoefficients) from scipy.signal.windows import hann from scipy.signal.signaltools import _filtfilt_gust if sys.version_info.major >= 3 and sys.version_info.minor >= 5: from math import gcd else: from fractions import gcd class _TestConvolve(object): def test_basic(self): a = [3, 4, 5, 6, 5, 4] b = [1, 2, 3] c = convolve(a, b) assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) def test_same(self): a = [3, 4, 5] b = [1, 2, 3, 4] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 34])) def test_same_eq(self): a = [3, 4, 5] b = [1, 2, 3] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 22])) def test_complex(self): x = array([1 + 1j, 2 + 1j, 3 + 1j]) y = array([1 + 1j, 2 + 1j]) z = convolve(x, y) assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) def test_zero_rank(self): a = 1289 b = 4567 c = convolve(a, b) assert_equal(c, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) c = convolve(a, b) assert_equal(c, a * b) def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve(a, b) d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) assert_array_equal(c, d) def test_input_swapping(self): small = arange(8).reshape(2, 2, 2) big = 1j * arange(27).reshape(3, 3, 3) big += arange(27)[::-1].reshape(3, 3, 3) out_array = array( [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) assert_array_equal(convolve(small, big, 'full'), out_array) assert_array_equal(convolve(big, small, 'full'), out_array) assert_array_equal(convolve(small, big, 'same'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'same'), out_array[0:3, 0:3, 0:3]) assert_array_equal(convolve(small, big, 'valid'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'valid'), out_array[1:3, 1:3, 1:3]) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, convolve, a, b, mode='spam') assert_raises(ValueError, convolve, 
a, b, mode='eggs', method='fft') assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') class TestConvolve(_TestConvolve): def test_valid_mode2(self): # See gh-5897 a = [1, 2, 3, 6, 5, 3] b = [2, 3, 4, 5, 3, 4, 2, 2, 1] expected = [70, 78, 73, 65] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) a = [1 + 5j, 2 - 1j, 3 + 0j] b = [2 - 3j, 1 + 0j] expected = [2 - 3j, 8 - 10j] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) def test_same_mode(self): a = [1, 2, 3, 3, 1, 2] b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] c = convolve(a, b, 'same') d = array([57, 61, 63, 57, 45, 36]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) def test_convolve_method(self, n=100): types = sum([t for _, t in np.sctypes.items()], []) types = {np.dtype(t).name for t in types} # These types include 'bool' and all precisions (int8, float32, etc) # The removed types throw errors in correlate or fftconvolve for dtype in ['complex256', 'complex192', 'float128', 'float96', 'str', 'void', 'bytes', 'object', 'unicode', 'string']: if dtype in types: types.remove(dtype) args = [(t1, t2, mode) for t1 in types for t2 in types for mode in ['valid', 'full', 'same']] # These are random arrays, which means test is much stronger than # convolving testing by convolving two np.ones arrays np.random.seed(42) array_types = {'i': np.random.choice([0, 1], size=n), 'f': np.random.randn(n)} array_types['b'] = array_types['u'] = array_types['i'] array_types['c'] = array_types['f'] + 0.5j*array_types['f'] for t1, t2, mode in args: x1 = array_types[np.dtype(t1).kind].astype(t1) x2 = array_types[np.dtype(t2).kind].astype(t2) results = {key: convolve(x1, x2, method=key, mode=mode) for key in ['fft', 'direct']} assert_equal(results['fft'].dtype, results['direct'].dtype) if 'bool' in t1 and 'bool' in t2: assert_equal(choose_conv_method(x1, x2), 'direct') continue # Found by experiment. Found approx smallest value for (rtol, atol) # threshold to have tests pass. if any([t in {'complex64', 'float32'} for t in [t1, t2]]): kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} elif 'float16' in [t1, t2]: # atol is default for np.allclose kwargs = {'rtol': 1e-3, 'atol': 1e-8} else: # defaults for np.allclose (different from assert_allclose) kwargs = {'rtol': 1e-5, 'atol': 1e-8} assert_allclose(results['fft'], results['direct'], **kwargs) def test_convolve_method_large_input(self): # This is really a test that convolving two large integers goes to the # direct method even if they're in the fft method. 
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: z = np.array([2**n], dtype=np.int64) fft = convolve(z, z, method='fft') direct = convolve(z, z, method='direct') # this is the case when integer precision gets to us # issue #6076 has more detail, hopefully more tests after resolved if n < 50: assert_equal(fft, direct) assert_equal(fft, 2**(2*n)) assert_equal(direct, 2**(2*n)) def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, convolve, [1], 2, method='direct') assert_raises(ValueError, convolve, 1, [2], method='direct') assert_raises(ValueError, convolve, [1], 2, method='fft') assert_raises(ValueError, convolve, 1, [2], method='fft') assert_raises(ValueError, convolve, [1], [[2]]) assert_raises(ValueError, convolve, [3], 2) class _TestConvolve2d(object): def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) e = convolve2d(a, b) assert_array_equal(e, d) def test_valid_mode(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = [[1, 2, 3], [3, 4, 5]] h = array([[62, 80, 98, 116, 134]]) g = convolve2d(e, f, 'valid') assert_array_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_valid_mode_complx(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) g = convolve2d(e, f, 'valid') assert_array_almost_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_fillvalue(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] fillval = 1 c = convolve2d(a, b, 'full', 'fill', fillval) d = array([[24, 26, 31, 34, 32], [28, 40, 62, 64, 52], [32, 46, 67, 62, 48]]) assert_array_equal(c, d) def test_fillvalue_deprecations(self): # Deprecated 2017-07, scipy version 1.0.0 with suppress_warnings() as sup: sup.filter(np.ComplexWarning, "Casting complex values to real") r = sup.record(DeprecationWarning, "could not cast `fillvalue`") convolve2d([[1]], [[1, 2]], fillvalue=1j) assert_(len(r) == 1) warnings.filterwarnings( "error", message="could not cast `fillvalue`", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=1j) with suppress_warnings(): warnings.filterwarnings( "always", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) warnings.filterwarnings( "error", message="`fillvalue` must be scalar or an array ", category=DeprecationWarning) assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]], fillvalue=[1, 2]) def test_fillvalue_empty(self): # Check that fillvalue being empty raises an error: assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], fillvalue=[]) def test_wrap_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'wrap') d = array([[80, 80, 74, 80, 80], [68, 68, 62, 68, 68], [80, 80, 74, 80, 80]]) assert_array_equal(c, d) def test_sym_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'symm') d = array([[34, 30, 44, 62, 66], [52, 48, 62, 80, 84], [82, 78, 92, 110, 114]]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) class TestConvolve2d(_TestConvolve2d): def test_same_mode(self): e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] g = convolve2d(e, f, 'same') h = array([[22, 28, 34], [80, 98, 116]]) assert_array_equal(g, h) def test_valid_mode2(self): # See gh-5897 e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] expected = [[62, 80, 98, 116, 134]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] expected = [[27 - 1j, 46. + 2j]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) # See gh-5897 out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) def test_consistency_convolve_funcs(self): # Compare np.convolve, signal.convolve, signal.convolve2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.convolve(a, b, mode=mode), signal.convolve(a, b, mode=mode)) assert_almost_equal(np.squeeze( signal.convolve2d([a], [b], mode=mode)), signal.convolve(a, b, mode=mode)) def test_invalid_dims(self): assert_raises(ValueError, convolve2d, 3, 4) assert_raises(ValueError, convolve2d, [3], [4]) assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) class TestFFTConvolve(object): @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_real_axes(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_complex(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_complex_axes(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_real_same(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_real_same_axes(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = 
fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same_axes(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real_same_mode(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) if axes == '': out = fftconvolve(a, b, 'same') else: out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) if axes == '': out = fftconvolve(b, a, 'same') else: out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) def test_real_same_mode_axes(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected_1 = np.tile(expected_1, [2, 1]) expected_2 = np.tile(expected_2, [2, 1]) out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_real(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1]]) def test_valid_mode_real_axes(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_complex(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_valid_mode_complex_axes(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) def test_empty(self): # Regression test for #1745: crashes with 0-length input. assert_(fftconvolve([], []).size == 0) assert_(fftconvolve([5, 6], []).size == 0) assert_(fftconvolve([], [7]).size == 0) def test_zero_rank(self): a = array(4967) b = array(3920) out = fftconvolve(a, b) assert_equal(out, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) out = fftconvolve(a, b) assert_equal(out, a * b) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_random_data(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') if axes == '': out = fftconvolve(a, b, 'full') else: out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_random_data_axes(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [[1, 4], [4, 1], [1, -1], [-1, 1], [-4, 4], [4, -4], [-4, -1], [-1, -4]]) def test_random_data_multidim_axes(self, axes): np.random.seed(1234) a = np.random.rand(123, 222) + 1j * np.random.rand(123, 222) b = np.random.rand(132, 111) + 1j * np.random.rand(132, 111) expected = convolve2d(a, b, 'full') a = a[:, :, None, None, None] b = b[:, :, None, None, None] expected = expected[:, :, None, None, None] a = np.rollaxis(a.swapaxes(0, 2), 1, 5) b = np.rollaxis(b.swapaxes(0, 2), 1, 5) expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5) # use 1 for dimension 2 in a and 3 in b to test broadcasting a = np.tile(a, [2, 1, 3, 1, 1]) b = np.tile(b, [2, 1, 1, 4, 1]) expected = np.tile(expected, [2, 1, 3, 4, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.slow @pytest.mark.parametrize( 'n', list(range(1, 100)) + list(range(1000, 1500)) + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) def test_many_sizes(self, n): a = np.random.rand(n) + 1j * np.random.rand(n) b = np.random.rand(n) + 1j * np.random.rand(n) expected = np.convolve(a, b, 'full') out = fftconvolve(a, b, 'full') assert_allclose(out, expected, atol=1e-10) out = fftconvolve(a, b, 'full', axes=[0]) assert_allclose(out, expected, atol=1e-10) def test_invalid_shapes(self): a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) with assert_raises(ValueError, match="For 'valid' mode, one must be at least " "as 
large as the other in every dimension"): fftconvolve(a, b, mode='valid') def test_invalid_shapes_axes(self): a = np.zeros([5, 6, 2, 1]) b = np.zeros([5, 6, 3, 1]) with assert_raises(ValueError, match=r"incompatible shapes for in1 and in2:" r" \(5L?, 6L?, 2L?, 1L?\) and" r" \(5L?, 6L?, 3L?, 1L?\)"): fftconvolve(a, b, axes=[0, 1]) @pytest.mark.parametrize('a,b', [([1], 2), (1, [2]), ([3], [[2]])]) def test_mismatched_dims(self, a, b): with assert_raises(ValueError, match="in1 and in2 should have the same" " dimensionality"): fftconvolve(a, b) def test_invalid_flags(self): with assert_raises(ValueError, match="acceptable mode flags are 'valid'," " 'same', or 'full'"): fftconvolve([1], [2], mode='chips') with assert_raises(ValueError, match="when provided, axes cannot be empty"): fftconvolve([1], [2], axes=[]) with assert_raises(ValueError, match="when given, axes values must be a scalar" " or vector"): fftconvolve([1], [2], axes=[[1, 2], [3, 4]]) with assert_raises(ValueError, match="when given, axes values must be integers"): fftconvolve([1], [2], axes=[1., 2., 3., 4.]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[1]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): fftconvolve([1], [2], axes=[-2]) with assert_raises(ValueError, match="all axes must be unique"): fftconvolve([1], [2], axes=[0, 0]) class TestMedFilt(object): def test_basic(self): f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] d = signal.medfilt(f, [7, 3]) e = signal.medfilt2d(np.array(f, float), [7, 3]) assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]) assert_array_equal(d, e) def test_none(self): # Ticket #1124. Ensure this does not segfault. signal.medfilt(None) # Expand on this test to avoid a regression with possible contiguous # numpy arrays that have odd strides. The stride value below gets # us into wrong memory if used (but it does not need to be used) dummy = np.arange(10, dtype=np.float64) a = dummy[5:6] a.strides = 16 assert_(signal.medfilt(a, 1) == 5.) 
def test_refcounting(self): # Check a refcounting-related crash a = Decimal(123) x = np.array([a, a], dtype=object) if hasattr(sys, 'getrefcount'): n = 2 * sys.getrefcount(a) else: n = 10 # Shouldn't segfault: for j in range(n): signal.medfilt(x) if hasattr(sys, 'getrefcount'): assert_(sys.getrefcount(a) < n) assert_equal(x, [a, a]) class TestWiener(object): def test_basic(self): g = array([[5, 6, 4, 3], [3, 5, 6, 2], [2, 3, 5, 6], [1, 6, 9, 7]], 'd') h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) assert_array_almost_equal(signal.wiener(g), h, decimal=6) assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) class TestResample(object): def test_basic(self): # Some basic tests # Regression test for issue #3603. # window.shape must equal to sig.shape[0] sig = np.arange(128) num = 256 win = signal.get_window(('kaiser', 8.0), 160) assert_raises(ValueError, signal.resample, sig, num, window=win) # Other degenerate conditions assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) assert_raises(ValueError, signal.resample_poly, sig, 1, 0) # test for issue #6505 - should not modify window.shape when axis ≠ 0 sig2 = np.tile(np.arange(160), (2,1)) signal.resample(sig2, num, axis=-1, window=win) assert_(win.shape == (160,)) def test_fft(self): # Test FFT-based resampling self._test_data(method='fft') def test_polyphase(self): # Test polyphase resampling self._test_data(method='polyphase') def test_polyphase_extfilter(self): # Test external specification of downsampling filter self._test_data(method='polyphase', ext=True) def test_mutable_window(self): # Test that a mutable window is not modified impulse = np.zeros(3) window = np.random.RandomState(0).randn(2) window_orig = window.copy() signal.resample_poly(impulse, 5, 1, window=window) assert_array_equal(window, window_orig) def test_output_float32(self): # Test that float32 inputs yield a float32 output x = np.arange(10, dtype=np.float32) h = np.array([1,1,1], dtype=np.float32) y = signal.resample_poly(x, 1, 2, window=h) assert_(y.dtype == np.float32) def _test_data(self, method, ext=False): # Test resampling of sinusoids and random noise (1-sec) rate = 100 rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] # Sinusoids, windowed to avoid edge artifacts t = np.arange(rate) / float(rate) freqs = np.array((1., 10., 40.))[:, np.newaxis] x = np.sin(2 * np.pi * freqs * t) * hann(rate) for rate_to in rates_to: t_to = np.arange(rate_to) / float(rate_to) y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) if method == 'fft': y_resamps = signal.resample(x, rate_to, axis=-1) else: if ext and rate_to != rate: # Match default window design g = gcd(rate_to, rate) up = rate_to // g down = rate // g max_rate = max(up, down) f_c = 1. / max_rate half_len = 10 * max_rate window = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0)) polyargs = {'window': window} else: polyargs = {} y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, **polyargs) for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): if freq >= 0.5 * rate_to: y_to.fill(0.) 
# mostly low-passed away assert_allclose(y_resamp, y_to, atol=1e-3) else: assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=(corr, rate, rate_to)) # Random data rng = np.random.RandomState(0) x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind for rate_to in rates_to: # random data t_to = np.arange(rate_to) / float(rate_to) y_to = np.interp(t_to, t, x) if method == 'fft': y_resamp = signal.resample(x, rate_to) else: y_resamp = signal.resample_poly(x, rate_to, rate) assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=corr) # More tests of fft method (Master 0.18.1 fails these) if method == 'fft': x1 = np.array([1.+0.j,0.+0.j]) y1_test = signal.resample(x1,4) y1_true = np.array([1.+0.j,0.5+0.j,0.+0.j,0.5+0.j]) # upsampling a complex array assert_allclose(y1_test, y1_true, atol=1e-12) x2 = np.array([1.,0.5,0.,0.5]) y2_test = signal.resample(x2,2) # downsampling a real array y2_true = np.array([1.,0.]) assert_allclose(y2_test, y2_true, atol=1e-12) def test_poly_vs_filtfilt(self): # Check that up=1.0 gives same answer as filtfilt + slicing random_state = np.random.RandomState(17) try_types = (int, np.float32, np.complex64, float, complex) size = 10000 down_factors = [2, 11, 79] for dtype in try_types: x = random_state.randn(size).astype(dtype) if dtype in (np.complex64, np.complex128): x += 1j * random_state.randn(size) # resample_poly assumes zeros outside of signl, whereas filtfilt # can only constant-pad. Make them equivalent: x[0] = 0 x[-1] = 0 for down in down_factors: h = signal.firwin(31, 1. / down, window='hamming') yf = filtfilt(h, 1.0, x, padtype='constant')[::down] # Need to pass convolved version of filter to resample_poly, # since filtfilt does forward and backward, but resample_poly # only goes forward hc = convolve(h, h[::-1]) y = signal.resample_poly(x, 1, down, window=hc) assert_allclose(yf, y, atol=1e-7, rtol=1e-7) def test_correlate1d(self): for down in [2, 4]: for nx in range(1, 40, down): for nweights in (32, 33): x = np.random.random((nx,)) weights = np.random.random((nweights,)) y_g = correlate1d(x, weights[::-1], mode='constant') y_s = signal.resample_poly(x, up=1, down=down, window=weights) assert_allclose(y_g[::down], y_s) class TestCSpline1DEval(object): def test_basic(self): y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) x = arange(len(y)) dx = x[1] - x[0] cj = signal.cspline1d(y) x2 = arange(len(y) * 10.0) / 10.0 y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) # make sure interpolated values are on knot points assert_array_almost_equal(y2[::10], y, decimal=5) def test_complex(self): # create some smoothly varying complex signal to interpolate x = np.arange(2) y = np.zeros(x.shape, dtype=np.complex64) T = 10.0 f = 1.0 / T y = np.exp(2.0J * np.pi * f * x) # get the cspline transform cy = signal.cspline1d(y) # determine new test x value and interpolate xnew = np.array([0.5]) ynew = signal.cspline1d_eval(cy, xnew) assert_equal(ynew.dtype, y.dtype) class TestOrderFilt(object): def test_basic(self): assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), [2, 3, 2]) class _TestLinearFilter(object): def generate(self, shape): x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) return self.convert_dtype(x) def convert_dtype(self, arr): if self.dtype == np.dtype('O'): arr = np.asarray(arr) out = np.empty(arr.shape, self.dtype) iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'], [['readonly'],['writeonly']]) for x, y 
in iter: y[...] = self.type(x[()]) return out else: return np.array(arr, self.dtype, copy=False) def test_rank_1_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, -0.5]) y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_IIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([0.5, -0.5]) zi = self.convert_dtype([1, 2]) y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) zf_r = self.convert_dtype([13, -10]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_1_FIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 1, 1]) a = self.convert_dtype([1]) zi = self.convert_dtype([1, 1]) y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) zf_r = self.convert_dtype([9, 5]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_0(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]]) y = lfilter(b, a, x, axis=0) assert_array_almost_equal(y_r2_a0, y) def test_rank_2_IIR_axis_1(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]]) y = lfilter(b, a, x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank_2_IIR_axis_0_init_cond(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((4,1))) y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], [19, -17, 19]]) zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] y, zf = lfilter(b, a, x, axis=1, zi=zi) assert_array_almost_equal(y_r2_a0_1, y) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_1_init_cond(self): x = self.generate((4,3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((1,3))) y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5, 3, 1]]) zf_r = self.convert_dtype([[-23, -23, -23]]) y, zf = lfilter(b, a, x, axis=0, zi=zi) assert_array_almost_equal(y_r2_a0_0, y) assert_array_almost_equal(zf, zf_r) def test_rank_3_IIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_IIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 1 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_3_FIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) 
for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_FIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 2 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1, 1]) y, zf = lfilter(b, a, x, axis, zi) lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_zi_pseudobroadcast(self): x = self.generate((4, 5, 20)) b,a = signal.butter(8, 0.2, output='ba') b = self.convert_dtype(b) a = self.convert_dtype(a) zi_size = b.shape[0] - 1 # lfilter requires x.ndim == zi.ndim exactly. However, zi can have # length 1 dimensions. zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) y_full, zf_full = lfilter(b, a, x, zi=zi_full) y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) assert_array_almost_equal(y_sing, y_full) assert_array_almost_equal(zf_full, zf_sing) # lfilter does not prepend ones assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) def test_scalar_a(self): # a can be a scalar. x = self.generate(6) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) y = lfilter(b, a[0], x) assert_array_almost_equal(y, y_r) def test_zi_some_singleton_dims(self): # lfilter doesn't really broadcast (no prepending of 1's). But does # do singleton expansion if x and zi have the same ndim. This was # broken only if a subset of the axes were singletons (gh-4681). 
x = self.convert_dtype(np.zeros((3,2,5), 'l')) b = self.convert_dtype(np.ones(5, 'l')) a = self.convert_dtype(np.array([1,0,0])) zi = np.ones((3,1,4), 'l') zi[1,:,:] *= 2 zi[2,:,:] *= 3 zi = self.convert_dtype(zi) zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) y_expected = np.zeros((3,2,5), 'l') y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] y_expected = self.convert_dtype(y_expected) # IIR y_iir, zf_iir = lfilter(b, a, x, -1, zi) assert_array_almost_equal(y_iir, y_expected) assert_array_almost_equal(zf_iir, zf_expected) # FIR y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) assert_array_almost_equal(y_fir, y_expected) assert_array_almost_equal(zf_fir, zf_expected) def base_bad_size_zi(self, b, a, x, axis, zi): b = self.convert_dtype(b) a = self.convert_dtype(a) x = self.convert_dtype(x) zi = self.convert_dtype(zi) assert_raises(ValueError, lfilter, b, a, x, axis, zi) def test_bad_size_zi(self): # rank 1 x1 = np.arange(6) self.base_bad_size_zi([1], [1], x1, -1, [1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) # rank 2 x2 = np.arange(12).reshape((4,3)) # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) self.base_bad_size_zi([1], [1], x2, 0, [0]) # for each of these there are 5 cases tested (in this order): # 1. not deep enough, right # elements # 2. too deep, right # elements # 3. right depth, right # elements, transposed # 4. right depth, too few elements # 5. 
right depth, too many elements self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) self.base_bad_size_zi([1], [1], x2, 1, [0]) self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) def test_empty_zi(self): # Regression test for #880: empty array for zi crashes. 
x = self.generate((5,)) a = self.convert_dtype([1]) b = self.convert_dtype([1]) zi = self.convert_dtype([]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, x) assert_equal(zf.dtype, self.dtype) assert_equal(zf.size, 0) def test_lfiltic_bad_zi(self): # Regression test for #3699: bad initial conditions a = self.convert_dtype([1]) b = self.convert_dtype([1]) # "y" sets the datatype of zi, so it truncates if int zi = lfiltic(b, a, [1., 0]) zi_1 = lfiltic(b, a, [1, 0]) zi_2 = lfiltic(b, a, [True, False]) assert_array_equal(zi, zi_1) assert_array_equal(zi, zi_2) def test_short_x_FIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([7, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_short_x_IIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1, 1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([-67, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_do_not_modify_a_b_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) b0 = b.copy() a = self.convert_dtype([0.5, -0.5]) a0 = a.copy() y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) def test_do_not_modify_a_b_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, 1]) b0 = b.copy() a = self.convert_dtype([2]) a0 = a.copy() y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) class TestLinearFilterFloat32(_TestLinearFilter): dtype = np.dtype('f') class TestLinearFilterFloat64(_TestLinearFilter): dtype = np.dtype('d') class TestLinearFilterFloatExtended(_TestLinearFilter): dtype = np.dtype('g') class TestLinearFilterComplex64(_TestLinearFilter): dtype = np.dtype('F') class TestLinearFilterComplex128(_TestLinearFilter): dtype = np.dtype('D') class TestLinearFilterComplexExtended(_TestLinearFilter): dtype = np.dtype('G') class TestLinearFilterDecimal(_TestLinearFilter): dtype = np.dtype('O') def type(self, x): return Decimal(str(x)) class TestLinearFilterObject(_TestLinearFilter): dtype = np.dtype('O') type = float def test_lfilter_bad_object(): # lfilter: object arrays with non-numeric objects raise TypeError. # Regression test for ticket #1452. 
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) with assert_raises(ValueError, match='common type'): lfilter([1.], [1., 1.], ['a', 'b', 'c']) def test_lfilter_notimplemented_input(): # Should not crash, gh-7991 assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) @pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, np.uint, int, np.ulonglong, np.ulonglong, np.float32, np.float64, np.longdouble, Decimal]) class TestCorrelateReal(object): def _setup_rank1(self, dt): a = np.linspace(0, 3, 4).astype(dt) b = np.linspace(1, 2, 2).astype(dt) y_r = np.array([0, 2, 5, 8, 3]).astype(dt) return a, b, y_r def equal_tolerance(self, res_dt): # default value of keyword decimal = 6 try: dt_info = np.finfo(res_dt) if hasattr(dt_info, 'resolution'): decimal = int(-0.5*np.log10(dt_info.resolution)) except Exception: pass return decimal def equal_tolerance_fft(self, res_dt): # FFT implementations convert longdouble arguments down to # double so don't expect better precision, see gh-9520 if res_dt == np.longdouble: return self.equal_tolerance(np.double) else: return self.equal_tolerance(res_dt) def test_method(self, dt): if dt == Decimal: method = choose_conv_method([Decimal(4)], [Decimal(3)]) assert_equal(method, 'direct') else: a, b, y_r = self._setup_rank3(dt) y_fft = correlate(a, b, method='fft') y_direct = correlate(a, b, method='direct') assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype)) assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype)) assert_equal(y_fft.dtype, dt) assert_equal(y_direct.dtype, dt) def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r[1:4]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[1:4][::-1]) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r[:-1]) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) def _setup_rank3(self, dt): a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( dt) b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( dt) y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], [46., 432., 1062., 1840., 2672., 1698., 864., 266.], [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], [202., 664., 1290., 1984., 2688., 1590., 712., 150.], [114., 344., 642., 960., 1280., 726., 296., 38.]], [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], [134., 920., 2166., 3680., 5280., 3306., 1640., 474.], [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], [[22., 214., 528., 916., 1332., 846., 430., 132.], [86., 484., 1098., 1832., 2600., 1602., 772., 206.], [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], [230., 692., 1290., 1928., 2568., 1458., 596., 78.], [126., 354., 636., 924., 1212., 654., 234., 0.]]], dtype=dt) return a, b, y_r def test_rank3_valid(self, 
dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1]) assert_equal(y.dtype, dt) def test_rank3_same(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "same") assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2]) assert_equal(y.dtype, dt) def test_rank3_all(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b) assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) class TestCorrelate(object): # Tests that don't depend on dtype def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'}) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, correlate, a, b, mode='spam') assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft') assert_raises(ValueError, correlate, a, b, mode='ham', method='direct') assert_raises(ValueError, correlate, a, b, mode='full', method='bacon') assert_raises(ValueError, correlate, a, b, mode='same', method='bacon') def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, correlate, [1], 2, method='direct') assert_raises(ValueError, correlate, 1, [2], method='direct') assert_raises(ValueError, correlate, [1], 2, method='fft') assert_raises(ValueError, correlate, 1, [2], method='fft') assert_raises(ValueError, correlate, [1], [[2]]) assert_raises(ValueError, correlate, [3], 2) def test_numpy_fastpath(self): a = [1, 2, 3] b = [4, 5] assert_allclose(correlate(a, b, mode='same'), [5, 14, 23]) a = [1, 2, 3] b = [4, 5, 6] assert_allclose(correlate(a, b, mode='same'), [17, 32, 23]) assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12]) assert_allclose(correlate(a, b, mode='valid'), [32]) @pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble]) class TestCorrelateComplex(object): # The decimal precision to be used for comparing results. # This value will be passed as the 'decimal' keyword argument of # assert_array_almost_equal(). # Since correlate may chose to use FFT method which converts # longdoubles to doubles internally don't expect better precision # for longdouble than for double (see gh-9520). 
def decimal(self, dt): if dt == np.clongdouble: dt = np.cdouble return int(2 * np.finfo(dt).precision / 3) def _setup_rank1(self, dt, mode): np.random.seed(9) a = np.random.randn(10).astype(dt) a += 1j * np.random.randn(10).astype(dt) b = np.random.randn(8).astype(dt) b += 1j * np.random.randn(8).astype(dt) y_r = (correlate(a.real, b.real, mode=mode) + correlate(a.imag, b.imag, mode=mode)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + correlate(a.imag, b.real, mode=mode)) return a, b, y_r def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt, 'valid') y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt, 'same') y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt, 'full') y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_swap_full(self, dt): d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) y = correlate(d, k) assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) def test_swap_same(self, dt): d = [0.+0.j, 1.+1.j, 2.+2.j] k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] y = correlate(d, k, mode="same") assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) def test_rank3(self, dt): a = np.random.randn(10, 8, 6).astype(dt) a += 1j * np.random.randn(10, 8, 6).astype(dt) b = np.random.randn(8, 6, 4).astype(dt) b += 1j * np.random.randn(8, 6, 4).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) def test_rank0(self, dt): a = np.array(np.random.randn()).astype(dt) a += 1j * np.array(np.random.randn()).astype(dt) b = np.array(np.random.randn()).astype(dt) b += 1j * np.array(np.random.randn()).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) assert_equal(correlate([1], [2j]), correlate(1, 2j)) assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) assert_equal(correlate([3j], [4]), correlate(3j, 4)) class TestCorrelate2d(object): def test_consistency_correlate_funcs(self): # Compare np.correlate, signal.correlate, signal.correlate2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.correlate(a, b, mode=mode), signal.correlate(a, b, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], mode=mode)), signal.correlate(a, b, mode=mode)) # See gh-5897 if mode == 'valid': assert_almost_equal(np.correlate(b, a, mode=mode), signal.correlate(b, a, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], mode=mode)), signal.correlate(b, a, mode=mode)) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. 
This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) def test_complex_input(self): assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) class TestLFilterZI(object): def test_basic(self): a = np.array([1.0, -1.0, 0.5]) b = np.array([1.0, 0.0, 2.0]) zi_expected = np.array([5.0, -1.0]) zi = lfilter_zi(b, a) assert_array_almost_equal(zi, zi_expected) def test_scale_invariance(self): # Regression test. There was a bug in which b was not correctly # rescaled when a[0] was nonzero. b = np.array([2, 8, 5]) a = np.array([1, 1, 8]) zi1 = lfilter_zi(b, a) zi2 = lfilter_zi(2*b, 2*a) assert_allclose(zi2, zi1, rtol=1e-12) class TestFiltFilt(object): filtfilt_kind = 'tf' def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None): if self.filtfilt_kind == 'tf': b, a = zpk2tf(*zpk) return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) elif self.filtfilt_kind == 'sos': sos = zpk2sos(*zpk) return sosfiltfilt(sos, x, axis, padtype, padlen) def test_basic(self): zpk = tf2zpk([1, 2, 3], [1, 2, 3]) out = self.filtfilt(zpk, np.arange(12)) assert_allclose(out, arange(12), atol=1e-11) def test_sine(self): rate = 2000 t = np.linspace(0, 1.0, rate + 1) # A signal with low frequency and a high frequency. xlow = np.sin(5 * 2 * np.pi * t) xhigh = np.sin(250 * 2 * np.pi * t) x = xlow + xhigh zpk = butter(8, 0.125, output='zpk') # r is the magnitude of the largest pole. r = np.abs(zpk[1]).max() eps = 1e-5 # n estimates the number of steps for the # transient to decay by a factor of eps. n = int(np.ceil(np.log(eps) / np.log(r))) # High order lowpass filter... y = self.filtfilt(zpk, x, padlen=n) # Result should be just xlow. err = np.abs(y - xlow).max() assert_(err < 1e-4) # A 2D case. x2d = np.vstack([xlow, xlow + xhigh]) y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) assert_equal(y2d.shape, x2d.shape) err = np.abs(y2d - xlow).max() assert_(err < 1e-4) # Use the previous result to check the use of the axis keyword. # (Regression test for ticket #1620) y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) assert_equal(y2d, y2dt.T) def test_axis(self): # Test the 'axis' keyword on a 3D array. x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) zpk = butter(3, 0.125, output='zpk') y0 = self.filtfilt(zpk, x, padlen=0, axis=0) y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) assert_array_equal(y0, np.swapaxes(y1, 0, 1)) y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) assert_array_equal(y0, np.swapaxes(y2, 0, 2)) def test_acoeff(self): if self.filtfilt_kind != 'tf': return # only necessary for TF # test for 'a' coefficient as single number out = signal.filtfilt([.5, .5], 1, np.arange(10)) assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) def test_gust_simple(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The input array has length 2. The exact solution for this case # was computed "by hand". 
x = np.array([1.0, 2.0]) b = np.array([0.5]) a = np.array([1.0, -0.5]) y, z1, z2 = _filtfilt_gust(b, a, x) assert_allclose([z1[0], z2[0]], [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]]) assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1], 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]]) def test_gust_scalars(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The filter coefficients are both scalars, so the filter simply # multiplies its input by b/a. When it is used in filtfilt, the # factor is (b/a)**2. x = np.arange(12) b = 3.0 a = 2.0 y = filtfilt(b, a, x, method="gust") expected = (b/a)**2 * x assert_allclose(y, expected) class TestSOSFiltFilt(TestFiltFilt): filtfilt_kind = 'sos' def test_equivalence(self): """Test equivalence between sosfiltfilt and filtfilt""" x = np.random.RandomState(0).randn(1000) for order in range(1, 6): zpk = signal.butter(order, 0.35, output='zpk') b, a = zpk2tf(*zpk) sos = zpk2sos(*zpk) y = filtfilt(b, a, x) y_sos = sosfiltfilt(sos, x) assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order) def filtfilt_gust_opt(b, a, x): """ An alternative implementation of filtfilt with Gustafsson edges. This function computes the same result as `scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays are accepted. The problem is solved using `fmin` from `scipy.optimize`. `_filtfilt_gust` is significanly faster than this implementation. """ def filtfilt_gust_opt_func(ics, b, a, x): """Objective function used in filtfilt_gust_opt.""" m = max(len(a), len(b)) - 1 z0f = ics[:m] z0b = ics[m:] y_f = lfilter(b, a, x, zi=z0f)[0] y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1] y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y_bf = lfilter(b, a, y_b, zi=z0f)[0] value = np.sum((y_fb - y_bf)**2) return value m = max(len(a), len(b)) - 1 zi = lfilter_zi(b, a) ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi)) result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x), xtol=1e-10, ftol=1e-12, maxfun=10000, maxiter=10000, full_output=True, disp=False) opt, fopt, niter, funcalls, warnflag = result if warnflag > 0: raise RuntimeError("minimization failed in filtfilt_gust_opt: " "warnflag=%d" % warnflag) z0f = opt[:m] z0b = opt[m:] # Apply the forward-backward filter using the computed initial # conditions. y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y = lfilter(b, a, y_b, zi=z0f)[0] return y, z0f, z0b def check_filtfilt_gust(b, a, shape, axis, irlen=None): # Generate x, the data to be filtered. np.random.seed(123) x = np.random.randn(*shape) # Apply filtfilt to x. This is the main calculation to be checked. y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) # Also call the private function so we can test the ICs. yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) # filtfilt_gust_opt is an independent implementation that gives the # expected result, but it only handles 1-d arrays, so use some looping # and reshaping shenanigans to create the expected output arrays. 
xx = np.swapaxes(x, axis, -1) out_shape = xx.shape[:-1] yo = np.empty_like(xx) m = max(len(a), len(b)) - 1 zo1 = np.empty(out_shape + (m,)) zo2 = np.empty(out_shape + (m,)) for indx in product(*[range(d) for d in out_shape]): yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) yo = np.swapaxes(yo, -1, axis) zo1 = np.swapaxes(zo1, -1, axis) zo2 = np.swapaxes(zo2, -1, axis) assert_allclose(y, yo, rtol=1e-9, atol=1e-10) assert_allclose(yg, yo, rtol=1e-9, atol=1e-10) assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10) assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10) def test_choose_conv_method(): for mode in ['valid', 'same', 'full']: for ndims in [1, 2]: n, k, true_method = 8, 6, 'direct' x = np.random.randn(*((n,) * ndims)) h = np.random.randn(*((k,) * ndims)) method = choose_conv_method(x, h, mode=mode) assert_equal(method, true_method) method_try, times = choose_conv_method(x, h, mode=mode, measure=True) assert_(method_try in {'fft', 'direct'}) assert_(type(times) is dict) assert_('fft' in times.keys() and 'direct' in times.keys()) n = 10 for not_fft_conv_supp in ["complex256", "complex192"]: if hasattr(np, not_fft_conv_supp): x = np.ones(n, dtype=not_fft_conv_supp) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = np.array([2**51], dtype=np.int64) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = [Decimal(3), Decimal(2)] h = [Decimal(1), Decimal(4)] assert_equal(choose_conv_method(x, h, mode=mode), 'direct') def test_filtfilt_gust(): # Design a filter. z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') # Find the approximate impulse response length of the filter. eps = 1e-10 r = np.max(np.abs(p)) approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) np.random.seed(123) b, a = zpk2tf(z, p, k) for irlen in [None, approx_impulse_len]: signal_len = 5 * approx_impulse_len # 1-d test case check_filtfilt_gust(b, a, (signal_len,), 0, irlen) # 3-d test case; test each axis. for axis in range(3): shape = [2, 2, 2] shape[axis] = signal_len check_filtfilt_gust(b, a, shape, axis, irlen) # Test case with length less than 2*approx_impulse_len. # In this case, `filtfilt_gust` should behave the same as if # `irlen=None` was given. length = 2*approx_impulse_len - 50 check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) class TestDecimate(object): def test_bad_args(self): x = np.arange(12) assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) def test_basic_IIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_basic_FIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_shape(self): # Regression test for ticket #1480. 
z = np.zeros((30, 30)) d0 = signal.decimate(z, 2, axis=0, zero_phase=False) assert_equal(d0.shape, (15, 30)) d1 = signal.decimate(z, 2, axis=1, zero_phase=False) assert_equal(d1.shape, (30, 15)) def test_phaseshift_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=False) def test_zero_phase_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=True) def test_phaseshift_IIR(self): self._test_phaseshift(method='iir', zero_phase=False) def test_zero_phase_IIR(self): self._test_phaseshift(method='iir', zero_phase=True) def _test_phaseshift(self, method, zero_phase): rate = 120 rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 t_tot = int(100) # Need to let antialiasing filters settle t = np.arange(rate*t_tot+1) / float(rate) # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts freqs = np.array(rates_to) * 0.8 / 2 d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) * signal.windows.tukey(t.size, 0.1)) for rate_to in rates_to: q = rate // rate_to t_to = np.arange(rate_to*t_tot+1) / float(rate_to) d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) * signal.windows.tukey(t_to.size, 0.1)) # Set up downsampling filters, match v0.17 defaults if method == 'fir': n = 30 system = signal.dlti(signal.firwin(n + 1, 1. / q, window='hamming'), 1.) elif method == 'iir': n = 8 wc = 0.8*np.pi/q system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) # Calculate expected phase response, as unit complex vector if zero_phase is False: _, h_resps = signal.freqz(system.num, system.den, freqs/rate*2*np.pi) h_resps /= np.abs(h_resps) else: h_resps = np.ones_like(freqs) y_resamps = signal.decimate(d.real, q, n, ftype=system, zero_phase=zero_phase) # Get phase from complex inner product, like CSD h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) h_resamps /= np.abs(h_resamps) subnyq = freqs < 0.5*rate_to # Complex vectors should be aligned, only compare below nyquist assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, atol=1e-3, rtol=1e-3) def test_auto_n(self): # Test that our value of n is a reasonable choice (depends on # the downsampling factor) sfreq = 100. n = 1000 t = np.arange(n) / sfreq # will alias for decimations (>= 15) x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) 
* t) assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) x_out = signal.decimate(x, 30, ftype='fir') assert_array_less(np.linalg.norm(x_out), 0.01) class TestHilbert(object): def test_bad_args(self): x = np.array([1.0 + 0.0j]) assert_raises(ValueError, hilbert, x) x = np.arange(8.0) assert_raises(ValueError, hilbert, x, N=0) def test_hilbert_theoretical(self): # test cases by Ariel Rokem decimal = 14 pi = np.pi t = np.arange(0, 2 * pi, pi / 256) a0 = np.sin(t) a1 = np.cos(t) a2 = np.sin(2 * t) a3 = np.cos(2 * t) a = np.vstack([a0, a1, a2, a3]) h = hilbert(a) h_abs = np.abs(h) h_angle = np.angle(h) h_real = np.real(h) # The real part should be equal to the original signals: assert_almost_equal(h_real, a, decimal) # The absolute value should be one everywhere, for this input: assert_almost_equal(h_abs, np.ones(a.shape), decimal) # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in # the first 256 bins: assert_almost_equal(h_angle[0, :256], np.arange(-pi / 2, pi / 2, pi / 256), decimal) # For the 'slow' cosine - the phase should go from 0 to pi in the # same interval: assert_almost_equal( h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) # The 'fast' sine should make this phase transition in half the time: assert_almost_equal(h_angle[2, :128], np.arange(-pi / 2, pi / 2, pi / 128), decimal) # Ditto for the 'fast' cosine: assert_almost_equal( h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia assert_almost_equal(h[1].imag, a0, decimal) def test_hilbert_axisN(self): # tests for axis and N arguments a = np.arange(18).reshape(3, 6) # test axis aa = hilbert(a, axis=-1) assert_equal(hilbert(a.T, axis=0), aa.T) # test 1d assert_almost_equal(hilbert(a[0]), aa[0], 14) # test N aan = hilbert(a, N=20, axis=-1) assert_equal(aan.shape, [3, 20]) assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) # the next test is just a regression test, # no idea whether numbers make sense a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, 1.000000000000000e+00 - 2.047794505137069j, 1.999999999999999e+00 - 2.244055555687583j, 3.000000000000000e+00 - 1.262750302935009j, 4.000000000000000e+00 - 1.066489252384493j, 5.000000000000000e+00 + 2.918022706971047j, 8.881784197001253e-17 + 3.845658908989067j, -9.444121133484362e-17 + 0.985044202202061j, -1.776356839400251e-16 + 1.332257797702019j, -3.996802888650564e-16 + 0.501905089898885j, 1.332267629550188e-16 + 0.668696078880782j, -1.192678053963799e-16 + 0.235487067862679j, -1.776356839400251e-16 + 0.286439612812121j, 3.108624468950438e-16 + 0.031676888064907j, 1.332267629550188e-16 - 0.019275656884536j, -2.360035624836702e-16 - 0.1652588660287j, 0.000000000000000e+00 - 0.332049855010597j, 3.552713678800501e-16 - 0.403810179797771j, 8.881784197001253e-17 - 0.751023775297729j, 9.444121133484362e-17 - 0.79252210110103j]) assert_almost_equal(aan[0], a0hilb, 14, 'N regression') class TestHilbert2(object): def test_bad_args(self): # x must be real. x = np.array([[1.0 + 0.0j]]) assert_raises(ValueError, hilbert2, x) # x must be rank 2. x = np.arange(24).reshape(2, 3, 4) assert_raises(ValueError, hilbert2, x) # Bad value for N. x = np.arange(16).reshape(4, 4) assert_raises(ValueError, hilbert2, x, N=0) assert_raises(ValueError, hilbert2, x, N=(2, 0)) assert_raises(ValueError, hilbert2, x, N=(2,)) class TestPartialFractionExpansion(object): def test_invresz_one_coefficient_bug(self): # Regression test for issue in gh-4646. 
r = [1] p = [2] k = [0] a_expected = [1.0, 0.0] b_expected = [1.0, -2.0] a_observed, b_observed = invresz(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_distinct_roots(self): # This test was inspired by github issue 2496. r = [3 / 10, -1 / 6, -2 / 15] p = [0, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 7, 10, 0] a_observed, b_observed = invres(r, p, k) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') # With the default tolerance, the rtype does not matter # for this example. for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) # With unrealistically large tolerances, repeated roots may be inferred # and the rtype comes into play. ridiculous_tolerance = 1e10 for rtype in rtypes: a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype) def test_invres_repeated_roots(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] a_expected = [1, 3] b_expected = [1, 9, 24, 20, 0] rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') for rtype in rtypes: a_observed, b_observed = invres(r, p, k, rtype=rtype) assert_allclose(a_observed, a_expected) assert_allclose(b_observed, b_expected) def test_invres_bad_rtype(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] assert_raises(ValueError, invres, r, p, k, rtype='median') class TestVectorstrength(object): def test_single_1dperiod(self): events = np.array([.5]) period = 5. targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_single_2dperiod(self): events = np.array([.5]) period = [1, 2, 5.] targ_strength = [1.] * 3 targ_phase = np.array([.5, .25, .1]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_array_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_1dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = 2 targ_strength = 1. targ_phase = .125 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_2dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = [1, 2, ] targ_strength = [1.] * 2 targ_phase = np.array([.25, .125]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_1dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = 1 targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_2dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = [1, .5] targ_strength = [1.] 
* 2 targ_phase = np.array([.1, .2]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_1dperiod(self): events = np.array([.25, .5, .75]) period = 1 targ_strength = 1. / 3. targ_phase = .5 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_2dperiod(self): events = np.array([.25, .5, .75]) period = [1., 1., 1., 1.] targ_strength = [1. / 3.] * 4 targ_phase = np.array([.5, .5, .5, .5]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_opposite_1dperiod(self): events = np.array([0, .25, .5, .75]) period = 1. targ_strength = 0 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) def test_opposite_2dperiod(self): events = np.array([0, .25, .5, .75]) period = [1.] * 10 targ_strength = [0.] * 10 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) def test_2d_events_ValueError(self): events = np.array([[1, 2]]) period = 1. assert_raises(ValueError, vectorstrength, events, period) def test_2d_period_ValueError(self): events = 1. period = np.array([[1]]) assert_raises(ValueError, vectorstrength, events, period) def test_zero_period_ValueError(self): events = 1. period = 0 assert_raises(ValueError, vectorstrength, events, period) def test_negative_period_ValueError(self): events = 1. period = -1 assert_raises(ValueError, vectorstrength, events, period) class TestSOSFilt(object): # For sosfilt we only test a single datatype. Since sosfilt wraps # to lfilter under the hood, it's hopefully good enough to ensure # lfilter is extensively tested. dt = np.float64 # The test_rank* tests are pulled from _TestLinearFilter def test_rank1(self): x = np.linspace(0, 5, 6).astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, -0.5]).astype(self.dt) # Test simple IIR y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) # Test simple FIR b = np.array([1, 1]).astype(self.dt) # NOTE: This was changed (rel. to TestLinear...) 
to add a pole @zero: a = np.array([1, 0]).astype(self.dt) y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt) assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) b = [1, 1, 0] a = [1, 0, 0] x = np.ones(8) sos = np.concatenate((b, a)) sos.shape = (1, 6) y = sosfilt(sos, x) assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) def test_rank2(self): shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(self.dt) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], dtype=self.dt) y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]], dtype=self.dt) y = sosfilt(tf2sos(b, a), x, axis=0) assert_array_almost_equal(y_r2_a0, y) y = sosfilt(tf2sos(b, a), x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank3(self): shape = (4, 3, 2) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) b = np.array([1, -1]).astype(self.dt) a = np.array([0.5, 0.5]).astype(self.dt) # Test last axis y = sosfilt(tf2sos(b, a), x) for i in range(x.shape[0]): for j in range(x.shape[1]): assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) def test_initial_conditions(self): b1, a1 = signal.butter(2, 0.25, 'low') b2, a2 = signal.butter(2, 0.75, 'low') b3, a3 = signal.butter(2, 0.75, 'low') b = np.convolve(np.convolve(b1, b2), b3) a = np.convolve(np.convolve(a1, a2), a3) sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) x = np.random.rand(50) # Stopping filtering and continuing y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] assert_allclose(y_true, lfilter(b, a, x)) y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2))) y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] assert_allclose(y_true, y_sos) # Use a step function zi = sosfilt_zi(sos) x = np.ones(8) y, zf = sosfilt(sos, x, zi=zi) assert_allclose(y, np.ones(8)) assert_allclose(zf, zi) # Initial condition shape matching x.shape = (1, 1) + x.shape # 3D assert_raises(ValueError, sosfilt, sos, x, zi=zi) zi_nd = zi.copy() zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) assert_raises(ValueError, sosfilt, sos, x, zi=zi_nd[:, :, :, [0, 1, 1]]) y, zf = sosfilt(sos, x, zi=zi_nd) assert_allclose(y[0, 0], np.ones(8)) assert_allclose(zf[:, 0, 0, :], zi) def test_initial_conditions_3d_axis1(self): # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. # Input array is x. x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) # Design a filter in ZPK format and convert to SOS zpk = signal.butter(6, 0.35, output='zpk') sos = zpk2sos(*zpk) nsections = sos.shape[0] # Filter along this axis. axis = 1 # Initial conditions, all zeros. shp = list(x.shape) shp[axis] = 2 shp = [nsections] + shp z0 = np.zeros(shp) # Apply the filter to x. yf, zf = sosfilt(sos, x, axis=axis, zi=z0) # Apply the filter to x in two stages. y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) # y should equal yf, and z2 should equal zf. 
y = np.concatenate((y1, y2), axis=axis) assert_allclose(y, yf, rtol=1e-10, atol=1e-13) assert_allclose(z2, zf, rtol=1e-10, atol=1e-13) # let's try the "step" initial condition zi = sosfilt_zi(sos) zi.shape = [nsections, 1, 2, 1] zi = zi * x[:, 0:1, :] y = sosfilt(sos, x, axis=axis, zi=zi)[0] # check it against the TF form b, a = zpk2tf(*zpk) zi = lfilter_zi(b, a) zi.shape = [1, zi.size, 1] zi = zi * x[:, 0:1, :] y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0] assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13) def test_bad_zi_shape(self): # The shape of zi is checked before using any values in the # arguments, so np.empty is fine for creating the arguments. x = np.empty((3, 15, 3)) sos = np.empty((4, 6)) zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3) assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1) def test_sosfilt_zi(self): sos = signal.butter(6, 0.2, output='sos') zi = sosfilt_zi(sos) y, zf = sosfilt(sos, np.ones(40), zi=zi) assert_allclose(zf, zi, rtol=1e-13) # Expected steady state value of the step response of this filter: ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) assert_allclose(y, ss, rtol=1e-13) class TestDeconvolve(object): def test_basic(self): # From docstring example original = [0, 1, 0, 0, 1, 1, 0, 0] impulse_response = [2, 1] recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] recovered, remainder = signal.deconvolve(recorded, impulse_response) assert_allclose(recovered, original)
Eric89GXL/scipy
scipy/signal/tests/test_signaltools.py
scipy/linalg/tests/test_sketches.py
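The _TestLinearFilter cases above revolve around lfilter's zi/zf state handling. A minimal sketch of that chunked-filtering pattern follows; the coefficients b and a are assumed values chosen only for illustration, and the snippet is not part of the dataset row above.

import numpy as np
from scipy.signal import lfilter, lfilter_zi

# Assumed first-order IIR coefficients, chosen only for illustration.
b, a = [0.25, 0.25], [1.0, -0.5]
x = np.arange(10.0)

# Steady-state initial conditions scaled to the first sample, then a
# stateful call: the returned zf can seed filtering of the next chunk.
zi = lfilter_zi(b, a) * x[0]
y1, zf = lfilter(b, a, x[:5], zi=zi)
y2, _ = lfilter(b, a, x[5:], zi=zf)

# Filtering in two chunks with carried-over state matches a single pass.
y_full, _ = lfilter(b, a, x, zi=zi)
assert np.allclose(np.concatenate([y1, y2]), y_full)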
from marshmallow.validate import *  # NOQA


class NotEmpty(Validator):
    def __call__(self, value):
        if len(value) == 0:
            raise ValidationError('Must not be empty.')
import pytest

from marshmallow import ValidationError

from skylines.schemas import ClubSchema


def test_deserialization_fails_for_empty_name():
    with pytest.raises(ValidationError) as e:
        ClubSchema(only=('name',)).load(dict(name=''))

    errors = e.value.messages
    assert 'name' in errors
    assert 'Must not be empty.' in errors.get('name')


def test_deserialization_fails_for_spaced_name():
    with pytest.raises(ValidationError) as e:
        ClubSchema(only=('name',)).load(dict(name=' '))

    errors = e.value.messages
    assert 'name' in errors
    assert 'Must not be empty.' in errors.get('name')


def test_deserialization_passes_for_valid_name():
    data = ClubSchema(only=('name',)).load(dict(name=' foo ')).data
    assert data['name'] == 'foo'


def test_serialization_passes_for_invalid_website():
    data = ClubSchema().dump(dict(website='foobar')).data
    assert data['website'] == 'foobar'
kerel-fs/skylines
tests/schemas/schemas/test_club.py
skylines/schemas/validate.py
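A minimal sketch of using the NotEmpty validator above on its own, assuming the module is importable as skylines.schemas.validate; the sample values are hypothetical and the snippet is not part of the dataset row above.

from marshmallow import ValidationError

from skylines.schemas.validate import NotEmpty

validator = NotEmpty()
validator('hello')  # a non-empty value passes silently

try:
    validator('')
except ValidationError as err:
    # marshmallow normalises a string message to a one-element list
    print(err.messages)  # ['Must not be empty.']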
# --------------------------------------------------------------------- # JSON normalization routines import copy from collections import defaultdict import numpy as np from pandas._libs.writers import convert_json_to_lines from pandas import compat, DataFrame def _convert_to_line_delimits(s): """Helper function that converts json lists to line delimited json.""" # Determine we have a JSON list to turn to lines otherwise just return the # json object, only lists can if not s[0] == '[' and s[-1] == ']': return s s = s[1:-1] return convert_json_to_lines(s) def nested_to_record(ds, prefix="", sep=".", level=0): """a simplified json_normalize converts a nested dict into a flat dict ("record"), unlike json_normalize, it does not attempt to extract a subset of the data. Parameters ---------- ds : dict or list of dicts prefix: the prefix, optional, default: "" sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 level: the number of levels in the jason string, optional, default: 0 Returns ------- d - dict or list of dicts, matching `ds` Examples -------- IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2), nested=dict(e=dict(c=1,d=2),d=2))) Out[52]: {'dict1.c': 1, 'dict1.d': 2, 'flat1': 1, 'nested.d': 2, 'nested.e.c': 1, 'nested.e.d': 2} """ singleton = False if isinstance(ds, dict): ds = [ds] singleton = True new_ds = [] for d in ds: new_d = copy.deepcopy(d) for k, v in d.items(): # each key gets renamed with prefix if not isinstance(k, compat.string_types): k = str(k) if level == 0: newkey = k else: newkey = prefix + sep + k # only dicts gets recurse-flattend # only at level>1 do we rename the rest of the keys if not isinstance(v, dict): if level != 0: # so we skip copying for top level, common case v = new_d.pop(k) new_d[newkey] = v continue else: v = new_d.pop(k) new_d.update(nested_to_record(v, newkey, sep, level + 1)) new_ds.append(new_d) if singleton: return new_ds[0] return new_ds def json_normalize(data, record_path=None, meta=None, meta_prefix=None, record_prefix=None, errors='raise', sep='.'): """ "Normalize" semi-structured JSON data into a flat table Parameters ---------- data : dict or list of dicts Unserialized JSON objects record_path : string or list of strings, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records meta : list of paths (string or list of strings), default None Fields to use as metadata for each record in resulting table record_prefix : string, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar'] meta_prefix : string, default None errors : {'raise', 'ignore'}, default 'raise' * 'ignore' : will ignore KeyError if keys listed in meta are not always present * 'raise' : will raise KeyError if keys listed in meta are not always present .. versionadded:: 0.20.0 sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 Returns ------- frame : DataFrame Examples -------- >>> from pandas.io.json import json_normalize >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}}, ... {'name': {'given': 'Mose', 'family': 'Regner'}}, ... 
{'id': 2, 'name': 'Faye Raker'}] >>> json_normalize(data) id name name.family name.first name.given name.last 0 1.0 NaN NaN Coleen NaN Volk 1 NaN NaN Regner NaN Mose NaN 2 2.0 Faye Raker NaN NaN NaN NaN >>> data = [{'state': 'Florida', ... 'shortname': 'FL', ... 'info': { ... 'governor': 'Rick Scott' ... }, ... 'counties': [{'name': 'Dade', 'population': 12345}, ... {'name': 'Broward', 'population': 40000}, ... {'name': 'Palm Beach', 'population': 60000}]}, ... {'state': 'Ohio', ... 'shortname': 'OH', ... 'info': { ... 'governor': 'John Kasich' ... }, ... 'counties': [{'name': 'Summit', 'population': 1234}, ... {'name': 'Cuyahoga', 'population': 1337}]}] >>> result = json_normalize(data, 'counties', ['state', 'shortname', ... ['info', 'governor']]) >>> result name population info.governor state shortname 0 Dade 12345 Rick Scott Florida FL 1 Broward 40000 Rick Scott Florida FL 2 Palm Beach 60000 Rick Scott Florida FL 3 Summit 1234 John Kasich Ohio OH 4 Cuyahoga 1337 John Kasich Ohio OH >>> data = {'A': [1, 2]} >>> json_normalize(data, 'A', record_prefix='Prefix.') Prefix.0 0 1 1 2 """ def _pull_field(js, spec): result = js if isinstance(spec, list): for field in spec: result = result[field] else: result = result[spec] return result if isinstance(data, list) and not data: return DataFrame() # A bit of a hackjob if isinstance(data, dict): data = [data] if record_path is None: if any([[isinstance(x, dict) for x in compat.itervalues(y)] for y in data]): # naive normalization, this is idempotent for flat records # and potentially will inflate the data considerably for # deeply nested structures: # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@} # # TODO: handle record value which are lists, at least error # reasonably data = nested_to_record(data, sep=sep) return DataFrame(data) elif not isinstance(record_path, list): record_path = [record_path] if meta is None: meta = [] elif not isinstance(meta, list): meta = [meta] meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records = [] lengths = [] meta_vals = defaultdict(list) if not isinstance(sep, compat.string_types): sep = str(sep) meta_keys = [sep.join(val) for val in meta] def _recursive_extract(data, path, seen_meta, level=0): if len(path) > 1: for obj in data: for val, key in zip(meta, meta_keys): if level + 1 == len(val): seen_meta[key] = _pull_field(obj, val[-1]) _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) else: for obj in data: recs = _pull_field(obj, path[0]) # For repeating the metadata later lengths.append(len(recs)) for val, key in zip(meta, meta_keys): if level + 1 > len(val): meta_val = seen_meta[key] else: try: meta_val = _pull_field(obj, val[level:]) except KeyError as e: if errors == 'ignore': meta_val = np.nan else: raise \ KeyError("Try running with " "errors='ignore' as key " "{err} is not always present" .format(err=e)) meta_vals[key].append(meta_val) records.extend(recs) _recursive_extract(data, record_path, {}, level=0) result = DataFrame(records) if record_prefix is not None: result = result.rename( columns=lambda x: "{p}{c}".format(p=record_prefix, c=x)) # Data types, a problem for k, v in compat.iteritems(meta_vals): if meta_prefix is not None: k = meta_prefix + k if k in result: raise ValueError('Conflicting metadata name {name}, ' 'need distinguishing prefix '.format(name=k)) result[k] = np.array(v).repeat(lengths) return result
# -*- coding: utf-8 -*- import numpy as np from numpy.random import randint from textwrap import dedent import pytest import pandas as pd from pandas import DataFrame from pandas import read_clipboard from pandas import get_option from pandas.compat import PY2 from pandas.util import testing as tm from pandas.util.testing import makeCustomDataframe as mkdf from pandas.io.clipboard.exceptions import PyperclipException from pandas.io.clipboard import clipboard_set, clipboard_get try: DataFrame({'A': [1, 2]}).to_clipboard() _DEPS_INSTALLED = 1 except (PyperclipException, RuntimeError): _DEPS_INSTALLED = 0 def build_kwargs(sep, excel): kwargs = {} if excel != 'default': kwargs['excel'] = excel if sep != 'default': kwargs['sep'] = sep return kwargs @pytest.fixture(params=['delims', 'utf8', 'string', 'long', 'nonascii', 'colwidth', 'mixed', 'float', 'int']) def df(request): data_type = request.param if data_type == 'delims': return pd.DataFrame({'a': ['"a,\t"b|c', 'd\tef´'], 'b': ['hi\'j', 'k\'\'lm']}) elif data_type == 'utf8': return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'], 'b': ['øπ∆˚¬', 'œ∑´®']}) elif data_type == 'string': return mkdf(5, 3, c_idx_type='s', r_idx_type='i', c_idx_names=[None], r_idx_names=[None]) elif data_type == 'long': max_rows = get_option('display.max_rows') return mkdf(max_rows + 1, 3, data_gen_f=lambda *args: randint(2), c_idx_type='s', r_idx_type='i', c_idx_names=[None], r_idx_names=[None]) elif data_type == 'nonascii': return pd.DataFrame({'en': 'in English'.split(), 'es': 'en español'.split()}) elif data_type == 'colwidth': _cw = get_option('display.max_colwidth') + 1 return mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw, c_idx_type='s', r_idx_type='i', c_idx_names=[None], r_idx_names=[None]) elif data_type == 'mixed': return DataFrame({'a': np.arange(1.0, 6.0) + 0.01, 'b': np.arange(1, 6), 'c': list('abcde')}) elif data_type == 'float': return mkdf(5, 3, data_gen_f=lambda r, c: float(r) + 0.01, c_idx_type='s', r_idx_type='i', c_idx_names=[None], r_idx_names=[None]) elif data_type == 'int': return mkdf(5, 3, data_gen_f=lambda *args: randint(2), c_idx_type='s', r_idx_type='i', c_idx_names=[None], r_idx_names=[None]) else: raise ValueError @pytest.mark.single @pytest.mark.skipif(not _DEPS_INSTALLED, reason="clipboard primitives not installed") class TestClipboard(object): def check_round_trip_frame(self, data, excel=None, sep=None, encoding=None): data.to_clipboard(excel=excel, sep=sep, encoding=encoding) result = read_clipboard(sep=sep or '\t', index_col=0, encoding=encoding) tm.assert_frame_equal(data, result, check_dtype=False) # Test that default arguments copy as tab delimited def test_round_trip_frame(self, df): self.check_round_trip_frame(df) # Test that explicit delimiters are respected @pytest.mark.parametrize('sep', ['\t', ',', '|']) def test_round_trip_frame_sep(self, df, sep): self.check_round_trip_frame(df, sep=sep) # Test white space separator def test_round_trip_frame_string(self, df): df.to_clipboard(excel=False, sep=None) result = read_clipboard() assert df.to_string() == result.to_string() assert df.shape == result.shape # Two character separator is not supported in to_clipboard # Test that multi-character separators are not silently passed def test_excel_sep_warning(self, df): with tm.assert_produces_warning(): df.to_clipboard(excel=True, sep=r'\t') # Separator is ignored when excel=False and should produce a warning def test_copy_delim_warning(self, df): with tm.assert_produces_warning(): df.to_clipboard(excel=False, sep='\t') # Tests that the default 
behavior of to_clipboard is tab # delimited and excel="True" @pytest.mark.parametrize('sep', ['\t', None, 'default']) @pytest.mark.parametrize('excel', [True, None, 'default']) def test_clipboard_copy_tabs_default(self, sep, excel, df): kwargs = build_kwargs(sep, excel) df.to_clipboard(**kwargs) if PY2: # to_clipboard copies unicode, to_csv produces bytes. This is # expected behavior assert clipboard_get().encode('utf-8') == df.to_csv(sep='\t') else: assert clipboard_get() == df.to_csv(sep='\t') # Tests reading of white space separated tables @pytest.mark.parametrize('sep', [None, 'default']) @pytest.mark.parametrize('excel', [False]) def test_clipboard_copy_strings(self, sep, excel, df): kwargs = build_kwargs(sep, excel) df.to_clipboard(**kwargs) result = read_clipboard(sep=r'\s+') assert result.to_string() == df.to_string() assert df.shape == result.shape def test_read_clipboard_infer_excel(self): # gh-19010: avoid warnings clip_kwargs = dict(engine="python") text = dedent(""" John James Charlie Mingus 1 2 4 Harry Carney """.strip()) clipboard_set(text) df = pd.read_clipboard(**clip_kwargs) # excel data is parsed correctly assert df.iloc[1][1] == 'Harry Carney' # having diff tab counts doesn't trigger it text = dedent(""" a\t b 1 2 3 4 """.strip()) clipboard_set(text) res = pd.read_clipboard(**clip_kwargs) text = dedent(""" a b 1 2 3 4 """.strip()) clipboard_set(text) exp = pd.read_clipboard(**clip_kwargs) tm.assert_frame_equal(res, exp) def test_invalid_encoding(self, df): # test case for testing invalid encoding with pytest.raises(ValueError): df.to_clipboard(encoding='ascii') with pytest.raises(NotImplementedError): pd.read_clipboard(encoding='ascii') @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8']) def test_round_trip_valid_encodings(self, enc, df): self.check_round_trip_frame(df, encoding=enc)
kdebrab/pandas
pandas/tests/io/test_clipboard.py
pandas/io/json/normalize.py
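A minimal sketch of the dotted-name flattening documented by nested_to_record and json_normalize above, using only the public entry point; the sample records are made up for illustration and are not part of the dataset row above.

from pandas.io.json import json_normalize

data = [{'id': 1, 'info': {'name': 'a', 'score': {'raw': 3}}},
        {'id': 2, 'info': {'name': 'b', 'score': {'raw': 5}}}]

# With no record_path, nested dicts are flattened into sep-joined column
# names, mirroring the nested_to_record docstring above.
frame = json_normalize(data, sep='.')
print(sorted(frame.columns))  # ['id', 'info.name', 'info.score.raw']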
# -*- coding: utf-8 -*- import warnings import pandas as pd import pandas.util.testing as tm from pandas import MultiIndex, compat from pandas.compat import PY3, range, u def test_dtype_str(indices): dtype = indices.dtype_str assert isinstance(dtype, compat.string_types) assert dtype == str(indices.dtype) def test_format(idx): idx.format() idx[:0].format() def test_format_integer_names(): index = MultiIndex(levels=[[0, 1], [0, 1]], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]) index.format(names=True) def test_format_sparse_config(idx): warn_filters = warnings.filters warnings.filterwarnings('ignore', category=FutureWarning, module=".*format") # GH1538 pd.set_option('display.multi_sparse', False) result = idx.format() assert result[1] == 'foo two' tm.reset_display_options() warnings.filters = warn_filters def test_format_sparse_display(): index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]], labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]]) result = index.format() assert result[3] == '1 0 0 0' def test_repr_with_unicode_data(): with pd.core.config.option_context("display.encoding", 'UTF-8'): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} index = pd.DataFrame(d).set_index(["a", "b"]).index assert "\\u" not in repr(index) # we don't want unicode-escaped def test_repr_roundtrip(): mi = MultiIndex.from_product([list('ab'), range(3)], names=['first', 'second']) str(mi) if PY3: tm.assert_index_equal(eval(repr(mi)), mi, exact=True) else: result = eval(repr(mi)) # string coerces to unicode tm.assert_index_equal(result, mi, exact=False) assert mi.get_level_values('first').inferred_type == 'string' assert result.get_level_values('first').inferred_type == 'unicode' mi_u = MultiIndex.from_product( [list(u'ab'), range(3)], names=['first', 'second']) result = eval(repr(mi_u)) tm.assert_index_equal(result, mi_u, exact=True) # formatting if PY3: str(mi) else: compat.text_type(mi) # long format mi = MultiIndex.from_product([list('abcdefg'), range(10)], names=['first', 'second']) if PY3: tm.assert_index_equal(eval(repr(mi)), mi, exact=True) else: result = eval(repr(mi)) # string coerces to unicode tm.assert_index_equal(result, mi, exact=False) assert mi.get_level_values('first').inferred_type == 'string' assert result.get_level_values('first').inferred_type == 'unicode' result = eval(repr(mi_u)) tm.assert_index_equal(result, mi_u, exact=True) def test_str(): # tested elsewhere pass def test_unicode_string_with_unicode(): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} idx = pd.DataFrame(d).set_index(["a", "b"]).index if PY3: str(idx) else: compat.text_type(idx) def test_bytestring_with_unicode(): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} idx = pd.DataFrame(d).set_index(["a", "b"]).index if PY3: bytes(idx) else: str(idx) def test_repr_max_seq_item_setting(idx): # GH10182 idx = idx.repeat(50) with pd.option_context("display.max_seq_items", None): repr(idx) assert '...' not in str(idx)
# -*- coding: utf-8 -*-
import numpy as np
from numpy.random import randint
from textwrap import dedent

import pytest
import pandas as pd

from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.compat import PY2
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.io.clipboard.exceptions import PyperclipException
from pandas.io.clipboard import clipboard_set, clipboard_get

try:
    DataFrame({'A': [1, 2]}).to_clipboard()
    _DEPS_INSTALLED = 1
except (PyperclipException, RuntimeError):
    _DEPS_INSTALLED = 0


def build_kwargs(sep, excel):
    kwargs = {}
    if excel != 'default':
        kwargs['excel'] = excel
    if sep != 'default':
        kwargs['sep'] = sep
    return kwargs


@pytest.fixture(params=['delims', 'utf8', 'string', 'long', 'nonascii',
                        'colwidth', 'mixed', 'float', 'int'])
def df(request):
    data_type = request.param

    if data_type == 'delims':
        return pd.DataFrame({'a': ['"a,\t"b|c', 'd\tef´'],
                             'b': ['hi\'j', 'k\'\'lm']})
    elif data_type == 'utf8':
        return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
                             'b': ['øπ∆˚¬', 'œ∑´®']})
    elif data_type == 'string':
        return mkdf(5, 3, c_idx_type='s', r_idx_type='i',
                    c_idx_names=[None], r_idx_names=[None])
    elif data_type == 'long':
        max_rows = get_option('display.max_rows')
        return mkdf(max_rows + 1, 3,
                    data_gen_f=lambda *args: randint(2),
                    c_idx_type='s', r_idx_type='i',
                    c_idx_names=[None], r_idx_names=[None])
    elif data_type == 'nonascii':
        return pd.DataFrame({'en': 'in English'.split(),
                             'es': 'en español'.split()})
    elif data_type == 'colwidth':
        _cw = get_option('display.max_colwidth') + 1
        return mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
                    c_idx_type='s', r_idx_type='i',
                    c_idx_names=[None], r_idx_names=[None])
    elif data_type == 'mixed':
        return DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
                          'b': np.arange(1, 6),
                          'c': list('abcde')})
    elif data_type == 'float':
        return mkdf(5, 3, data_gen_f=lambda r, c: float(r) + 0.01,
                    c_idx_type='s', r_idx_type='i',
                    c_idx_names=[None], r_idx_names=[None])
    elif data_type == 'int':
        return mkdf(5, 3, data_gen_f=lambda *args: randint(2),
                    c_idx_type='s', r_idx_type='i',
                    c_idx_names=[None], r_idx_names=[None])
    else:
        raise ValueError


@pytest.mark.single
@pytest.mark.skipif(not _DEPS_INSTALLED,
                    reason="clipboard primitives not installed")
class TestClipboard(object):

    def check_round_trip_frame(self, data, excel=None, sep=None,
                               encoding=None):
        data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
        result = read_clipboard(sep=sep or '\t', index_col=0,
                                encoding=encoding)
        tm.assert_frame_equal(data, result, check_dtype=False)

    # Test that default arguments copy as tab delimited
    def test_round_trip_frame(self, df):
        self.check_round_trip_frame(df)

    # Test that explicit delimiters are respected
    @pytest.mark.parametrize('sep', ['\t', ',', '|'])
    def test_round_trip_frame_sep(self, df, sep):
        self.check_round_trip_frame(df, sep=sep)

    # Test white space separator
    def test_round_trip_frame_string(self, df):
        df.to_clipboard(excel=False, sep=None)
        result = read_clipboard()
        assert df.to_string() == result.to_string()
        assert df.shape == result.shape

    # Two character separator is not supported in to_clipboard
    # Test that multi-character separators are not silently passed
    def test_excel_sep_warning(self, df):
        with tm.assert_produces_warning():
            df.to_clipboard(excel=True, sep=r'\t')

    # Separator is ignored when excel=False and should produce a warning
    def test_copy_delim_warning(self, df):
        with tm.assert_produces_warning():
            df.to_clipboard(excel=False, sep='\t')

    # Tests that the default behavior of to_clipboard is tab
    # delimited and excel="True"
    @pytest.mark.parametrize('sep', ['\t', None, 'default'])
    @pytest.mark.parametrize('excel', [True, None, 'default'])
    def test_clipboard_copy_tabs_default(self, sep, excel, df):
        kwargs = build_kwargs(sep, excel)
        df.to_clipboard(**kwargs)
        if PY2:
            # to_clipboard copies unicode, to_csv produces bytes. This is
            # expected behavior
            assert clipboard_get().encode('utf-8') == df.to_csv(sep='\t')
        else:
            assert clipboard_get() == df.to_csv(sep='\t')

    # Tests reading of white space separated tables
    @pytest.mark.parametrize('sep', [None, 'default'])
    @pytest.mark.parametrize('excel', [False])
    def test_clipboard_copy_strings(self, sep, excel, df):
        kwargs = build_kwargs(sep, excel)
        df.to_clipboard(**kwargs)
        result = read_clipboard(sep=r'\s+')
        assert result.to_string() == df.to_string()
        assert df.shape == result.shape

    def test_read_clipboard_infer_excel(self):
        # gh-19010: avoid warnings
        clip_kwargs = dict(engine="python")

        text = dedent("""
                      John James	Charlie Mingus
                      1	2
                      4	Harry Carney
                      """.strip())
        clipboard_set(text)
        df = pd.read_clipboard(**clip_kwargs)

        # excel data is parsed correctly
        assert df.iloc[1][1] == 'Harry Carney'

        # having diff tab counts doesn't trigger it
        text = dedent("""
                      a\t b
                      1 2
                      3 4
                      """.strip())
        clipboard_set(text)
        res = pd.read_clipboard(**clip_kwargs)

        text = dedent("""
                      a b
                      1 2
                      3 4
                      """.strip())
        clipboard_set(text)
        exp = pd.read_clipboard(**clip_kwargs)

        tm.assert_frame_equal(res, exp)

    def test_invalid_encoding(self, df):
        # test case for testing invalid encoding
        with pytest.raises(ValueError):
            df.to_clipboard(encoding='ascii')
        with pytest.raises(NotImplementedError):
            pd.read_clipboard(encoding='ascii')

    @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8'])
    def test_round_trip_valid_encodings(self, enc, df):
        self.check_round_trip_frame(df, encoding=enc)
kdebrab/pandas
pandas/tests/io/test_clipboard.py
pandas/tests/indexes/multi/test_format.py
from __future__ import division, absolute_import, print_function import functools import itertools import operator import sys import warnings import numbers import contextlib import numpy as np from numpy.compat import pickle, basestring from . import multiarray from .multiarray import ( _fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS, BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE, WRAP, arange, array, broadcast, can_cast, compare_chararrays, concatenate, copyto, dot, dtype, empty, empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring, inner, int_asbuffer, lexsort, matmul, may_share_memory, min_scalar_type, ndarray, nditer, nested_iters, promote_types, putmask, result_type, set_numeric_ops, shares_memory, vdot, where, zeros, normalize_axis_index) if sys.version_info[0] < 3: from .multiarray import newbuffer, getbuffer from . import overrides from . import umath from . import shape_base from .overrides import set_module from .umath import (multiply, invert, sin, PINF, NAN) from . import numerictypes from .numerictypes import longlong, intc, int_, float_, complex_, bool_ from ._exceptions import TooHardError, AxisError from ._asarray import asarray, asanyarray from ._ufunc_config import errstate bitwise_not = invert ufunc = type(sin) newaxis = None if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') __all__ = [ 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type', 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError'] if sys.version_info[0] < 3: __all__.extend(['getbuffer', 'newbuffer']) @set_module('numpy') class ComplexWarning(RuntimeWarning): """ The warning raised when casting a complex dtype to a real dtype. As implemented, casting a complex number to a real discards its imaginary part, but this behavior may not be what the user actually wants. """ pass def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): return (a,) @array_function_dispatch(_zeros_like_dispatcher) def zeros_like(a, dtype=None, order='K', subok=True, shape=None): """ Return an array of zeros with the same shape and type as a given array. Parameters ---------- a : array_like The shape and data-type of `a` define these same attributes of the returned array. dtype : data-type, optional Overrides the data type of the result. .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 
'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of 'a', otherwise it will be a base-class array. Defaults to True. shape : int or sequence of ints, optional. Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. .. versionadded:: 1.17.0 Returns ------- out : ndarray Array of zeros with the same shape and type as `a`. See Also -------- empty_like : Return an empty array with shape and type of input. ones_like : Return an array of ones with shape and type of input. full_like : Return a new array with shape of input filled with value. zeros : Return a new array setting values to zero. Examples -------- >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> np.zeros_like(x) array([[0, 0, 0], [0, 0, 0]]) >>> y = np.arange(3, dtype=float) >>> y array([0., 1., 2.]) >>> np.zeros_like(y) array([0., 0., 0.]) """ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) # needed instead of a 0 to get same result as zeros for for string dtypes z = zeros(1, dtype=res.dtype) multiarray.copyto(res, z, casting='unsafe') return res @set_module('numpy') def ones(shape, dtype=None, order='C'): """ Return a new array of given shape and type, filled with ones. Parameters ---------- shape : int or sequence of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: C Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- out : ndarray Array of ones with the given shape, dtype, and order. See Also -------- ones_like : Return an array of ones with shape and type of input. empty : Return a new uninitialized array. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Examples -------- >>> np.ones(5) array([1., 1., 1., 1., 1.]) >>> np.ones((5,), dtype=int) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) array([[1.], [1.]]) >>> s = (2,2) >>> np.ones(s) array([[1., 1.], [1., 1.]]) """ a = empty(shape, dtype, order) multiarray.copyto(a, 1, casting='unsafe') return a def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): return (a,) @array_function_dispatch(_ones_like_dispatcher) def ones_like(a, dtype=None, order='K', subok=True, shape=None): """ Return an array of ones with the same shape and type as a given array. Parameters ---------- a : array_like The shape and data-type of `a` define these same attributes of the returned array. dtype : data-type, optional Overrides the data type of the result. .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of 'a', otherwise it will be a base-class array. Defaults to True. shape : int or sequence of ints, optional. Overrides the shape of the result. 
If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. .. versionadded:: 1.17.0 Returns ------- out : ndarray Array of ones with the same shape and type as `a`. See Also -------- empty_like : Return an empty array with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. ones : Return a new array setting values to one. Examples -------- >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> np.ones_like(x) array([[1, 1, 1], [1, 1, 1]]) >>> y = np.arange(3, dtype=float) >>> y array([0., 1., 2.]) >>> np.ones_like(y) array([1., 1., 1.]) """ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) multiarray.copyto(res, 1, casting='unsafe') return res @set_module('numpy') def full(shape, fill_value, dtype=None, order='C'): """ Return a new array of given shape and type, filled with `fill_value`. Parameters ---------- shape : int or sequence of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. fill_value : scalar Fill value. dtype : data-type, optional The desired data-type for the array The default, `None`, means `np.array(fill_value).dtype`. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. Returns ------- out : ndarray Array of `fill_value` with the given shape, dtype, and order. See Also -------- full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. Examples -------- >>> np.full((2, 2), np.inf) array([[inf, inf], [inf, inf]]) >>> np.full((2, 2), 10) array([[10, 10], [10, 10]]) """ if dtype is None: dtype = array(fill_value).dtype a = empty(shape, dtype, order) multiarray.copyto(a, fill_value, casting='unsafe') return a def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None): return (a,) @array_function_dispatch(_full_like_dispatcher) def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): """ Return a full array with the same shape and type as a given array. Parameters ---------- a : array_like The shape and data-type of `a` define these same attributes of the returned array. fill_value : scalar Fill value. dtype : data-type, optional Overrides the data type of the result. order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. subok : bool, optional. If True, then the newly created array will use the sub-class type of 'a', otherwise it will be a base-class array. Defaults to True. shape : int or sequence of ints, optional. Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. .. versionadded:: 1.17.0 Returns ------- out : ndarray Array of `fill_value` with the same shape and type as `a`. See Also -------- empty_like : Return an empty array with shape and type of input. ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full : Return a new array of given shape filled with value. 
Examples -------- >>> x = np.arange(6, dtype=int) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) >>> np.full_like(x, 0.1) array([0, 0, 0, 0, 0, 0]) >>> np.full_like(x, 0.1, dtype=np.double) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) >>> np.full_like(x, np.nan, dtype=np.double) array([nan, nan, nan, nan, nan, nan]) >>> y = np.arange(6, dtype=np.double) >>> np.full_like(y, 0.1) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) """ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) multiarray.copyto(res, fill_value, casting='unsafe') return res def _count_nonzero_dispatcher(a, axis=None): return (a,) @array_function_dispatch(_count_nonzero_dispatcher) def count_nonzero(a, axis=None): """ Counts the number of non-zero values in the array ``a``. The word "non-zero" is in reference to the Python 2.x built-in method ``__nonzero__()`` (renamed ``__bool__()`` in Python 3.x) of Python objects that tests an object's "truthfulness". For example, any number is considered truthful if it is nonzero, whereas any string is considered truthful if it is not the empty string. Thus, this function (recursively) counts how many elements in ``a`` (and in sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` method evaluated to ``True``. Parameters ---------- a : array_like The array for which to count non-zeros. axis : int or tuple, optional Axis or tuple of axes along which to count non-zeros. Default is None, meaning that non-zeros will be counted along a flattened version of ``a``. .. versionadded:: 1.12.0 Returns ------- count : int or array of int Number of non-zero values in the array along a given axis. Otherwise, the total number of non-zero values in the array is returned. See Also -------- nonzero : Return the coordinates of all the non-zero values. Examples -------- >>> np.count_nonzero(np.eye(4)) 4 >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]]) 5 >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=0) array([1, 1, 1, 1, 1]) >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1) array([2, 3]) """ if axis is None: return multiarray.count_nonzero(a) a = asanyarray(a) # TODO: this works around .astype(bool) not working properly (gh-9847) if np.issubdtype(a.dtype, np.character): a_bool = a != a.dtype.type() else: a_bool = a.astype(np.bool_, copy=False) return a_bool.sum(axis=axis, dtype=np.intp) @set_module('numpy') def isfortran(a): """ Check if the array is Fortran contiguous but *not* C contiguous. This function is obsolete and, because of changes due to relaxed stride checking, its return value for the same array may differ for versions of NumPy >= 1.10.0 and previous versions. If you only want to check if an array is Fortran contiguous use ``a.flags.f_contiguous`` instead. Parameters ---------- a : ndarray Input array. Returns ------- isfortran : bool Returns True if the array is Fortran contiguous but *not* C contiguous. Examples -------- np.array allows to specify whether the array is written in C-contiguous order (last index varies the fastest), or FORTRAN-contiguous order in memory (first index varies the fastest). >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') >>> a array([[1, 2, 3], [4, 5, 6]]) >>> np.isfortran(a) False >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') >>> b array([[1, 2, 3], [4, 5, 6]]) >>> np.isfortran(b) True The transpose of a C-ordered array is a FORTRAN-ordered array. 
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') >>> a array([[1, 2, 3], [4, 5, 6]]) >>> np.isfortran(a) False >>> b = a.T >>> b array([[1, 4], [2, 5], [3, 6]]) >>> np.isfortran(b) True C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. >>> np.isfortran(np.array([1, 2], order='FORTRAN')) False """ return a.flags.fnc def _argwhere_dispatcher(a): return (a,) @array_function_dispatch(_argwhere_dispatcher) def argwhere(a): """ Find the indices of array elements that are non-zero, grouped by element. Parameters ---------- a : array_like Input data. Returns ------- index_array : (N, a.ndim) ndarray Indices of elements that are non-zero. Indices are grouped by element. This array will have shape ``(N, a.ndim)`` where ``N`` is the number of non-zero items. See Also -------- where, nonzero Notes ----- ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, but produces a result of the correct shape for a 0D array. The output of ``argwhere`` is not suitable for indexing arrays. For this purpose use ``nonzero(a)`` instead. Examples -------- >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> np.argwhere(x>1) array([[0, 2], [1, 0], [1, 1], [1, 2]]) """ # nonzero does not behave well on 0d, so promote to 1d if np.ndim(a) == 0: a = shape_base.atleast_1d(a) # then remove the added dimension return argwhere(a)[:,:0] return transpose(nonzero(a)) def _flatnonzero_dispatcher(a): return (a,) @array_function_dispatch(_flatnonzero_dispatcher) def flatnonzero(a): """ Return indices that are non-zero in the flattened version of a. This is equivalent to np.nonzero(np.ravel(a))[0]. Parameters ---------- a : array_like Input data. Returns ------- res : ndarray Output array, containing the indices of the elements of `a.ravel()` that are non-zero. See Also -------- nonzero : Return the indices of the non-zero elements of the input array. ravel : Return a 1-D array containing the elements of the input array. Examples -------- >>> x = np.arange(-2, 3) >>> x array([-2, -1, 0, 1, 2]) >>> np.flatnonzero(x) array([0, 1, 3, 4]) Use the indices of the non-zero elements as an index array to extract these elements: >>> x.ravel()[np.flatnonzero(x)] array([-2, -1, 1, 2]) """ return np.nonzero(np.ravel(a))[0] _mode_from_name_dict = {'v': 0, 's': 1, 'f': 2} def _mode_from_name(mode): if isinstance(mode, basestring): return _mode_from_name_dict[mode.lower()[0]] return mode def _correlate_dispatcher(a, v, mode=None): return (a, v) @array_function_dispatch(_correlate_dispatcher) def correlate(a, v, mode='valid'): """ Cross-correlation of two 1-dimensional sequences. This function computes the correlation as generally defined in signal processing texts:: c_{av}[k] = sum_n a[n+k] * conj(v[n]) with a and v sequences being zero-padded where necessary and conj being the conjugate. Parameters ---------- a, v : array_like Input sequences. mode : {'valid', 'same', 'full'}, optional Refer to the `convolve` docstring. Note that the default is 'valid', unlike `convolve`, which uses 'full'. old_behavior : bool `old_behavior` was removed in NumPy 1.10. If you need the old behavior, use `multiarray.correlate`. Returns ------- out : ndarray Discrete cross-correlation of `a` and `v`. See Also -------- convolve : Discrete, linear convolution of two one-dimensional sequences. multiarray.correlate : Old, no conjugate, version of correlate. Notes ----- The definition of correlation above is not unique and sometimes correlation may be defined differently. 
Another common definition is:: c'_{av}[k] = sum_n a[n] conj(v[n+k]) which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``. Examples -------- >>> np.correlate([1, 2, 3], [0, 1, 0.5]) array([3.5]) >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") array([2. , 3.5, 3. ]) >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") array([0.5, 2. , 3.5, 3. , 0. ]) Using complex sequences: >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) Note that you get the time reversed, complex conjugated result when the two input sequences change places, i.e., ``c_{va}[k] = c^{*}_{av}[-k]``: >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) """ mode = _mode_from_name(mode) return multiarray.correlate2(a, v, mode) def _convolve_dispatcher(a, v, mode=None): return (a, v) @array_function_dispatch(_convolve_dispatcher) def convolve(a, v, mode='full'): """ Returns the discrete, linear convolution of two one-dimensional sequences. The convolution operator is often seen in signal processing, where it models the effect of a linear time-invariant system on a signal [1]_. In probability theory, the sum of two independent random variables is distributed according to the convolution of their individual distributions. If `v` is longer than `a`, the arrays are swapped before computation. Parameters ---------- a : (N,) array_like First one-dimensional input array. v : (M,) array_like Second one-dimensional input array. mode : {'full', 'valid', 'same'}, optional 'full': By default, mode is 'full'. This returns the convolution at each point of overlap, with an output shape of (N+M-1,). At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': Mode 'same' returns output of length ``max(M, N)``. Boundary effects are still visible. 'valid': Mode 'valid' returns output of length ``max(M, N) - min(M, N) + 1``. The convolution product is only given for points where the signals overlap completely. Values outside the signal boundary have no effect. Returns ------- out : ndarray Discrete, linear convolution of `a` and `v`. See Also -------- scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier Transform. scipy.linalg.toeplitz : Used to construct the convolution operator. polymul : Polynomial multiplication. Same output as convolve, but also accepts poly1d objects as input. Notes ----- The discrete convolution operation is defined as .. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m] It can be shown that a convolution :math:`x(t) * y(t)` in time/space is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier domain, after appropriate padding (padding is necessary to prevent circular convolution). Since multiplication is more efficient (faster) than convolution, the function `scipy.signal.fftconvolve` exploits the FFT to calculate the convolution of large data-sets. References ---------- .. [1] Wikipedia, "Convolution", https://en.wikipedia.org/wiki/Convolution Examples -------- Note how the convolution operator flips the second array before "sliding" the two across one another: >>> np.convolve([1, 2, 3], [0, 1, 0.5]) array([0. , 1. , 2.5, 4. , 1.5]) Only return the middle values of the convolution. Contains boundary effects, where zeros are taken into account: >>> np.convolve([1,2,3],[0,1,0.5], 'same') array([1. , 2.5, 4. 
]) The two arrays are of the same length, so there is only one position where they completely overlap: >>> np.convolve([1,2,3],[0,1,0.5], 'valid') array([2.5]) """ a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1) if (len(v) > len(a)): a, v = v, a if len(a) == 0: raise ValueError('a cannot be empty') if len(v) == 0: raise ValueError('v cannot be empty') mode = _mode_from_name(mode) return multiarray.correlate(a, v[::-1], mode) def _outer_dispatcher(a, b, out=None): return (a, b, out) @array_function_dispatch(_outer_dispatcher) def outer(a, b, out=None): """ Compute the outer product of two vectors. Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``, the outer product [1]_ is:: [[a0*b0 a0*b1 ... a0*bN ] [a1*b0 . [ ... . [aM*b0 aM*bN ]] Parameters ---------- a : (M,) array_like First input vector. Input is flattened if not already 1-dimensional. b : (N,) array_like Second input vector. Input is flattened if not already 1-dimensional. out : (M, N) ndarray, optional A location where the result is stored .. versionadded:: 1.9.0 Returns ------- out : (M, N) ndarray ``out[i, j] = a[i] * b[j]`` See also -------- inner einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. ufunc.outer : A generalization to N dimensions and other operations. ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent. References ---------- .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd ed., Baltimore, MD, Johns Hopkins University Press, 1996, pg. 8. Examples -------- Make a (*very* coarse) grid for computing a Mandelbrot set: >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) >>> rl array([[-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.]]) >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) >>> im array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) >>> grid = rl + im >>> grid array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) An example using a "vector" of letters: >>> x = np.array(['a', 'b', 'c'], dtype=object) >>> np.outer(x, [1, 2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], ['c', 'cc', 'ccc']], dtype=object) """ a = asarray(a) b = asarray(b) return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out) def _tensordot_dispatcher(a, b, axes=None): return (a, b) @array_function_dispatch(_tensordot_dispatcher) def tensordot(a, b, axes=2): """ Compute tensor dot product along specified axes. Given two tensors, `a` and `b`, and an array_like object containing two array_like objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s elements (components) over the axes specified by ``a_axes`` and ``b_axes``. The third argument can be a single non-negative integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions of `a` and the first ``N`` dimensions of `b` are summed over. Parameters ---------- a, b : array_like Tensors to "dot". axes : int or (2,) array_like * integer_like If an int N, sum over the last N axes of `a` and the first N axes of `b` in order. The sizes of the corresponding axes must match. 
* (2,) array_like Or, a list of axes to be summed over, first sequence applying to `a`, second to `b`. Both elements array_like must be of the same length. Returns ------- output : ndarray The tensor dot product of the input. See Also -------- dot, einsum Notes ----- Three common use cases are: * ``axes = 0`` : tensor product :math:`a\\otimes b` * ``axes = 1`` : tensor dot product :math:`a\\cdot b` * ``axes = 2`` : (default) tensor double contraction :math:`a:b` When `axes` is integer_like, the sequence for evaluation will be: first the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and Nth axis in `b` last. When there is more than one axis to sum over - and they are not the last (first) axes of `a` (`b`) - the argument `axes` should consist of two sequences of the same length, with the first axis to sum over given first in both sequences, the second axis second, and so forth. Examples -------- A "traditional" example: >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) >>> c.shape (5, 2) >>> c array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) >>> # A slower but equivalent way of computing the same... >>> d = np.zeros((5,2)) >>> for i in range(5): ... for j in range(2): ... for k in range(3): ... for n in range(4): ... d[i,j] += a[k,n,i] * b[n,k,j] >>> c == d array([[ True, True], [ True, True], [ True, True], [ True, True], [ True, True]]) An extended example taking advantage of the overloading of + and \\*: >>> a = np.array(range(1, 9)) >>> a.shape = (2, 2, 2) >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) >>> A.shape = (2, 2) >>> a; A array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) array([['a', 'b'], ['c', 'd']], dtype=object) >>> np.tensordot(a, A) # third argument default is 2 for double-contraction array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) >>> np.tensordot(a, A, 1) array([[['acc', 'bdd'], ['aaacccc', 'bbbdddd']], [['aaaaacccccc', 'bbbbbdddddd'], ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) array([[[[['a', 'b'], ['c', 'd']], ... 
>>> np.tensordot(a, A, (0, 1)) array([[['abbbbb', 'cddddd'], ['aabbbbbb', 'ccdddddd']], [['aaabbbbbbb', 'cccddddddd'], ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) >>> np.tensordot(a, A, (2, 1)) array([[['abb', 'cdd'], ['aaabbbb', 'cccdddd']], [['aaaaabbbbbb', 'cccccdddddd'], ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) >>> np.tensordot(a, A, ((0, 1), (0, 1))) array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) >>> np.tensordot(a, A, ((2, 1), (1, 0))) array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) """ try: iter(axes) except Exception: axes_a = list(range(-axes, 0)) axes_b = list(range(0, axes)) else: axes_a, axes_b = axes try: na = len(axes_a) axes_a = list(axes_a) except TypeError: axes_a = [axes_a] na = 1 try: nb = len(axes_b) axes_b = list(axes_b) except TypeError: axes_b = [axes_b] nb = 1 a, b = asarray(a), asarray(b) as_ = a.shape nda = a.ndim bs = b.shape ndb = b.ndim equal = True if na != nb: equal = False else: for k in range(na): if as_[axes_a[k]] != bs[axes_b[k]]: equal = False break if axes_a[k] < 0: axes_a[k] += nda if axes_b[k] < 0: axes_b[k] += ndb if not equal: raise ValueError("shape-mismatch for sum") # Move the axes to sum over to the end of "a" # and to the front of "b" notin = [k for k in range(nda) if k not in axes_a] newaxes_a = notin + axes_a N2 = 1 for axis in axes_a: N2 *= as_[axis] newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2) olda = [as_[axis] for axis in notin] notin = [k for k in range(ndb) if k not in axes_b] newaxes_b = axes_b + notin N2 = 1 for axis in axes_b: N2 *= bs[axis] newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin]))) oldb = [bs[axis] for axis in notin] at = a.transpose(newaxes_a).reshape(newshape_a) bt = b.transpose(newaxes_b).reshape(newshape_b) res = dot(at, bt) return res.reshape(olda + oldb) def _roll_dispatcher(a, shift, axis=None): return (a,) @array_function_dispatch(_roll_dispatcher) def roll(a, shift, axis=None): """ Roll array elements along a given axis. Elements that roll beyond the last position are re-introduced at the first. Parameters ---------- a : array_like Input array. shift : int or tuple of ints The number of places by which elements are shifted. If a tuple, then `axis` must be a tuple of the same size, and each of the given axes is shifted by the corresponding number. If an int while `axis` is a tuple of ints, then the same value is used for all given axes. axis : int or tuple of ints, optional Axis or axes along which elements are shifted. By default, the array is flattened before shifting, after which the original shape is restored. Returns ------- res : ndarray Output array, with the same shape as `a`. See Also -------- rollaxis : Roll the specified axis backwards, until it lies in a given position. Notes ----- .. versionadded:: 1.12.0 Supports rolling over multiple dimensions simultaneously. 
Examples -------- >>> x = np.arange(10) >>> np.roll(x, 2) array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) >>> np.roll(x, -2) array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) >>> x2 = np.reshape(x, (2,5)) >>> x2 array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) >>> np.roll(x2, 1) array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]) >>> np.roll(x2, -1) array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 0]]) >>> np.roll(x2, 1, axis=0) array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]) >>> np.roll(x2, -1, axis=0) array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]) >>> np.roll(x2, 1, axis=1) array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]) >>> np.roll(x2, -1, axis=1) array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]) """ a = asanyarray(a) if axis is None: return roll(a.ravel(), shift, 0).reshape(a.shape) else: axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) broadcasted = broadcast(shift, axis) if broadcasted.ndim > 1: raise ValueError( "'shift' and 'axis' should be scalars or 1D sequences") shifts = {ax: 0 for ax in range(a.ndim)} for sh, ax in broadcasted: shifts[ax] += sh rolls = [((slice(None), slice(None)),)] * a.ndim for ax, offset in shifts.items(): offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. if offset: # (original, result), (original, result) rolls[ax] = ((slice(None, -offset), slice(offset, None)), (slice(-offset, None), slice(None, offset))) result = empty_like(a) for indices in itertools.product(*rolls): arr_index, res_index = zip(*indices) result[res_index] = a[arr_index] return result def _rollaxis_dispatcher(a, axis, start=None): return (a,) @array_function_dispatch(_rollaxis_dispatcher) def rollaxis(a, axis, start=0): """ Roll the specified axis backwards, until it lies in a given position. This function continues to be supported for backward compatibility, but you should prefer `moveaxis`. The `moveaxis` function was added in NumPy 1.11. Parameters ---------- a : ndarray Input array. axis : int The axis to roll backwards. The positions of the other axes do not change relative to one another. start : int, optional The axis is rolled until it lies before this position. The default, 0, results in a "complete" roll. Returns ------- res : ndarray For NumPy >= 1.10.0 a view of `a` is always returned. For earlier NumPy versions a view of `a` is returned only if the order of the axes is changed, otherwise the input array is returned. See Also -------- moveaxis : Move array axes to new positions. roll : Roll the elements of an array by a number of positions along a given axis. Examples -------- >>> a = np.ones((3,4,5,6)) >>> np.rollaxis(a, 3, 1).shape (3, 6, 4, 5) >>> np.rollaxis(a, 2).shape (5, 3, 4, 6) >>> np.rollaxis(a, 1, 4).shape (3, 5, 6, 4) """ n = a.ndim axis = normalize_axis_index(axis, n) if start < 0: start += n msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" if not (0 <= start < n + 1): raise AxisError(msg % ('start', -n, 'start', n + 1, start)) if axis < start: # it's been removed start -= 1 if axis == start: return a[...] axes = list(range(0, n)) axes.remove(axis) axes.insert(start, axis) return a.transpose(axes) def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): """ Normalizes an axis argument into a tuple of non-negative integer axes. This handles shorthands such as ``1`` and converts them to ``(1,)``, as well as performing the handling of negative indices covered by `normalize_axis_index`. By default, this forbids axes from being specified multiple times. Used internally by multi-axis-checking logic. .. 
versionadded:: 1.13.0 Parameters ---------- axis : int, iterable of int The un-normalized index or indices of the axis. ndim : int The number of dimensions of the array that `axis` should be normalized against. argname : str, optional A prefix to put before the error message, typically the name of the argument. allow_duplicate : bool, optional If False, the default, disallow an axis from being specified twice. Returns ------- normalized_axes : tuple of int The normalized axis index, such that `0 <= normalized_axis < ndim` Raises ------ AxisError If any axis provided is out of range ValueError If an axis is repeated See also -------- normalize_axis_index : normalizing a single scalar axis """ # Optimization to speed-up the most common cases. if type(axis) not in (tuple, list): try: axis = [operator.index(axis)] except TypeError: pass # Going via an iterator directly is slower than via list comprehension. axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) if not allow_duplicate and len(set(axis)) != len(axis): if argname: raise ValueError('repeated axis in `{}` argument'.format(argname)) else: raise ValueError('repeated axis') return axis def _moveaxis_dispatcher(a, source, destination): return (a,) @array_function_dispatch(_moveaxis_dispatcher) def moveaxis(a, source, destination): """ Move axes of an array to new positions. Other axes remain in their original order. .. versionadded:: 1.11.0 Parameters ---------- a : np.ndarray The array whose axes should be reordered. source : int or sequence of int Original positions of the axes to move. These must be unique. destination : int or sequence of int Destination positions for each of the original axes. These must also be unique. Returns ------- result : np.ndarray Array with moved axes. This array is a view of the input array. See Also -------- transpose: Permute the dimensions of an array. swapaxes: Interchange two axes of an array. Examples -------- >>> x = np.zeros((3, 4, 5)) >>> np.moveaxis(x, 0, -1).shape (4, 5, 3) >>> np.moveaxis(x, -1, 0).shape (5, 3, 4) These all achieve the same result: >>> np.transpose(x).shape (5, 4, 3) >>> np.swapaxes(x, 0, -1).shape (5, 4, 3) >>> np.moveaxis(x, [0, 1], [-1, -2]).shape (5, 4, 3) >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape (5, 4, 3) """ try: # allow duck-array types if they define transpose transpose = a.transpose except AttributeError: a = asarray(a) transpose = a.transpose source = normalize_axis_tuple(source, a.ndim, 'source') destination = normalize_axis_tuple(destination, a.ndim, 'destination') if len(source) != len(destination): raise ValueError('`source` and `destination` arguments must have ' 'the same number of elements') order = [n for n in range(a.ndim) if n not in source] for dest, src in sorted(zip(destination, source)): order.insert(dest, src) result = transpose(order) return result # fix hack in scipy which imports this function def _move_axis_to_0(a, axis): return moveaxis(a, axis, 0) def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None): return (a, b) @array_function_dispatch(_cross_dispatcher) def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): """ Return the cross product of two (arrays of) vectors. The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors are defined by the last axis of `a` and `b` by default, and these axes can have dimensions 2 or 3. 
Where the dimension of either `a` or `b` is 2, the third component of the input vector is assumed to be zero and the cross product calculated accordingly. In cases where both input vectors have dimension 2, the z-component of the cross product is returned. Parameters ---------- a : array_like Components of the first vector(s). b : array_like Components of the second vector(s). axisa : int, optional Axis of `a` that defines the vector(s). By default, the last axis. axisb : int, optional Axis of `b` that defines the vector(s). By default, the last axis. axisc : int, optional Axis of `c` containing the cross product vector(s). Ignored if both input vectors have dimension 2, as the return is scalar. By default, the last axis. axis : int, optional If defined, the axis of `a`, `b` and `c` that defines the vector(s) and cross product(s). Overrides `axisa`, `axisb` and `axisc`. Returns ------- c : ndarray Vector cross product(s). Raises ------ ValueError When the dimension of the vector(s) in `a` and/or `b` does not equal 2 or 3. See Also -------- inner : Inner product outer : Outer product. ix_ : Construct index arrays. Notes ----- .. versionadded:: 1.9.0 Supports full broadcasting of the inputs. Examples -------- Vector cross-product. >>> x = [1, 2, 3] >>> y = [4, 5, 6] >>> np.cross(x, y) array([-3, 6, -3]) One vector with dimension 2. >>> x = [1, 2] >>> y = [4, 5, 6] >>> np.cross(x, y) array([12, -6, -3]) Equivalently: >>> x = [1, 2, 0] >>> y = [4, 5, 6] >>> np.cross(x, y) array([12, -6, -3]) Both vectors with dimension 2. >>> x = [1,2] >>> y = [4,5] >>> np.cross(x, y) array(-3) Multiple vector cross-products. Note that the direction of the cross product vector is defined by the `right-hand rule`. >>> x = np.array([[1,2,3], [4,5,6]]) >>> y = np.array([[4,5,6], [1,2,3]]) >>> np.cross(x, y) array([[-3, 6, -3], [ 3, -6, 3]]) The orientation of `c` can be changed using the `axisc` keyword. >>> np.cross(x, y, axisc=0) array([[-3, 3], [ 6, -6], [-3, 3]]) Change the vector definition of `x` and `y` using `axisa` and `axisb`. 
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) >>> np.cross(x, y) array([[ -6, 12, -6], [ 0, 0, 0], [ 6, -12, 6]]) >>> np.cross(x, y, axisa=0, axisb=0) array([[-24, 48, -24], [-30, 60, -30], [-36, 72, -36]]) """ if axis is not None: axisa, axisb, axisc = (axis,) * 3 a = asarray(a) b = asarray(b) # Check axisa and axisb are within bounds axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa') axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') # Move working axis to the end of the shape a = moveaxis(a, axisa, -1) b = moveaxis(b, axisb, -1) msg = ("incompatible dimensions for cross product\n" "(dimension must be 2 or 3)") if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): raise ValueError(msg) # Create the output array shape = broadcast(a[..., 0], b[..., 0]).shape if a.shape[-1] == 3 or b.shape[-1] == 3: shape += (3,) # Check axisc is within bounds axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') dtype = promote_types(a.dtype, b.dtype) cp = empty(shape, dtype) # create local aliases for readability a0 = a[..., 0] a1 = a[..., 1] if a.shape[-1] == 3: a2 = a[..., 2] b0 = b[..., 0] b1 = b[..., 1] if b.shape[-1] == 3: b2 = b[..., 2] if cp.ndim != 0 and cp.shape[-1] == 3: cp0 = cp[..., 0] cp1 = cp[..., 1] cp2 = cp[..., 2] if a.shape[-1] == 2: if b.shape[-1] == 2: # a0 * b1 - a1 * b0 multiply(a0, b1, out=cp) cp -= a1 * b0 return cp else: assert b.shape[-1] == 3 # cp0 = a1 * b2 - 0 (a2 = 0) # cp1 = 0 - a0 * b2 (a2 = 0) # cp2 = a0 * b1 - a1 * b0 multiply(a1, b2, out=cp0) multiply(a0, b2, out=cp1) negative(cp1, out=cp1) multiply(a0, b1, out=cp2) cp2 -= a1 * b0 else: assert a.shape[-1] == 3 if b.shape[-1] == 3: # cp0 = a1 * b2 - a2 * b1 # cp1 = a2 * b0 - a0 * b2 # cp2 = a0 * b1 - a1 * b0 multiply(a1, b2, out=cp0) tmp = array(a2 * b1) cp0 -= tmp multiply(a2, b0, out=cp1) multiply(a0, b2, out=tmp) cp1 -= tmp multiply(a0, b1, out=cp2) multiply(a1, b0, out=tmp) cp2 -= tmp else: assert b.shape[-1] == 2 # cp0 = 0 - a2 * b1 (b2 = 0) # cp1 = a2 * b0 - 0 (b2 = 0) # cp2 = a0 * b1 - a1 * b0 multiply(a2, b1, out=cp0) negative(cp0, out=cp0) multiply(a2, b0, out=cp1) multiply(a0, b1, out=cp2) cp2 -= a1 * b0 return moveaxis(cp, -1, axisc) little_endian = (sys.byteorder == 'little') @set_module('numpy') def indices(dimensions, dtype=int, sparse=False): """ Return an array representing the indices of a grid. Compute an array where the subarrays contain index values 0, 1, ... varying only along the corresponding axis. Parameters ---------- dimensions : sequence of ints The shape of the grid. dtype : dtype, optional Data type of the result. sparse : boolean, optional Return a sparse representation of the grid instead of a dense representation. Default is False. .. versionadded:: 1.17 Returns ------- grid : one ndarray or tuple of ndarrays If sparse is False: Returns one array of grid indices, ``grid.shape = (len(dimensions),) + tuple(dimensions)``. If sparse is True: Returns a tuple of arrays, with ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with dimensions[i] in the ith place See Also -------- mgrid, ogrid, meshgrid Notes ----- The output shape in the dense case is obtained by prepending the number of dimensions in front of the tuple of dimensions, i.e. if `dimensions` is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is ``(N, r0, ..., rN-1)``. The subarrays ``grid[k]`` contains the N-D array of indices along the ``k-th`` axis. 
Explicitly:: grid[k, i0, i1, ..., iN-1] = ik Examples -------- >>> grid = np.indices((2, 3)) >>> grid.shape (2, 2, 3) >>> grid[0] # row indices array([[0, 0, 0], [1, 1, 1]]) >>> grid[1] # column indices array([[0, 1, 2], [0, 1, 2]]) The indices can be used as an index into an array. >>> x = np.arange(20).reshape(5, 4) >>> row, col = np.indices((2, 3)) >>> x[row, col] array([[0, 1, 2], [4, 5, 6]]) Note that it would be more straightforward in the above example to extract the required elements directly with ``x[:2, :3]``. If sparse is set to true, the grid will be returned in a sparse representation. >>> i, j = np.indices((2, 3), sparse=True) >>> i.shape (2, 1) >>> j.shape (1, 3) >>> i # row indices array([[0], [1]]) >>> j # column indices array([[0, 1, 2]]) """ dimensions = tuple(dimensions) N = len(dimensions) shape = (1,)*N if sparse: res = tuple() else: res = empty((N,)+dimensions, dtype=dtype) for i, dim in enumerate(dimensions): idx = arange(dim, dtype=dtype).reshape( shape[:i] + (dim,) + shape[i+1:] ) if sparse: res = res + (idx,) else: res[i] = idx return res @set_module('numpy') def fromfunction(function, shape, **kwargs): """ Construct an array by executing a function over each coordinate. The resulting array therefore has a value ``fn(x, y, z)`` at coordinate ``(x, y, z)``. Parameters ---------- function : callable The function is called with N parameters, where N is the rank of `shape`. Each parameter represents the coordinates of the array varying along a specific axis. For example, if `shape` were ``(2, 2)``, then the parameters would be ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` shape : (N,) tuple of ints Shape of the output array, which also determines the shape of the coordinate arrays passed to `function`. dtype : data-type, optional Data-type of the coordinate arrays passed to `function`. By default, `dtype` is float. Returns ------- fromfunction : any The result of the call to `function` is passed back directly. Therefore the shape of `fromfunction` is completely determined by `function`. If `function` returns a scalar value, the shape of `fromfunction` would not match the `shape` parameter. See Also -------- indices, meshgrid Notes ----- Keywords other than `dtype` are passed to `function`. Examples -------- >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) array([[ True, False, False], [False, True, False], [False, False, True]]) >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) array([[0, 1, 2], [1, 2, 3], [2, 3, 4]]) """ dtype = kwargs.pop('dtype', float) args = indices(shape, dtype=dtype) return function(*args, **kwargs) def _frombuffer(buf, dtype, shape, order): return frombuffer(buf, dtype=dtype).reshape(shape, order=order) @set_module('numpy') def isscalar(num): """ Returns True if the type of `num` is a scalar type. Parameters ---------- num : any Input argument, can be of any type and shape. Returns ------- val : bool True if `num` is a scalar type, False if it is not. See Also -------- ndim : Get the number of dimensions of an array Notes ----- In almost all cases ``np.ndim(x) == 0`` should be used instead of this function, as that will also return true for 0d arrays. This is how numpy overloads functions in the style of the ``dx`` arguments to `gradient` and the ``bins`` argument to `histogram`. 
Some key differences: +--------------------------------------+---------------+-------------------+ | x |``isscalar(x)``|``np.ndim(x) == 0``| +======================================+===============+===================+ | PEP 3141 numeric objects (including | ``True`` | ``True`` | | builtins) | | | +--------------------------------------+---------------+-------------------+ | builtin string and buffer objects | ``True`` | ``True`` | +--------------------------------------+---------------+-------------------+ | other builtin objects, like | ``False`` | ``True`` | | `pathlib.Path`, `Exception`, | | | | the result of `re.compile` | | | +--------------------------------------+---------------+-------------------+ | third-party objects like | ``False`` | ``True`` | | `matplotlib.figure.Figure` | | | +--------------------------------------+---------------+-------------------+ | zero-dimensional numpy arrays | ``False`` | ``True`` | +--------------------------------------+---------------+-------------------+ | other numpy arrays | ``False`` | ``False`` | +--------------------------------------+---------------+-------------------+ | `list`, `tuple`, and other sequence | ``False`` | ``False`` | | objects | | | +--------------------------------------+---------------+-------------------+ Examples -------- >>> np.isscalar(3.1) True >>> np.isscalar(np.array(3.1)) False >>> np.isscalar([3.1]) False >>> np.isscalar(False) True >>> np.isscalar('numpy') True NumPy supports PEP 3141 numbers: >>> from fractions import Fraction >>> np.isscalar(Fraction(5, 17)) True >>> from numbers import Number >>> np.isscalar(Number()) True """ return (isinstance(num, generic) or type(num) in ScalarType or isinstance(num, numbers.Number)) @set_module('numpy') def binary_repr(num, width=None): """ Return the binary representation of the input number as a string. For negative numbers, if width is not given, a minus sign is added to the front. If width is given, the two's complement of the number is returned, with respect to that width. In a two's-complement system negative numbers are represented by the two's complement of the absolute value. This is the most common method of representing signed integers on computers [1]_. A N-bit two's-complement system can represent every integer in the range :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. Parameters ---------- num : int Only an integer decimal number can be used. width : int, optional The length of the returned string if `num` is positive, or the length of the two's complement if `num` is negative, provided that `width` is at least a sufficient number of bits for `num` to be represented in the designated form. If the `width` value is insufficient, it will be ignored, and `num` will be returned in binary (`num` > 0) or two's complement (`num` < 0) form with its width equal to the minimum number of bits needed to represent the number in the designated form. This behavior is deprecated and will later raise an error. .. deprecated:: 1.12.0 Returns ------- bin : str Binary representation of `num` or two's complement of `num`. See Also -------- base_repr: Return a string representation of a number in the given base system. bin: Python's built-in binary representation generator of an integer. Notes ----- `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x faster. References ---------- .. 
[1] Wikipedia, "Two's complement", https://en.wikipedia.org/wiki/Two's_complement Examples -------- >>> np.binary_repr(3) '11' >>> np.binary_repr(-3) '-11' >>> np.binary_repr(3, width=4) '0011' The two's complement is returned when the input number is negative and width is specified: >>> np.binary_repr(-3, width=3) '101' >>> np.binary_repr(-3, width=5) '11101' """ def warn_if_insufficient(width, binwidth): if width is not None and width < binwidth: warnings.warn( "Insufficient bit width provided. This behavior " "will raise an error in the future.", DeprecationWarning, stacklevel=3) # Ensure that num is a Python integer to avoid overflow or unwanted # casts to floating point. num = operator.index(num) if num == 0: return '0' * (width or 1) elif num > 0: binary = bin(num)[2:] binwidth = len(binary) outwidth = (binwidth if width is None else max(binwidth, width)) warn_if_insufficient(width, binwidth) return binary.zfill(outwidth) else: if width is None: return '-' + bin(-num)[2:] else: poswidth = len(bin(-num)[2:]) # See gh-8679: remove extra digit # for numbers at boundaries. if 2**(poswidth - 1) == -num: poswidth -= 1 twocomp = 2**(poswidth + 1) + num binary = bin(twocomp)[2:] binwidth = len(binary) outwidth = max(binwidth, width) warn_if_insufficient(width, binwidth) return '1' * (outwidth - binwidth) + binary @set_module('numpy') def base_repr(number, base=2, padding=0): """ Return a string representation of a number in the given base system. Parameters ---------- number : int The value to convert. Positive and negative values are handled. base : int, optional Convert `number` to the `base` number system. The valid range is 2-36, the default value is 2. padding : int, optional Number of zeros padded on the left. Default is 0 (no padding). Returns ------- out : str String representation of `number` in `base` system. See Also -------- binary_repr : Faster version of `base_repr` for base 2. Examples -------- >>> np.base_repr(5) '101' >>> np.base_repr(6, 5) '11' >>> np.base_repr(7, base=5, padding=3) '00012' >>> np.base_repr(10, base=16) 'A' >>> np.base_repr(32, base=16) '20' """ digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' if base > len(digits): raise ValueError("Bases greater than 36 not handled in base_repr.") elif base < 2: raise ValueError("Bases less than 2 not handled in base_repr.") num = abs(number) res = [] while num: res.append(digits[num % base]) num //= base if padding: res.append('0' * padding) if number < 0: res.append('-') return ''.join(reversed(res or '0')) # These are all essentially abbreviations # These might wind up in a special abbreviations module def _maketup(descr, val): dt = dtype(descr) # Place val in all scalar tuples: fields = dt.fields if fields is None: return val else: res = [_maketup(fields[name][0], val) for name in dt.names] return tuple(res) @set_module('numpy') def identity(n, dtype=None): """ Return the identity array. The identity array is a square array with ones on the main diagonal. Parameters ---------- n : int Number of rows (and columns) in `n` x `n` output. dtype : data-type, optional Data-type of the output. Defaults to ``float``. Returns ------- out : ndarray `n` x `n` array with its main diagonal set to one, and all other elements 0. 
Examples -------- >>> np.identity(3) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) """ from numpy import eye return eye(n, dtype=dtype) def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): return (a, b) @array_function_dispatch(_allclose_dispatcher) def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """ Returns True if two arrays are element-wise equal within a tolerance. The tolerance values are positive, typically very small numbers. The relative difference (`rtol` * abs(`b`)) and the absolute difference `atol` are added together to compare against the absolute difference between `a` and `b`. If either array contains one or more NaNs, False is returned. Infs are treated as equal if they are in the same place and of the same sign in both arrays. Parameters ---------- a, b : array_like Input arrays to compare. rtol : float The relative tolerance parameter (see Notes). atol : float The absolute tolerance parameter (see Notes). equal_nan : bool Whether to compare NaN's as equal. If True, NaN's in `a` will be considered equal to NaN's in `b`. .. versionadded:: 1.10.0 Returns ------- allclose : bool Returns True if the two arrays are equal within the given tolerance; False otherwise. See Also -------- isclose, all, any, equal Notes ----- If the following equation is element-wise True, then allclose returns True. absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) The above equation is not symmetric in `a` and `b`, so that ``allclose(a, b)`` might be different from ``allclose(b, a)`` in some rare cases. The comparison of `a` and `b` uses standard broadcasting, which means that `a` and `b` need not have the same shape in order for ``allclose(a, b)`` to evaluate to True. The same is true for `equal` but not `array_equal`. Examples -------- >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) False >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) True >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) False >>> np.allclose([1.0, np.nan], [1.0, np.nan]) False >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) True """ res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) return bool(res) def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): return (a, b) @array_function_dispatch(_isclose_dispatcher) def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """ Returns a boolean array where two arrays are element-wise equal within a tolerance. The tolerance values are positive, typically very small numbers. The relative difference (`rtol` * abs(`b`)) and the absolute difference `atol` are added together to compare against the absolute difference between `a` and `b`. .. warning:: The default `atol` is not appropriate for comparing numbers that are much smaller than one (see Notes). Parameters ---------- a, b : array_like Input arrays to compare. rtol : float The relative tolerance parameter (see Notes). atol : float The absolute tolerance parameter (see Notes). equal_nan : bool Whether to compare NaN's as equal. If True, NaN's in `a` will be considered equal to NaN's in `b` in the output array. Returns ------- y : array_like Returns a boolean array of where `a` and `b` are equal within the given tolerance. If both `a` and `b` are scalars, returns a single boolean value. See Also -------- allclose Notes ----- .. versionadded:: 1.7.0 For finite values, isclose uses the following equation to test whether two floating point values are equivalent. 
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) Unlike the built-in `math.isclose`, the above equation is not symmetric in `a` and `b` -- it assumes `b` is the reference value -- so that `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore, the default value of atol is not zero, and is used to determine what small values should be considered close to zero. The default value is appropriate for expected values of order unity: if the expected values are significantly smaller than one, it can result in false positives. `atol` should be carefully selected for the use case at hand. A zero value for `atol` will result in `False` if either `a` or `b` is zero. Examples -------- >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) array([ True, False]) >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) array([ True, True]) >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) array([False, True]) >>> np.isclose([1.0, np.nan], [1.0, np.nan]) array([ True, False]) >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) array([ True, True]) >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) array([ True, False]) >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) array([False, False]) >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) array([ True, True]) >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) array([False, True]) """ def within_tol(x, y, atol, rtol): with errstate(invalid='ignore'): return less_equal(abs(x-y), atol + rtol * abs(y)) x = asanyarray(a) y = asanyarray(b) # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). # This will cause casting of x later. Also, make sure to allow subclasses # (e.g., for numpy.ma). dt = multiarray.result_type(y, 1.) y = array(y, dtype=dt, copy=False, subok=True) xfin = isfinite(x) yfin = isfinite(y) if all(xfin) and all(yfin): return within_tol(x, y, atol, rtol) else: finite = xfin & yfin cond = zeros_like(finite, subok=True) # Because we're using boolean indexing, x & y must be the same shape. # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in # lib.stride_tricks, though, so we can't import it here. x = x * ones_like(cond) y = y * ones_like(cond) # Avoid subtraction with infinite/nan values... cond[finite] = within_tol(x[finite], y[finite], atol, rtol) # Check for equality of infinite values... cond[~finite] = (x[~finite] == y[~finite]) if equal_nan: # Make NaN == NaN both_nan = isnan(x) & isnan(y) # Needed to treat masked arrays correctly. = True would not work. cond[both_nan] = both_nan[both_nan] return cond[()] # Flatten 0d arrays to scalars def _array_equal_dispatcher(a1, a2): return (a1, a2) @array_function_dispatch(_array_equal_dispatcher) def array_equal(a1, a2): """ True if two arrays have the same shape and elements, False otherwise. Parameters ---------- a1, a2 : array_like Input arrays. Returns ------- b : bool Returns True if the arrays are equal. See Also -------- allclose: Returns True if two arrays are element-wise equal within a tolerance. array_equiv: Returns True if input arrays are shape consistent and all elements equal. 
Examples -------- >>> np.array_equal([1, 2], [1, 2]) True >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) True >>> np.array_equal([1, 2], [1, 2, 3]) False >>> np.array_equal([1, 2], [1, 4]) False """ try: a1, a2 = asarray(a1), asarray(a2) except Exception: return False if a1.shape != a2.shape: return False return bool(asarray(a1 == a2).all()) def _array_equiv_dispatcher(a1, a2): return (a1, a2) @array_function_dispatch(_array_equiv_dispatcher) def array_equiv(a1, a2): """ Returns True if input arrays are shape consistent and all elements equal. Shape consistent means they are either the same shape, or one input array can be broadcasted to create the same shape as the other one. Parameters ---------- a1, a2 : array_like Input arrays. Returns ------- out : bool True if equivalent, False otherwise. Examples -------- >>> np.array_equiv([1, 2], [1, 2]) True >>> np.array_equiv([1, 2], [1, 3]) False Showing the shape equivalence: >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) True >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) False >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) False """ try: a1, a2 = asarray(a1), asarray(a2) except Exception: return False try: multiarray.broadcast(a1, a2) except Exception: return False return bool(asarray(a1 == a2).all()) Inf = inf = infty = Infinity = PINF nan = NaN = NAN False_ = bool_(False) True_ = bool_(True) def extend_all(module): existing = set(__all__) mall = getattr(module, '__all__') for a in mall: if a not in existing: __all__.append(a) from .umath import * from .numerictypes import * from . import fromnumeric from .fromnumeric import * from . import arrayprint from .arrayprint import * from . import _asarray from ._asarray import * from . import _ufunc_config from ._ufunc_config import * extend_all(fromnumeric) extend_all(umath) extend_all(numerictypes) extend_all(arrayprint) extend_all(_asarray) extend_all(_ufunc_config)
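The Notes for allclose and isclose above boil down to the single test absolute(a - b) <= (atol + rtol * absolute(b)), with b acting as the reference value. A minimal sketch (not part of numeric.py; the arrays are made up for illustration) that spells the check out by hand and shows the documented asymmetry:

import numpy as np

rtol, atol = 1e-5, 1e-8
a = np.array([1.0, 100.0])
b = np.array([1.000001, 100.2])

# The documented test written out explicitly; it matches np.isclose.
manual = np.abs(a - b) <= (atol + rtol * np.abs(b))
print(manual)              # [ True False]
print(np.isclose(a, b))    # [ True False]

# Because rtol scales |b| only, swapping the arguments can flip the answer
# when the difference falls between rtol*|b| and rtol*|a|.
x = np.array([1000010.00005])
y = np.array([1000000.0])
print(np.isclose(x, y))    # [False]  (reference is y: 10.00005 > 1e-8 + 1e-5 * 1e6)
print(np.isclose(y, x))    # [ True]  (reference is x: 10.00005 < 1e-8 + 1e-5 * |x|)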
from __future__ import division, absolute_import, print_function import os import pytest import numpy as np from numpy.testing import assert_raises, assert_equal from . import util def _path(*a): return os.path.join(*((os.path.dirname(__file__),) + a)) class TestParameters(util.F2PyTest): # Check that intent(in out) translates as intent(inout) sources = [_path('src', 'parameter', 'constant_real.f90'), _path('src', 'parameter', 'constant_integer.f90'), _path('src', 'parameter', 'constant_both.f90'), _path('src', 'parameter', 'constant_compound.f90'), _path('src', 'parameter', 'constant_non_compound.f90'), ] @pytest.mark.slow def test_constant_real_single(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float32)[::2] assert_raises(ValueError, self.module.foo_single, x) # check values with contiguous array x = np.arange(3, dtype=np.float32) self.module.foo_single(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_real_double(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_double, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_double(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_compound_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_compound_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_compound_int(x) assert_equal(x, [0 + 1 + 2*6, 1, 2]) @pytest.mark.slow def test_constant_non_compound_int(self): # check values x = np.arange(4, dtype=np.int32) self.module.foo_non_compound_int(x) assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) @pytest.mark.slow def test_constant_integer_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_int(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_integer_long(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int64)[::2] assert_raises(ValueError, self.module.foo_long, x) # check values with contiguous array x = np.arange(3, dtype=np.int64) self.module.foo_long(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_both(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_no(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_no, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_no(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_sum(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_sum, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_sum(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
MSeifert04/numpy
numpy/f2py/tests/test_parameter.py
numpy/core/numeric.py
from __future__ import division, absolute_import, print_function import numpy as np from numpy.core._rational_tests import rational from numpy.testing import ( assert_equal, assert_array_equal, assert_raises, assert_, assert_raises_regex, assert_warns, ) from numpy.lib.stride_tricks import ( as_strided, broadcast_arrays, _broadcast_shape, broadcast_to ) def assert_shapes_correct(input_shapes, expected_shape): # Broadcast a list of arrays with the given input shapes and check the # common output shape. inarrays = [np.zeros(s) for s in input_shapes] outarrays = broadcast_arrays(*inarrays) outshapes = [a.shape for a in outarrays] expected = [expected_shape] * len(inarrays) assert_equal(outshapes, expected) def assert_incompatible_shapes_raise(input_shapes): # Broadcast a list of arrays with the given (incompatible) input shapes # and check that they raise a ValueError. inarrays = [np.zeros(s) for s in input_shapes] assert_raises(ValueError, broadcast_arrays, *inarrays) def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): # Broadcast two shapes against each other and check that the data layout # is the same as if a ufunc did the broadcasting. x0 = np.zeros(shape0, dtype=int) # Note that multiply.reduce's identity element is 1.0, so when shape1==(), # this gives the desired n==1. n = int(np.multiply.reduce(shape1)) x1 = np.arange(n).reshape(shape1) if transposed: x0 = x0.T x1 = x1.T if flipped: x0 = x0[::-1] x1 = x1[::-1] # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the # result should be exactly the same as the broadcasted view of x1. y = x0 + x1 b0, b1 = broadcast_arrays(x0, x1) assert_array_equal(y, b1) def test_same(): x = np.arange(10) y = np.arange(10) bx, by = broadcast_arrays(x, y) assert_array_equal(x, bx) assert_array_equal(y, by) def test_broadcast_kwargs(): # ensure that a TypeError is appropriately raised when # np.broadcast_arrays() is called with any keyword # argument other than 'subok' x = np.arange(10) y = np.arange(10) with assert_raises_regex(TypeError, r'broadcast_arrays\(\) got an unexpected keyword*'): broadcast_arrays(x, y, dtype='float64') def test_one_off(): x = np.array([[1, 2, 3]]) y = np.array([[1], [2], [3]]) bx, by = broadcast_arrays(x, y) bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) by0 = bx0.T assert_array_equal(bx0, bx) assert_array_equal(by0, by) def test_same_input_shapes(): # Check that the final shape is just the input shape. data = [ (), (1,), (3,), (0, 1), (0, 3), (1, 0), (3, 0), (1, 3), (3, 1), (3, 3), ] for shape in data: input_shapes = [shape] # Single input. assert_shapes_correct(input_shapes, shape) # Double input. input_shapes2 = [shape, shape] assert_shapes_correct(input_shapes2, shape) # Triple input. input_shapes3 = [shape, shape, shape] assert_shapes_correct(input_shapes3, shape) def test_two_compatible_by_ones_input_shapes(): # Check that two different input shapes of the same length, but some have # ones, broadcast to the correct shape. data = [ [[(1,), (3,)], (3,)], [[(1, 3), (3, 3)], (3, 3)], [[(3, 1), (3, 3)], (3, 3)], [[(1, 3), (3, 1)], (3, 3)], [[(1, 1), (3, 3)], (3, 3)], [[(1, 1), (1, 3)], (1, 3)], [[(1, 1), (3, 1)], (3, 1)], [[(1, 0), (0, 0)], (0, 0)], [[(0, 1), (0, 0)], (0, 0)], [[(1, 0), (0, 1)], (0, 0)], [[(1, 1), (0, 0)], (0, 0)], [[(1, 1), (1, 0)], (1, 0)], [[(1, 1), (0, 1)], (0, 1)], ] for input_shapes, expected_shape in data: assert_shapes_correct(input_shapes, expected_shape) # Reverse the input shapes since broadcasting should be symmetric. 
assert_shapes_correct(input_shapes[::-1], expected_shape) def test_two_compatible_by_prepending_ones_input_shapes(): # Check that two different input shapes (of different lengths) broadcast # to the correct shape. data = [ [[(), (3,)], (3,)], [[(3,), (3, 3)], (3, 3)], [[(3,), (3, 1)], (3, 3)], [[(1,), (3, 3)], (3, 3)], [[(), (3, 3)], (3, 3)], [[(1, 1), (3,)], (1, 3)], [[(1,), (3, 1)], (3, 1)], [[(1,), (1, 3)], (1, 3)], [[(), (1, 3)], (1, 3)], [[(), (3, 1)], (3, 1)], [[(), (0,)], (0,)], [[(0,), (0, 0)], (0, 0)], [[(0,), (0, 1)], (0, 0)], [[(1,), (0, 0)], (0, 0)], [[(), (0, 0)], (0, 0)], [[(1, 1), (0,)], (1, 0)], [[(1,), (0, 1)], (0, 1)], [[(1,), (1, 0)], (1, 0)], [[(), (1, 0)], (1, 0)], [[(), (0, 1)], (0, 1)], ] for input_shapes, expected_shape in data: assert_shapes_correct(input_shapes, expected_shape) # Reverse the input shapes since broadcasting should be symmetric. assert_shapes_correct(input_shapes[::-1], expected_shape) def test_incompatible_shapes_raise_valueerror(): # Check that a ValueError is raised for incompatible shapes. data = [ [(3,), (4,)], [(2, 3), (2,)], [(3,), (3,), (4,)], [(1, 3, 4), (2, 3, 3)], ] for input_shapes in data: assert_incompatible_shapes_raise(input_shapes) # Reverse the input shapes since broadcasting should be symmetric. assert_incompatible_shapes_raise(input_shapes[::-1]) def test_same_as_ufunc(): # Check that the data layout is the same as if a ufunc did the operation. data = [ [[(1,), (3,)], (3,)], [[(1, 3), (3, 3)], (3, 3)], [[(3, 1), (3, 3)], (3, 3)], [[(1, 3), (3, 1)], (3, 3)], [[(1, 1), (3, 3)], (3, 3)], [[(1, 1), (1, 3)], (1, 3)], [[(1, 1), (3, 1)], (3, 1)], [[(1, 0), (0, 0)], (0, 0)], [[(0, 1), (0, 0)], (0, 0)], [[(1, 0), (0, 1)], (0, 0)], [[(1, 1), (0, 0)], (0, 0)], [[(1, 1), (1, 0)], (1, 0)], [[(1, 1), (0, 1)], (0, 1)], [[(), (3,)], (3,)], [[(3,), (3, 3)], (3, 3)], [[(3,), (3, 1)], (3, 3)], [[(1,), (3, 3)], (3, 3)], [[(), (3, 3)], (3, 3)], [[(1, 1), (3,)], (1, 3)], [[(1,), (3, 1)], (3, 1)], [[(1,), (1, 3)], (1, 3)], [[(), (1, 3)], (1, 3)], [[(), (3, 1)], (3, 1)], [[(), (0,)], (0,)], [[(0,), (0, 0)], (0, 0)], [[(0,), (0, 1)], (0, 0)], [[(1,), (0, 0)], (0, 0)], [[(), (0, 0)], (0, 0)], [[(1, 1), (0,)], (1, 0)], [[(1,), (0, 1)], (0, 1)], [[(1,), (1, 0)], (1, 0)], [[(), (1, 0)], (1, 0)], [[(), (0, 1)], (0, 1)], ] for input_shapes, expected_shape in data: assert_same_as_ufunc(input_shapes[0], input_shapes[1], "Shapes: %s %s" % (input_shapes[0], input_shapes[1])) # Reverse the input shapes since broadcasting should be symmetric. assert_same_as_ufunc(input_shapes[1], input_shapes[0]) # Try them transposed, too. assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) # ... and flipped for non-rank-0 inputs in order to test negative # strides. 
if () not in input_shapes: assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) def test_broadcast_to_succeeds(): data = [ [np.array(0), (0,), np.array(0)], [np.array(0), (1,), np.zeros(1)], [np.array(0), (3,), np.zeros(3)], [np.ones(1), (1,), np.ones(1)], [np.ones(1), (2,), np.ones(2)], [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))], [np.arange(3), (3,), np.arange(3)], [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)], [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])], # test if shape is not a tuple [np.ones(0), 0, np.ones(0)], [np.ones(1), 1, np.ones(1)], [np.ones(1), 2, np.ones(2)], # these cases with size 0 are strange, but they reproduce the behavior # of broadcasting with ufuncs (see test_same_as_ufunc above) [np.ones(1), (0,), np.ones(0)], [np.ones((1, 2)), (0, 2), np.ones((0, 2))], [np.ones((2, 1)), (2, 0), np.ones((2, 0))], ] for input_array, shape, expected in data: actual = broadcast_to(input_array, shape) assert_array_equal(expected, actual) def test_broadcast_to_raises(): data = [ [(0,), ()], [(1,), ()], [(3,), ()], [(3,), (1,)], [(3,), (2,)], [(3,), (4,)], [(1, 2), (2, 1)], [(1, 1), (1,)], [(1,), -1], [(1,), (-1,)], [(1, 2), (-1, 2)], ] for orig_shape, target_shape in data: arr = np.zeros(orig_shape) assert_raises(ValueError, lambda: broadcast_to(arr, target_shape)) def test_broadcast_shape(): # broadcast_shape is already exercized indirectly by broadcast_arrays assert_equal(_broadcast_shape(), ()) assert_equal(_broadcast_shape([1, 2]), (2,)) assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1)) assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4)) assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2)) assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2)) # regression tests for gh-5862 assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,)) bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) def test_as_strided(): a = np.array([None]) a_view = as_strided(a) expected = np.array([None]) assert_array_equal(a_view, np.array([None])) a = np.array([1, 2, 3, 4]) a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) expected = np.array([1, 3]) assert_array_equal(a_view, expected) a = np.array([1, 2, 3, 4]) a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) assert_array_equal(a_view, expected) # Regression test for gh-5081 dt = np.dtype([('num', 'i4'), ('obj', 'O')]) a = np.empty((4,), dtype=dt) a['num'] = np.arange(1, 5) a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) expected_num = [[1, 2, 3, 4]] * 3 expected_obj = [[None]*4]*3 assert_equal(a_view.dtype, dt) assert_array_equal(expected_num, a_view['num']) assert_array_equal(expected_obj, a_view['obj']) # Make sure that void types without fields are kept unchanged a = np.empty((4,), dtype='V4') a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) assert_equal(a.dtype, a_view.dtype) # Make sure that the only type that could fail is properly handled dt = np.dtype({'names': [''], 'formats': ['V4']}) a = np.empty((4,), dtype=dt) a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) assert_equal(a.dtype, a_view.dtype) # Custom dtypes should not be lost (gh-9161) r = [rational(i) for i in range(4)] a = np.array(r, dtype=rational) a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) assert_equal(a.dtype, a_view.dtype) 
assert_array_equal([r] * 3, a_view) def as_strided_writeable(): arr = np.ones(10) view = as_strided(arr, writeable=False) assert_(not view.flags.writeable) # Check that writeable also is fine: view = as_strided(arr, writeable=True) assert_(view.flags.writeable) view[...] = 3 assert_array_equal(arr, np.full_like(arr, 3)) # Test that things do not break down for readonly: arr.flags.writeable = False view = as_strided(arr, writeable=False) view = as_strided(arr, writeable=True) assert_(not view.flags.writeable) class VerySimpleSubClass(np.ndarray): def __new__(cls, *args, **kwargs): kwargs['subok'] = True return np.array(*args, **kwargs).view(cls) class SimpleSubClass(VerySimpleSubClass): def __new__(cls, *args, **kwargs): kwargs['subok'] = True self = np.array(*args, **kwargs).view(cls) self.info = 'simple' return self def __array_finalize__(self, obj): self.info = getattr(obj, 'info', '') + ' finalized' def test_subclasses(): # test that subclass is preserved only if subok=True a = VerySimpleSubClass([1, 2, 3, 4]) assert_(type(a) is VerySimpleSubClass) a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) assert_(type(a_view) is np.ndarray) a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) assert_(type(a_view) is VerySimpleSubClass) # test that if a subclass has __array_finalize__, it is used a = SimpleSubClass([1, 2, 3, 4]) a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) assert_(type(a_view) is SimpleSubClass) assert_(a_view.info == 'simple finalized') # similar tests for broadcast_arrays b = np.arange(len(a)).reshape(-1, 1) a_view, b_view = broadcast_arrays(a, b) assert_(type(a_view) is np.ndarray) assert_(type(b_view) is np.ndarray) assert_(a_view.shape == b_view.shape) a_view, b_view = broadcast_arrays(a, b, subok=True) assert_(type(a_view) is SimpleSubClass) assert_(a_view.info == 'simple finalized') assert_(type(b_view) is np.ndarray) assert_(a_view.shape == b_view.shape) # and for broadcast_to shape = (2, 4) a_view = broadcast_to(a, shape) assert_(type(a_view) is np.ndarray) assert_(a_view.shape == shape) a_view = broadcast_to(a, shape, subok=True) assert_(type(a_view) is SimpleSubClass) assert_(a_view.info == 'simple finalized') assert_(a_view.shape == shape) def test_writeable(): # broadcast_to should return a readonly array original = np.array([1, 2, 3]) result = broadcast_to(original, (2, 3)) assert_equal(result.flags.writeable, False) assert_raises(ValueError, result.__setitem__, slice(None), 0) # but the result of broadcast_arrays needs to be writeable, to # preserve backwards compatibility for is_broadcast, results in [(False, broadcast_arrays(original,)), (True, broadcast_arrays(0, original))]: for result in results: # This will change to False in a future version if is_broadcast: with assert_warns(FutureWarning): assert_equal(result.flags.writeable, True) with assert_warns(DeprecationWarning): result[:] = 0 # Warning not emitted, writing to the array resets it assert_equal(result.flags.writeable, True) else: # No warning: assert_equal(result.flags.writeable, True) for results in [broadcast_arrays(original), broadcast_arrays(0, original)]: for result in results: # resets the warn_on_write DeprecationWarning result.flags.writeable = True # check: no warning emitted assert_equal(result.flags.writeable, True) result[:] = 0 # keep readonly input readonly original.flags.writeable = False _, result = broadcast_arrays(0, original) assert_equal(result.flags.writeable, False) # regression test for GH6491 shape = (2,) strides = 
[0] tricky_array = as_strided(np.array(0), shape, strides) other = np.zeros((1,)) first, second = broadcast_arrays(tricky_array, other) assert_(first.shape == second.shape) def test_writeable_memoryview(): # The result of broadcast_arrays exports as a non-writeable memoryview # because otherwise there is no good way to opt in to the new behaviour # (i.e. you would need to set writeable to False explicitly). # See gh-13929. original = np.array([1, 2, 3]) for is_broadcast, results in [(False, broadcast_arrays(original,)), (True, broadcast_arrays(0, original))]: for result in results: # This will change to False in a future version if is_broadcast: # memoryview(result, writable=True) will give warning but cannot # be tested using the python API. assert memoryview(result).readonly else: assert not memoryview(result).readonly def test_reference_types(): input_array = np.array('a', dtype=object) expected = np.array(['a'] * 3, dtype=object) actual = broadcast_to(input_array, (3,)) assert_array_equal(expected, actual) actual, _ = broadcast_arrays(input_array, np.ones(3)) assert_array_equal(expected, actual)
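The helpers above (assert_same_as_ufunc, test_broadcast_to_succeeds, test_writeable) all revolve around the fact that broadcasting is a zero-copy view with a zero stride along the repeated axis, and that broadcast_to hands that view back read-only. A small stand-alone sketch of the same mechanics, outside the test suite:

import numpy as np
from numpy.lib.stride_tricks import as_strided, broadcast_to

a = np.arange(4)

# A zero stride along the first axis repeats the same data without copying,
# which is exactly the layout broadcast_to produces.
view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
print(np.array_equal(view, broadcast_to(a, (3, 4))))   # True

# broadcast_to returns a read-only view, as test_writeable asserts.
b = broadcast_to(a, (3, 4))
print(b.flags.writeable)   # False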
from __future__ import division, absolute_import, print_function import os import pytest import numpy as np from numpy.testing import assert_raises, assert_equal from . import util def _path(*a): return os.path.join(*((os.path.dirname(__file__),) + a)) class TestParameters(util.F2PyTest): # Check that intent(in out) translates as intent(inout) sources = [_path('src', 'parameter', 'constant_real.f90'), _path('src', 'parameter', 'constant_integer.f90'), _path('src', 'parameter', 'constant_both.f90'), _path('src', 'parameter', 'constant_compound.f90'), _path('src', 'parameter', 'constant_non_compound.f90'), ] @pytest.mark.slow def test_constant_real_single(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float32)[::2] assert_raises(ValueError, self.module.foo_single, x) # check values with contiguous array x = np.arange(3, dtype=np.float32) self.module.foo_single(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_real_double(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_double, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_double(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_compound_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_compound_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_compound_int(x) assert_equal(x, [0 + 1 + 2*6, 1, 2]) @pytest.mark.slow def test_constant_non_compound_int(self): # check values x = np.arange(4, dtype=np.int32) self.module.foo_non_compound_int(x) assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) @pytest.mark.slow def test_constant_integer_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_int(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_integer_long(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int64)[::2] assert_raises(ValueError, self.module.foo_long, x) # check values with contiguous array x = np.arange(3, dtype=np.int64) self.module.foo_long(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_both(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_no(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_no, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_no(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_sum(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_sum, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_sum(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
MSeifert04/numpy
numpy/f2py/tests/test_parameter.py
numpy/lib/tests/test_stride_tricks.py
"""Defines a multi-dimensional array and useful procedures for Numerical computation. Functions - array - NumPy Array construction - zeros - Return an array of all zeros - empty - Return an uninitialized array - shape - Return shape of sequence or array - rank - Return number of dimensions - size - Return number of elements in entire array or a certain dimension - fromstring - Construct array from (byte) string - take - Select sub-arrays using sequence of indices - put - Set sub-arrays using sequence of 1-D indices - putmask - Set portion of arrays using a mask - reshape - Return array with new shape - repeat - Repeat elements of array - choose - Construct new array from indexed array tuple - correlate - Correlate two 1-d arrays - searchsorted - Search for element in 1-d array - sum - Total sum over a specified dimension - average - Average, possibly weighted, over axis or array. - cumsum - Cumulative sum over a specified dimension - product - Total product over a specified dimension - cumproduct - Cumulative product over a specified dimension - alltrue - Logical and over an entire axis - sometrue - Logical or over an entire axis - allclose - Tests if sequences are essentially equal More Functions: - arange - Return regularly spaced array - asarray - Guarantee NumPy array - convolve - Convolve two 1-d arrays - swapaxes - Exchange axes - concatenate - Join arrays together - transpose - Permute axes - sort - Sort elements of array - argsort - Indices of sorted array - argmax - Index of largest value - argmin - Index of smallest value - inner - Innerproduct of two arrays - dot - Dot product (matrix multiplication) - outer - Outerproduct of two arrays - resize - Return array with arbitrary new shape - indices - Tuple of indices - fromfunction - Construct array from universal function - diagonal - Return diagonal array - trace - Trace of array - dump - Dump array to file object (pickle) - dumps - Return pickled string representing data - load - Return array stored in file object - loads - Return array from pickled string - ravel - Return array as 1-D - nonzero - Indices of nonzero elements for 1-D array - shape - Shape of array - where - Construct array from binary result - compress - Elements of array where condition is true - clip - Clip array between two values - ones - Array of all ones - identity - 2-D identity array (matrix) (Universal) Math Functions add logical_or exp subtract logical_xor log multiply logical_not log10 divide maximum sin divide_safe minimum sinh conjugate bitwise_and sqrt power bitwise_or tan absolute bitwise_xor tanh negative invert ceil greater left_shift fabs greater_equal right_shift floor less arccos arctan2 less_equal arcsin fmod equal arctan hypot not_equal cos around logical_and cosh sign arccosh arcsinh arctanh """ from __future__ import division, absolute_import, print_function depends = ['testing'] global_symbols = ['*']
from __future__ import division, absolute_import, print_function import os import pytest import numpy as np from numpy.testing import assert_raises, assert_equal from . import util def _path(*a): return os.path.join(*((os.path.dirname(__file__),) + a)) class TestParameters(util.F2PyTest): # Check that intent(in out) translates as intent(inout) sources = [_path('src', 'parameter', 'constant_real.f90'), _path('src', 'parameter', 'constant_integer.f90'), _path('src', 'parameter', 'constant_both.f90'), _path('src', 'parameter', 'constant_compound.f90'), _path('src', 'parameter', 'constant_non_compound.f90'), ] @pytest.mark.slow def test_constant_real_single(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float32)[::2] assert_raises(ValueError, self.module.foo_single, x) # check values with contiguous array x = np.arange(3, dtype=np.float32) self.module.foo_single(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_real_double(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_double, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_double(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_compound_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_compound_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_compound_int(x) assert_equal(x, [0 + 1 + 2*6, 1, 2]) @pytest.mark.slow def test_constant_non_compound_int(self): # check values x = np.arange(4, dtype=np.int32) self.module.foo_non_compound_int(x) assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) @pytest.mark.slow def test_constant_integer_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_int(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_integer_long(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int64)[::2] assert_raises(ValueError, self.module.foo_long, x) # check values with contiguous array x = np.arange(3, dtype=np.int64) self.module.foo_long(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_both(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_no(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_no, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_no(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_sum(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_sum, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_sum(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
MSeifert04/numpy
numpy/f2py/tests/test_parameter.py
numpy/core/info.py
# Added Fortran compiler support to config. Currently useful only for # try_compile call. try_run works but is untested for most of Fortran # compilers (they must define linker_exe first). # Pearu Peterson from __future__ import division, absolute_import, print_function import os, signal import warnings import sys import subprocess import textwrap from distutils.command.config import config as old_config from distutils.command.config import LANG_EXT from distutils import log from distutils.file_util import copy_file from distutils.ccompiler import CompileError, LinkError import distutils from numpy.distutils.exec_command import filepath_from_subprocess_output from numpy.distutils.mingw32ccompiler import generate_manifest from numpy.distutils.command.autodist import (check_gcc_function_attribute, check_gcc_function_attribute_with_intrinsics, check_gcc_variable_attribute, check_inline, check_restrict, check_compiler_gcc4) from numpy.distutils.compat import get_exception LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' class config(old_config): old_config.user_options += [ ('fcompiler=', None, "specify the Fortran compiler type"), ] def initialize_options(self): self.fcompiler = None old_config.initialize_options(self) def _check_compiler (self): old_config._check_compiler(self) from numpy.distutils.fcompiler import FCompiler, new_fcompiler if sys.platform == 'win32' and (self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw')): # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: # initialize call query_vcvarsall, which throws an IOError, and # causes an error along the way without much information. We try to # catch it here, hoping it is early enough, and print an helpful # message instead of Error: None. if not self.compiler.initialized: try: self.compiler.initialize() except IOError: e = get_exception() msg = textwrap.dedent("""\ Could not initialize compiler instance: do you have Visual Studio installed? If you are trying to build with MinGW, please use "python setup.py build -c mingw32" instead. If you have Visual Studio installed, check it is correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2, VS 2010 for >= 3.3). Original exception was: %s, and the Compiler class was %s ============================================================================""") \ % (e, self.compiler.__class__.__name__) print(textwrap.dedent("""\ ============================================================================""")) raise distutils.errors.DistutilsPlatformError(msg) # After MSVC is initialized, add an explicit /MANIFEST to linker # flags. See issues gh-4245 and gh-4101 for details. Also # relevant are issues 4431 and 16296 on the Python bug tracker. 
from distutils import msvc9compiler if msvc9compiler.get_build_version() >= 10: for ldflags in [self.compiler.ldflags_shared, self.compiler.ldflags_shared_debug]: if '/MANIFEST' not in ldflags: ldflags.append('/MANIFEST') if not isinstance(self.fcompiler, FCompiler): self.fcompiler = new_fcompiler(compiler=self.fcompiler, dry_run=self.dry_run, force=1, c_compiler=self.compiler) if self.fcompiler is not None: self.fcompiler.customize(self.distribution) if self.fcompiler.get_version(): self.fcompiler.customize_cmd(self) self.fcompiler.show_customization() def _wrap_method(self, mth, lang, args): from distutils.ccompiler import CompileError from distutils.errors import DistutilsExecError save_compiler = self.compiler if lang in ['f77', 'f90']: self.compiler = self.fcompiler try: ret = mth(*((self,)+args)) except (DistutilsExecError, CompileError): str(get_exception()) self.compiler = save_compiler raise CompileError self.compiler = save_compiler return ret def _compile (self, body, headers, include_dirs, lang): src, obj = self._wrap_method(old_config._compile, lang, (body, headers, include_dirs, lang)) # _compile in unixcompiler.py sometimes creates .d dependency files. # Clean them up. self.temp_files.append(obj + '.d') return src, obj def _link (self, body, headers, include_dirs, libraries, library_dirs, lang): if self.compiler.compiler_type=='msvc': libraries = (libraries or [])[:] library_dirs = (library_dirs or [])[:] if lang in ['f77', 'f90']: lang = 'c' # always use system linker when using MSVC compiler if self.fcompiler: for d in self.fcompiler.library_dirs or []: # correct path when compiling in Cygwin but with # normal Win Python if d.startswith('/usr/lib'): try: d = subprocess.check_output(['cygpath', '-w', d]) except (OSError, subprocess.CalledProcessError): pass else: d = filepath_from_subprocess_output(d) library_dirs.append(d) for libname in self.fcompiler.libraries or []: if libname not in libraries: libraries.append(libname) for libname in libraries: if libname.startswith('msvc'): continue fileexists = False for libdir in library_dirs or []: libfile = os.path.join(libdir, '%s.lib' % (libname)) if os.path.isfile(libfile): fileexists = True break if fileexists: continue # make g77-compiled static libs available to MSVC fileexists = False for libdir in library_dirs: libfile = os.path.join(libdir, 'lib%s.a' % (libname)) if os.path.isfile(libfile): # copy libname.a file to name.lib so that MSVC linker # can find it libfile2 = os.path.join(libdir, '%s.lib' % (libname)) copy_file(libfile, libfile2) self.temp_files.append(libfile2) fileexists = True break if fileexists: continue log.warn('could not find library %r in directories %s' \ % (libname, library_dirs)) elif self.compiler.compiler_type == 'mingw32': generate_manifest(self) return self._wrap_method(old_config._link, lang, (body, headers, include_dirs, libraries, library_dirs, lang)) def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): self._check_compiler() return self.try_compile( "/* we need a dummy line to make distutils happy */", [header], include_dirs) def check_decl(self, symbol, headers=None, include_dirs=None): self._check_compiler() body = textwrap.dedent(""" int main(void) { #ifndef %s (void) %s; #endif ; return 0; }""") % (symbol, symbol) return self.try_compile(body, headers, include_dirs) def check_macro_true(self, symbol, headers=None, include_dirs=None): self._check_compiler() body = textwrap.dedent(""" int main(void) { #if %s #else #error false or undefined macro #endif ; return 0; 
}""") % (symbol,) return self.try_compile(body, headers, include_dirs) def check_type(self, type_name, headers=None, include_dirs=None, library_dirs=None): """Check type availability. Return True if the type can be compiled, False otherwise""" self._check_compiler() # First check the type can be compiled body = textwrap.dedent(r""" int main(void) { if ((%(name)s *) 0) return 0; if (sizeof (%(name)s)) return 0; } """) % {'name': type_name} st = False try: try: self._compile(body % {'type': type_name}, headers, include_dirs, 'c') st = True except distutils.errors.CompileError: st = False finally: self._clean() return st def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): """Check size of a given type.""" self._check_compiler() # First check the type can be compiled body = textwrap.dedent(r""" typedef %(type)s npy_check_sizeof_type; int main (void) { static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; test_array [0] = 0 ; return 0; } """) self._compile(body % {'type': type_name}, headers, include_dirs, 'c') self._clean() if expected: body = textwrap.dedent(r""" typedef %(type)s npy_check_sizeof_type; int main (void) { static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; test_array [0] = 0 ; return 0; } """) for size in expected: try: self._compile(body % {'type': type_name, 'size': size}, headers, include_dirs, 'c') self._clean() return size except CompileError: pass # this fails to *compile* if size > sizeof(type) body = textwrap.dedent(r""" typedef %(type)s npy_check_sizeof_type; int main (void) { static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; test_array [0] = 0 ; return 0; } """) # The principle is simple: we first find low and high bounds of size # for the type, where low/high are looked up on a log scale. Then, we # do a binary search to find the exact size between low and high low = 0 mid = 0 while True: try: self._compile(body % {'type': type_name, 'size': mid}, headers, include_dirs, 'c') self._clean() break except CompileError: #log.info("failure to test for bound %d" % mid) low = mid + 1 mid = 2 * mid + 1 high = mid # Binary search: while low != high: mid = (high - low) // 2 + low try: self._compile(body % {'type': type_name, 'size': mid}, headers, include_dirs, 'c') self._clean() high = mid except CompileError: low = mid + 1 return low def check_func(self, func, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=False, call=False, call_args=None): # clean up distutils's config a bit: add void to main(), and # return a value. self._check_compiler() body = [] if decl: if type(decl) == str: body.append(decl) else: body.append("int %s (void);" % func) # Handle MSVC intrinsics: force MS compiler to make a function call. # Useful to test for some functions when built with optimization on, to # avoid build error because the intrinsic and our 'fake' test # declaration do not match. 
body.append("#ifdef _MSC_VER") body.append("#pragma function(%s)" % func) body.append("#endif") body.append("int main (void) {") if call: if call_args is None: call_args = '' body.append(" %s(%s);" % (func, call_args)) else: body.append(" %s;" % func) body.append(" return 0;") body.append("}") body = '\n'.join(body) + "\n" return self.try_link(body, headers, include_dirs, libraries, library_dirs) def check_funcs_once(self, funcs, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=False, call=False, call_args=None): """Check a list of functions at once. This is useful to speed up things, since all the functions in the funcs list will be put in one compilation unit. Arguments --------- funcs : seq list of functions to test include_dirs : seq list of header paths libraries : seq list of libraries to link the code snippet to library_dirs : seq list of library paths decl : dict for every (key, value), the declaration in the value will be used for function in key. If a function is not in the dictionary, no declaration will be used. call : dict for every item (f, value), if the value is True, a call will be done to the function f. """ self._check_compiler() body = [] if decl: for f, v in decl.items(): if v: body.append("int %s (void);" % f) # Handle MS intrinsics. See check_func for more info. body.append("#ifdef _MSC_VER") for func in funcs: body.append("#pragma function(%s)" % func) body.append("#endif") body.append("int main (void) {") if call: for f in funcs: if f in call and call[f]: if not (call_args and f in call_args and call_args[f]): args = '' else: args = call_args[f] body.append(" %s(%s);" % (f, args)) else: body.append(" %s;" % f) else: for f in funcs: body.append(" %s;" % f) body.append(" return 0;") body.append("}") body = '\n'.join(body) + "\n" return self.try_link(body, headers, include_dirs, libraries, library_dirs) def check_inline(self): """Return the inline keyword recognized by the compiler, empty string otherwise.""" return check_inline(self) def check_restrict(self): """Return the restrict keyword recognized by the compiler, empty string otherwise.""" return check_restrict(self) def check_compiler_gcc4(self): """Return True if the C compiler is gcc >= 4.""" return check_compiler_gcc4(self) def check_gcc_function_attribute(self, attribute, name): return check_gcc_function_attribute(self, attribute, name) def check_gcc_function_attribute_with_intrinsics(self, attribute, name, code, include): return check_gcc_function_attribute_with_intrinsics(self, attribute, name, code, include) def check_gcc_variable_attribute(self, attribute): return check_gcc_variable_attribute(self, attribute) def get_output(self, body, headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c", use_tee=None): """Try to compile, link to an executable, and run a program built from 'body' and 'headers'. Returns the exit status code of the program and its output. 
""" # 2008-11-16, RemoveMe warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" "Usage of get_output is deprecated: please do not \n" "use it anymore, and avoid configuration checks \n" "involving running executable on the target machine.\n" "+++++++++++++++++++++++++++++++++++++++++++++++++\n", DeprecationWarning, stacklevel=2) self._check_compiler() exitcode, output = 255, '' try: grabber = GrabStdout() try: src, obj, exe = self._link(body, headers, include_dirs, libraries, library_dirs, lang) grabber.restore() except Exception: output = grabber.data grabber.restore() raise exe = os.path.join('.', exe) try: # specify cwd arg for consistency with # historic usage pattern of exec_command() # also, note that exe appears to be a string, # which exec_command() handled, but we now # use a list for check_output() -- this assumes # that exe is always a single command output = subprocess.check_output([exe], cwd='.') except subprocess.CalledProcessError as exc: exitstatus = exc.returncode output = '' except OSError: # preserve the EnvironmentError exit status # used historically in exec_command() exitstatus = 127 output = '' else: output = filepath_from_subprocess_output(output) if hasattr(os, 'WEXITSTATUS'): exitcode = os.WEXITSTATUS(exitstatus) if os.WIFSIGNALED(exitstatus): sig = os.WTERMSIG(exitstatus) log.error('subprocess exited with signal %d' % (sig,)) if sig == signal.SIGINT: # control-C raise KeyboardInterrupt else: exitcode = exitstatus log.info("success!") except (CompileError, LinkError): log.info("failure.") self._clean() return exitcode, output class GrabStdout(object): def __init__(self): self.sys_stdout = sys.stdout self.data = '' sys.stdout = self def write (self, data): self.sys_stdout.write(data) self.data += data def flush (self): self.sys_stdout.flush() def restore(self): sys.stdout = self.sys_stdout
from __future__ import division, absolute_import, print_function import os import pytest import numpy as np from numpy.testing import assert_raises, assert_equal from . import util def _path(*a): return os.path.join(*((os.path.dirname(__file__),) + a)) class TestParameters(util.F2PyTest): # Check that intent(in out) translates as intent(inout) sources = [_path('src', 'parameter', 'constant_real.f90'), _path('src', 'parameter', 'constant_integer.f90'), _path('src', 'parameter', 'constant_both.f90'), _path('src', 'parameter', 'constant_compound.f90'), _path('src', 'parameter', 'constant_non_compound.f90'), ] @pytest.mark.slow def test_constant_real_single(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float32)[::2] assert_raises(ValueError, self.module.foo_single, x) # check values with contiguous array x = np.arange(3, dtype=np.float32) self.module.foo_single(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_real_double(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_double, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_double(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_compound_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_compound_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_compound_int(x) assert_equal(x, [0 + 1 + 2*6, 1, 2]) @pytest.mark.slow def test_constant_non_compound_int(self): # check values x = np.arange(4, dtype=np.int32) self.module.foo_non_compound_int(x) assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) @pytest.mark.slow def test_constant_integer_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_int(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_integer_long(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int64)[::2] assert_raises(ValueError, self.module.foo_long, x) # check values with contiguous array x = np.arange(3, dtype=np.int64) self.module.foo_long(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_both(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_no(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_no, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_no(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_sum(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_sum, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_sum(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
MSeifert04/numpy
numpy/f2py/tests/test_parameter.py
numpy/distutils/command/config.py
from __future__ import division, absolute_import, print_function import os import re import sys import subprocess from numpy.distutils.fcompiler import FCompiler from numpy.distutils.exec_command import find_executable from numpy.distutils.misc_util import make_temp_file from distutils import log compilers = ['IBMFCompiler'] class IBMFCompiler(FCompiler): compiler_type = 'ibm' description = 'IBM XL Fortran Compiler' version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)' #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 executables = { 'version_cmd' : ["<F77>", "-qversion"], 'compiler_f77' : ["xlf"], 'compiler_fix' : ["xlf90", "-qfixed"], 'compiler_f90' : ["xlf90"], 'linker_so' : ["xlf95"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } def get_version(self,*args,**kwds): version = FCompiler.get_version(self,*args,**kwds) if version is None and sys.platform.startswith('aix'): # use lslpp to find out xlf version lslpp = find_executable('lslpp') xlf = find_executable('xlf') if os.path.exists(xlf) and os.path.exists(lslpp): try: o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) except (OSError, subprocess.CalledProcessError): pass else: m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o) if m: version = m.group('version') xlf_dir = '/etc/opt/ibmcmp/xlf' if version is None and os.path.isdir(xlf_dir): # linux: # If the output of xlf does not contain version info # (that's the case with xlf 8.1, for instance) then # let's try another method: l = sorted(os.listdir(xlf_dir)) l.reverse() l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] if l: from distutils.version import LooseVersion self.version = version = LooseVersion(l[0]) return version def get_flags(self): return ['-qextname'] def get_flags_debug(self): return ['-g'] def get_flags_linker_so(self): opt = [] if sys.platform=='darwin': opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') else: opt.append('-bshared') version = self.get_version(ok_status=[0, 40]) if version is not None: if sys.platform.startswith('aix'): xlf_cfg = '/etc/xlf.cfg' else: xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version fo, new_cfg = make_temp_file(suffix='_xlf.cfg') log.info('Creating '+new_cfg) with open(xlf_cfg, 'r') as fi: crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match for line in fi: m = crt1_match(line) if m: fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) else: fo.write(line) fo.close() opt.append('-F'+new_cfg) return opt def get_flags_opt(self): return ['-O3'] if __name__ == '__main__': from numpy.distutils import customized_fcompiler log.set_verbosity(2) print(customized_fcompiler(compiler='ibm').get_version())
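A quick check (outside the class) of how version_pattern above pulls the version out of the banner quoted in the adjacent comment; the named group does all the work:

import re

version_pattern = (r'(xlf\(1\)\s*|)IBM XL Fortran '
                   r'((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)'
                   r'(?P<version>[^\s*]*)')
banner = 'IBM XL Fortran Enterprise Edition V10.1 for AIX'
m = re.search(version_pattern, banner)
print(m.group('version'))   # 10.1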
from __future__ import division, absolute_import, print_function import os import pytest import numpy as np from numpy.testing import assert_raises, assert_equal from . import util def _path(*a): return os.path.join(*((os.path.dirname(__file__),) + a)) class TestParameters(util.F2PyTest): # Check that intent(in out) translates as intent(inout) sources = [_path('src', 'parameter', 'constant_real.f90'), _path('src', 'parameter', 'constant_integer.f90'), _path('src', 'parameter', 'constant_both.f90'), _path('src', 'parameter', 'constant_compound.f90'), _path('src', 'parameter', 'constant_non_compound.f90'), ] @pytest.mark.slow def test_constant_real_single(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float32)[::2] assert_raises(ValueError, self.module.foo_single, x) # check values with contiguous array x = np.arange(3, dtype=np.float32) self.module.foo_single(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_real_double(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_double, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_double(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_compound_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_compound_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_compound_int(x) assert_equal(x, [0 + 1 + 2*6, 1, 2]) @pytest.mark.slow def test_constant_non_compound_int(self): # check values x = np.arange(4, dtype=np.int32) self.module.foo_non_compound_int(x) assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) @pytest.mark.slow def test_constant_integer_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] assert_raises(ValueError, self.module.foo_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_int(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_integer_long(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int64)[::2] assert_raises(ValueError, self.module.foo_long, x) # check values with contiguous array x = np.arange(3, dtype=np.int64) self.module.foo_long(x) assert_equal(x, [0 + 1 + 2*3, 1, 2]) @pytest.mark.slow def test_constant_both(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_no(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_no, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_no(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) @pytest.mark.slow def test_constant_sum(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] assert_raises(ValueError, self.module.foo_sum, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_sum(x) assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
MSeifert04/numpy
numpy/f2py/tests/test_parameter.py
numpy/distutils/fcompiler/ibm.py
# Authors: Adam Li <adam2392@gmail.com> # Alex Rockhill <aprockhill@mailbox.org> # License: BSD Style. from functools import partial from ...utils import verbose from ..utils import (has_dataset, _data_path, _data_path_doc, _get_version, _version_doc) has_epilepsy_ecog_data = partial(has_dataset, name='epilepsy_ecog') @verbose def data_path( path=None, force_update=False, update_path=True, download=True, verbose=None): # noqa: D103 return _data_path(path=path, force_update=force_update, update_path=update_path, name='epilepsy_ecog', download=download) data_path.__doc__ = _data_path_doc.format( name='epilepsy_ecog', conf='MNE_DATASETS_EPILEPSY_ECOG_PATH') def get_version(): # noqa: D103 return _get_version('epilepsy_ecog') get_version.__doc__ = _version_doc.format(name='epilepsy_ecog')
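The module above is a thin wrapper around the shared mne.datasets helpers, so it is used like any other fetcher; a short usage sketch (the first call downloads the dataset if it is not cached locally, which needs network access):

import mne

path = mne.datasets.epilepsy_ecog.data_path()     # locate (or fetch) the data
print(path)
print(mne.datasets.epilepsy_ecog.get_version())   # dataset version string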
# Authors: Eric Larson <larson.eric.d@gmail.com> # # License: BSD (3-clause) import copy import os from os import path as op import shutil import numpy as np from numpy import array_equal from numpy.testing import assert_allclose, assert_array_equal import pytest import mne from mne import (pick_types, read_annotations, create_info, events_from_annotations, make_forward_solution) from mne.transforms import apply_trans from mne.io import read_raw_fif, read_raw_ctf, RawArray from mne.io.compensator import get_current_comp from mne.io.ctf.constants import CTF from mne.io.tests.test_raw import _test_raw_reader from mne.tests.test_annotations import _assert_annotations_equal from mne.utils import _clean_names, catch_logging, _stamp_to_dt from mne.datasets import testing, spm_face, brainstorm from mne.io.constants import FIFF ctf_dir = op.join(testing.data_path(download=False), 'CTF') ctf_fname_continuous = 'testdata_ctf.ds' ctf_fname_1_trial = 'testdata_ctf_short.ds' ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds' ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds' ctf_fname_somato = 'somMDYO-18av.ds' ctf_fname_catch = 'catch-alp-good-f.ds' somato_fname = op.join( brainstorm.bst_raw.data_path(download=False), 'MEG', 'bst_raw', 'subj001_somatosensory_20111109_01_AUX-f.ds' ) block_sizes = { ctf_fname_continuous: 12000, ctf_fname_1_trial: 4801, ctf_fname_2_trials: 12000, ctf_fname_discont: 1201, ctf_fname_somato: 313, ctf_fname_catch: 2500, } single_trials = ( ctf_fname_continuous, ctf_fname_1_trial, ) ctf_fnames = tuple(sorted(block_sizes.keys())) @pytest.mark.slowtest @testing.requires_testing_data def test_read_ctf(tmpdir): """Test CTF reader.""" temp_dir = str(tmpdir) out_fname = op.join(temp_dir, 'test_py_raw.fif') # Create a dummy .eeg file so we can test our reading/application of it os.mkdir(op.join(temp_dir, 'randpos')) ctf_eeg_fname = op.join(temp_dir, 'randpos', ctf_fname_catch) shutil.copytree(op.join(ctf_dir, ctf_fname_catch), ctf_eeg_fname) with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'): raw = _test_raw_reader(read_raw_ctf, directory=ctf_eeg_fname) picks = pick_types(raw.info, meg=False, eeg=True) pos = np.random.RandomState(42).randn(len(picks), 3) fake_eeg_fname = op.join(ctf_eeg_fname, 'catch-alp-good-f.eeg') # Create a bad file with open(fake_eeg_fname, 'wb') as fid: fid.write('foo\n'.encode('ascii')) pytest.raises(RuntimeError, read_raw_ctf, ctf_eeg_fname) # Create a good file with open(fake_eeg_fname, 'wb') as fid: for ii, ch_num in enumerate(picks): args = (str(ch_num + 1), raw.ch_names[ch_num],) + tuple( '%0.5f' % x for x in 100 * pos[ii]) # convert to cm fid.write(('\t'.join(args) + '\n').encode('ascii')) pos_read_old = np.array([raw.info['chs'][p]['loc'][:3] for p in picks]) with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'): raw = read_raw_ctf(ctf_eeg_fname) # read modified data pos_read = np.array([raw.info['chs'][p]['loc'][:3] for p in picks]) assert_allclose(apply_trans(raw.info['ctf_head_t'], pos), pos_read, rtol=1e-5, atol=1e-5) assert (pos_read == pos_read_old).mean() < 0.1 shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_randpos_raw.fif'), op.join(temp_dir, 'randpos', 'catch-alp-good-f.ds_raw.fif')) # Create a version with no hc, starting out *with* EEG pos (error) os.mkdir(op.join(temp_dir, 'nohc')) ctf_no_hc_fname = op.join(temp_dir, 'no_hc', ctf_fname_catch) shutil.copytree(ctf_eeg_fname, ctf_no_hc_fname) remove_base = op.join(ctf_no_hc_fname, op.basename(ctf_fname_catch[:-3])) os.remove(remove_base + 
'.hc') with pytest.warns(RuntimeWarning, match='MISC channel'): pytest.raises(RuntimeError, read_raw_ctf, ctf_no_hc_fname) os.remove(remove_base + '.eeg') shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_nohc_raw.fif'), op.join(temp_dir, 'no_hc', 'catch-alp-good-f.ds_raw.fif')) # All our files use_fnames = [op.join(ctf_dir, c) for c in ctf_fnames] for fname in use_fnames: raw_c = read_raw_fif(fname + '_raw.fif', preload=True) with pytest.warns(None): # sometimes matches "MISC channel" raw = read_raw_ctf(fname) # check info match assert_array_equal(raw.ch_names, raw_c.ch_names) assert_allclose(raw.times, raw_c.times) assert_allclose(raw._cals, raw_c._cals) assert (raw.info['meas_id']['version'] == raw_c.info['meas_id']['version'] + 1) for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'): assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'], rtol=1e-4, atol=1e-7) # XXX 2019/11/29 : MNC-C FIF conversion files don't have meas_date set. # Consider adding meas_date to below checks once this is addressed in # MNE-C for key in ('acq_pars', 'acq_stim', 'bads', 'ch_names', 'custom_ref_applied', 'description', 'events', 'experimenter', 'highpass', 'line_freq', 'lowpass', 'nchan', 'proj_id', 'proj_name', 'projs', 'sfreq', 'subject_info'): assert raw.info[key] == raw_c.info[key], key if op.basename(fname) not in single_trials: # We don't force buffer size to be smaller like MNE-C assert raw.buffer_size_sec == raw_c.buffer_size_sec assert len(raw.info['comps']) == len(raw_c.info['comps']) for c1, c2 in zip(raw.info['comps'], raw_c.info['comps']): for key in ('colcals', 'rowcals'): assert_allclose(c1[key], c2[key]) assert c1['save_calibrated'] == c2['save_calibrated'] for key in ('row_names', 'col_names', 'nrow', 'ncol'): assert_array_equal(c1['data'][key], c2['data'][key]) assert_allclose(c1['data']['data'], c2['data']['data'], atol=1e-7, rtol=1e-5) assert_allclose(raw.info['hpi_results'][0]['coord_trans']['trans'], raw_c.info['hpi_results'][0]['coord_trans']['trans'], rtol=1e-5, atol=1e-7) assert len(raw.info['chs']) == len(raw_c.info['chs']) for ii, (c1, c2) in enumerate(zip(raw.info['chs'], raw_c.info['chs'])): for key in ('kind', 'scanno', 'unit', 'ch_name', 'unit_mul', 'range', 'coord_frame', 'coil_type', 'logno'): if c1['ch_name'] == 'RMSP' and \ 'catch-alp-good-f' in fname and \ key in ('kind', 'unit', 'coord_frame', 'coil_type', 'logno'): continue # XXX see below... 
if key == 'coil_type' and c1[key] == FIFF.FIFFV_COIL_EEG: # XXX MNE-C bug that this is not set assert c2[key] == FIFF.FIFFV_COIL_NONE continue assert c1[key] == c2[key], key for key in ('cal',): assert_allclose(c1[key], c2[key], atol=1e-6, rtol=1e-4, err_msg='raw.info["chs"][%d][%s]' % (ii, key)) # XXX 2016/02/24: fixed bug with normal computation that used # to exist, once mne-C tools are updated we should update our FIF # conversion files, then the slices can go away (and the check # can be combined with that for "cal") for key in ('loc',): if c1['ch_name'] == 'RMSP' and 'catch-alp-good-f' in fname: continue if (c2[key][:3] == 0.).all(): check = [np.nan] * 3 else: check = c2[key][:3] assert_allclose(c1[key][:3], check, atol=1e-6, rtol=1e-4, err_msg='raw.info["chs"][%d][%s]' % (ii, key)) if (c2[key][3:] == 0.).all(): check = [np.nan] * 3 else: check = c2[key][9:12] assert_allclose(c1[key][9:12], check, atol=1e-6, rtol=1e-4, err_msg='raw.info["chs"][%d][%s]' % (ii, key)) # Make sure all digitization points are in the MNE head coord frame for p in raw.info['dig']: assert p['coord_frame'] == FIFF.FIFFV_COORD_HEAD, \ 'dig points must be in FIFF.FIFFV_COORD_HEAD' if fname.endswith('catch-alp-good-f.ds'): # omit points from .pos file raw.info['dig'] = raw.info['dig'][:-10] # XXX: Next test would fail because c-tools assign the fiducials from # CTF data as HPI. Should eventually clarify/unify with Matti. # assert_dig_allclose(raw.info, raw_c.info) # check data match raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.) raw_read = read_raw_fif(out_fname) # so let's check tricky cases based on sample boundaries rng = np.random.RandomState(0) pick_ch = rng.permutation(np.arange(len(raw.ch_names)))[:10] bnd = int(round(raw.info['sfreq'] * raw.buffer_size_sec)) assert bnd == raw._raw_extras[0]['block_size'] assert bnd == block_sizes[op.basename(fname)] slices = (slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd), slice(3, 300), slice(None)) if len(raw.times) >= 2 * bnd: # at least two complete blocks slices = slices + (slice(bnd, 2 * bnd), slice(bnd, bnd + 1), slice(0, bnd + 100)) for sl_time in slices: assert_allclose(raw[pick_ch, sl_time][0], raw_c[pick_ch, sl_time][0]) assert_allclose(raw_read[pick_ch, sl_time][0], raw_c[pick_ch, sl_time][0]) # all data / preload raw.load_data() assert_allclose(raw[:][0], raw_c[:][0], atol=1e-15) # test bad segment annotations if 'testdata_ctf_short.ds' in fname: assert 'bad' in raw.annotations.description[0] assert_allclose(raw.annotations.onset, [2.15]) assert_allclose(raw.annotations.duration, [0.0225]) with pytest.raises(TypeError, match='path-like'): read_raw_ctf(1) with pytest.raises(FileNotFoundError, match='does not exist'): read_raw_ctf(ctf_fname_continuous + 'foo.ds') # test ignoring of system clock read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'ignore') with pytest.raises(ValueError, match='system_clock'): read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'foo') @testing.requires_testing_data def test_rawctf_clean_names(): """Test RawCTF _clean_names method.""" # read test data with pytest.warns(RuntimeWarning, match='ref channel RMSP did not'): raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch)) raw_cleaned = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch), clean_names=True) test_channel_names = _clean_names(raw.ch_names) test_info_comps = copy.deepcopy(raw.info['comps']) # channel names should not be cleaned by default assert raw.ch_names != test_channel_names chs_ch_names = [ch['ch_name'] for ch in raw.info['chs']] assert chs_ch_names 
!= test_channel_names for test_comp, comp in zip(test_info_comps, raw.info['comps']): for key in ('row_names', 'col_names'): assert not array_equal(_clean_names(test_comp['data'][key]), comp['data'][key]) # channel names should be cleaned if clean_names=True assert raw_cleaned.ch_names == test_channel_names for ch, test_ch_name in zip(raw_cleaned.info['chs'], test_channel_names): assert ch['ch_name'] == test_ch_name for test_comp, comp in zip(test_info_comps, raw_cleaned.info['comps']): for key in ('row_names', 'col_names'): assert _clean_names(test_comp['data'][key]) == comp['data'][key] @spm_face.requires_spm_data def test_read_spm_ctf(): """Test CTF reader with omitted samples.""" data_path = spm_face.data_path() raw_fname = op.join(data_path, 'MEG', 'spm', 'SPM_CTF_MEG_example_faces1_3D.ds') raw = read_raw_ctf(raw_fname) extras = raw._raw_extras[0] assert extras['n_samp'] == raw.n_times assert extras['n_samp'] != extras['n_samp_tot'] # Test that LPA, nasion and RPA are correct. coord_frames = np.array([d['coord_frame'] for d in raw.info['dig']]) assert np.all(coord_frames == FIFF.FIFFV_COORD_HEAD) cardinals = {d['ident']: d['r'] for d in raw.info['dig']} assert cardinals[1][0] < cardinals[2][0] < cardinals[3][0] # x coord assert cardinals[1][1] < cardinals[2][1] # y coord assert cardinals[3][1] < cardinals[2][1] # y coord for key in cardinals.keys(): assert_allclose(cardinals[key][2], 0, atol=1e-6) # z coord @testing.requires_testing_data @pytest.mark.parametrize('comp_grade', [0, 1]) def test_saving_picked(tmpdir, comp_grade): """Test saving picked CTF instances.""" temp_dir = str(tmpdir) out_fname = op.join(temp_dir, 'test_py_raw.fif') raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial)) assert raw.info['meas_date'] == _stamp_to_dt((1367228160, 0)) raw.crop(0, 1).load_data() assert raw.compensation_grade == get_current_comp(raw.info) == 0 assert len(raw.info['comps']) == 5 pick_kwargs = dict(meg=True, ref_meg=False, verbose=True) raw.apply_gradient_compensation(comp_grade) with catch_logging() as log: raw_pick = raw.copy().pick_types(**pick_kwargs) assert len(raw.info['comps']) == 5 assert len(raw_pick.info['comps']) == 0 log = log.getvalue() assert 'Removing 5 compensators' in log raw_pick.save(out_fname, overwrite=True) # should work raw2 = read_raw_fif(out_fname) assert (raw_pick.ch_names == raw2.ch_names) assert_array_equal(raw_pick.times, raw2.times) assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, atol=1e-20) # atol is very small but > 0 raw2 = read_raw_fif(out_fname, preload=True) assert (raw_pick.ch_names == raw2.ch_names) assert_array_equal(raw_pick.times, raw2.times) assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, atol=1e-20) # atol is very small but > 0 @brainstorm.bst_raw.requires_bstraw_data def test_read_ctf_annotations(): """Test reading CTF marker file.""" EXPECTED_LATENCIES = np.array([ 5640, 7950, 9990, 12253, 14171, 16557, 18896, 20846, # noqa 22702, 24990, 26830, 28974, 30906, 33077, 34985, 36907, # noqa 38922, 40760, 42881, 45222, 47457, 49618, 51802, 54227, # noqa 56171, 58274, 60394, 62375, 64444, 66767, 68827, 71109, # noqa 73499, 75807, 78146, 80415, 82554, 84508, 86403, 88426, # noqa 90746, 92893, 94779, 96822, 98996, 99001, 100949, 103325, # noqa 105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663, # noqa 121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210, # noqa 139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646, # noqa 156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519, # noqa 
174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330, # noqa 192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409, # noqa 209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305, # noqa 226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762, # noqa 243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354, # noqa 260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197, # noqa 278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183, # noqa 295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124, # noqa 312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959, # noqa 329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034, # noqa 344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603, # noqa 361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204, # noqa 378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785, # noqa 396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686, # noqa 413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215, # noqa 429278, 431668 # noqa ]) - 1 # Fieldtrip has 1 sample difference with MNE raw = RawArray( data=np.empty((1, 432000), dtype=np.float64), info=create_info(ch_names=1, sfreq=1200.0)) raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date']) raw.set_annotations(read_annotations(somato_fname)) events, _ = events_from_annotations(raw) latencies = np.sort(events[:, 0]) assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6) @testing.requires_testing_data def test_read_ctf_annotations_smoke_test(): """Test reading CTF marker file. `testdata_ctf_mc.ds` has no trials or offsets therefore its a plain reading of whatever is in the MarkerFile.mrk. """ EXPECTED_ONSET = [ 0., 0.1425, 0.285, 0.42833333, 0.57083333, 0.71416667, 0.85666667, 0.99916667, 1.1425, 1.285, 1.4275, 1.57083333, 1.71333333, 1.85666667, 1.99916667, 2.14166667, 2.285, 2.4275, 2.57083333, 2.71333333, 2.85583333, 2.99916667, 3.14166667, 3.28416667, 3.4275, 3.57, 3.71333333, 3.85583333, 3.99833333, 4.14166667, 4.28416667, 4.42666667, 4.57, 4.7125, 4.85583333, 4.99833333 ] fname = op.join(ctf_dir, 'testdata_ctf_mc.ds') annot = read_annotations(fname) assert_allclose(annot.onset, EXPECTED_ONSET) raw = read_raw_ctf(fname) _assert_annotations_equal(raw.annotations, annot, 1e-6) def _read_res4_mag_comp(dsdir): res = mne.io.ctf.res4._read_res4(dsdir) for ch in res['chs']: if ch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH: ch['grad_order_no'] = 1 return res def _bad_res4_grad_comp(dsdir): res = mne.io.ctf.res4._read_res4(dsdir) for ch in res['chs']: if ch['sensor_type_index'] == CTF.CTFV_MEG_CH: ch['grad_order_no'] = 1 break return res @testing.requires_testing_data def test_read_ctf_mag_bad_comp(tmpdir, monkeypatch): """Test CTF reader with mag comps and bad comps.""" path = op.join(ctf_dir, ctf_fname_continuous) raw_orig = read_raw_ctf(path) assert raw_orig.compensation_grade == 0 monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _read_res4_mag_comp) raw_mag_comp = read_raw_ctf(path) assert raw_mag_comp.compensation_grade == 0 sphere = mne.make_sphere_model() src = mne.setup_volume_source_space(pos=50., exclude=5., bem=sphere) assert src[0]['nuse'] == 26 for grade in (0, 1): raw_orig.apply_gradient_compensation(grade) raw_mag_comp.apply_gradient_compensation(grade) args = (None, src, sphere, True, False) fwd_orig = make_forward_solution(raw_orig.info, *args) fwd_mag_comp = make_forward_solution(raw_mag_comp.info, *args) assert_allclose(fwd_orig['sol']['data'], fwd_mag_comp['sol']['data']) 
monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _bad_res4_grad_comp) with pytest.raises(RuntimeError, match='inconsistent compensation grade'): read_raw_ctf(path)
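The tests above all revolve around mne.io.read_raw_ctf; the following minimal sketch shows its everyday use outside the test harness. The .ds path is a placeholder, and applying grade-3 compensation assumes the recording carries the corresponding reference-channel coefficients.

# Minimal sketch: read a CTF .ds directory, inspect it, convert to FIF.
import mne

raw = mne.io.read_raw_ctf('my_recording.ds', preload=True)  # placeholder path
print(raw.info['sfreq'], raw.compensation_grade)
raw.apply_gradient_compensation(3)       # 3rd-order gradient compensation
raw.save('my_recording_raw.fif', overwrite=True)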
rkmaddox/mne-python
mne/io/ctf/tests/test_ctf.py
mne/datasets/epilepsy_ecog/_data.py
# Authors: Denis A. Engemann <denis.engemann@gmail.com> # Teon Brooks <teon.brooks@gmail.com> # # simplified BSD-3 license import datetime import time import numpy as np from .egimff import _read_raw_egi_mff from .events import _combine_triggers from ..base import BaseRaw from ..utils import _read_segments_file, _create_chs from ..meas_info import _empty_info from ..constants import FIFF from ...utils import verbose, logger, warn, _validate_type, _check_fname def _read_header(fid): """Read EGI binary header.""" version = np.fromfile(fid, '<i4', 1)[0] if version > 6 & ~np.bitwise_and(version, 6): version = version.byteswap().astype(np.uint32) else: raise ValueError('Watchout. This does not seem to be a simple ' 'binary EGI file.') def my_fread(*x, **y): return np.fromfile(*x, **y)[0] info = dict( version=version, year=my_fread(fid, '>i2', 1), month=my_fread(fid, '>i2', 1), day=my_fread(fid, '>i2', 1), hour=my_fread(fid, '>i2', 1), minute=my_fread(fid, '>i2', 1), second=my_fread(fid, '>i2', 1), millisecond=my_fread(fid, '>i4', 1), samp_rate=my_fread(fid, '>i2', 1), n_channels=my_fread(fid, '>i2', 1), gain=my_fread(fid, '>i2', 1), bits=my_fread(fid, '>i2', 1), value_range=my_fread(fid, '>i2', 1) ) unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0 precision = np.bitwise_and(version, 6) if precision == 0: raise RuntimeError('Floating point precision is undefined.') if unsegmented: info.update(dict(n_categories=0, n_segments=1, n_samples=np.fromfile(fid, '>i4', 1)[0], n_events=np.fromfile(fid, '>i2', 1)[0], event_codes=[], category_names=[], category_lengths=[], pre_baseline=0)) for event in range(info['n_events']): event_codes = ''.join(np.fromfile(fid, 'S1', 4).astype('U1')) info['event_codes'].append(event_codes) else: raise NotImplementedError('Only continuous files are supported') info['unsegmented'] = unsegmented info['dtype'], info['orig_format'] = {2: ('>i2', 'short'), 4: ('>f4', 'float'), 6: ('>f8', 'double')}[precision] info['dtype'] = np.dtype(info['dtype']) return info def _read_events(fid, info): """Read events.""" events = np.zeros([info['n_events'], info['n_segments'] * info['n_samples']]) fid.seek(36 + info['n_events'] * 4, 0) # skip header for si in range(info['n_samples']): # skip data channels fid.seek(info['n_channels'] * info['dtype'].itemsize, 1) # read event channels events[:, si] = np.fromfile(fid, info['dtype'], info['n_events']) return events @verbose def read_raw_egi(input_fname, eog=None, misc=None, include=None, exclude=None, preload=False, channel_naming='E%d', verbose=None): """Read EGI simple binary as raw object. .. note:: This function attempts to create a synthetic trigger channel. See the Notes section below. Parameters ---------- input_fname : path-like Path to the raw file. Files with an extension .mff are automatically considered to be EGI's native MFF format files. eog : list or tuple Names of channels or list of indices that should be designated EOG channels. Default is None. misc : list or tuple Names of channels or list of indices that should be designated MISC channels. Default is None. include : None | list The event channels to be ignored when creating the synthetic trigger. Defaults to None. Note. Overrides ``exclude`` parameter. exclude : None | list The event channels to be ignored when creating the synthetic trigger. Defaults to None. If None, channels that have more than one event and the ``sync`` and ``TREV`` channels will be ignored. %(preload)s .. 
versionadded:: 0.11 channel_naming : str Channel naming convention for the data channels. Defaults to 'E%%d' (resulting in channel names 'E1', 'E2', 'E3'...). The effective default prior to 0.14.0 was 'EEG %%03d'. .. versionadded:: 0.14.0 %(verbose)s Returns ------- raw : instance of RawEGI A Raw object containing EGI data. See Also -------- mne.io.Raw : Documentation of attribute and methods. Notes ----- The trigger channel names are based on the arbitrary user dependent event codes used. However this function will attempt to generate a **synthetic trigger channel** named ``STI 014`` in accordance with the general Neuromag / MNE naming pattern. The event_id assignment equals ``np.arange(n_events) + 1``. The resulting ``event_id`` mapping is stored as attribute to the resulting raw object but will be ignored when saving to a fiff. Note. The trigger channel is artificially constructed based on timestamps received by the Netstation. As a consequence, triggers have only short durations. This step will fail if events are not mutually exclusive. """ _validate_type(input_fname, 'path-like', 'input_fname') input_fname = str(input_fname) if input_fname.endswith('.mff'): return _read_raw_egi_mff(input_fname, eog, misc, include, exclude, preload, channel_naming, verbose) return RawEGI(input_fname, eog, misc, include, exclude, preload, channel_naming, verbose) class RawEGI(BaseRaw): """Raw object from EGI simple binary file.""" @verbose def __init__(self, input_fname, eog=None, misc=None, include=None, exclude=None, preload=False, channel_naming='E%d', verbose=None): # noqa: D102 input_fname = _check_fname(input_fname, 'read', True, 'input_fname') if eog is None: eog = [] if misc is None: misc = [] with open(input_fname, 'rb') as fid: # 'rb' important for py3k logger.info('Reading EGI header from %s...' % input_fname) egi_info = _read_header(fid) logger.info(' Reading events ...') egi_events = _read_events(fid, egi_info) # update info + jump if egi_info['value_range'] != 0 and egi_info['bits'] != 0: cal = egi_info['value_range'] / 2. ** egi_info['bits'] else: cal = 1e-6 logger.info(' Assembling measurement info ...') event_codes = [] if egi_info['n_events'] > 0: event_codes = list(egi_info['event_codes']) if include is None: exclude_list = ['sync', 'TREV'] if exclude is None else exclude exclude_inds = [i for i, k in enumerate(event_codes) if k in exclude_list] more_excludes = [] if exclude is None: for ii, event in enumerate(egi_events): if event.sum() <= 1 and event_codes[ii]: more_excludes.append(ii) if len(exclude_inds) + len(more_excludes) == len(event_codes): warn('Did not find any event code with more than one ' 'event.', RuntimeWarning) else: exclude_inds.extend(more_excludes) exclude_inds.sort() include_ = [i for i in np.arange(egi_info['n_events']) if i not in exclude_inds] include_names = [k for i, k in enumerate(event_codes) if i in include_] else: include_ = [i for i, k in enumerate(event_codes) if k in include] include_names = include for kk, v in [('include', include_names), ('exclude', exclude)]: if isinstance(v, list): for k in v: if k not in event_codes: raise ValueError('Could find event named "%s"' % k) elif v is not None: raise ValueError('`%s` must be None or of type list' % kk) event_ids = np.arange(len(include_)) + 1 logger.info(' Synthesizing trigger channel "STI 014" ...') logger.info(' Excluding events {%s} ...' 
% ", ".join([k for i, k in enumerate(event_codes) if i not in include_])) egi_info['new_trigger'] = _combine_triggers( egi_events[include_], remapping=event_ids) self.event_id = dict(zip([e for e in event_codes if e in include_names], event_ids)) else: # No events self.event_id = None egi_info['new_trigger'] = None info = _empty_info(egi_info['samp_rate']) my_time = datetime.datetime( egi_info['year'], egi_info['month'], egi_info['day'], egi_info['hour'], egi_info['minute'], egi_info['second']) my_timestamp = time.mktime(my_time.timetuple()) info['meas_date'] = (my_timestamp, 0) ch_names = [channel_naming % (i + 1) for i in range(egi_info['n_channels'])] ch_names.extend(list(egi_info['event_codes'])) if egi_info['new_trigger'] is not None: ch_names.append('STI 014') # our new_trigger nchan = len(ch_names) cals = np.repeat(cal, nchan) ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) sti_ch_idx = [i for i, name in enumerate(ch_names) if name.startswith('STI') or name in event_codes] for idx in sti_ch_idx: chs[idx].update({'unit_mul': FIFF.FIFF_UNITM_NONE, 'cal': 1., 'kind': FIFF.FIFFV_STIM_CH, 'coil_type': FIFF.FIFFV_COIL_NONE, 'unit': FIFF.FIFF_UNIT_NONE}) info['chs'] = chs info._update_redundant() super(RawEGI, self).__init__( info, preload, orig_format=egi_info['orig_format'], filenames=[input_fname], last_samps=[egi_info['n_samples'] - 1], raw_extras=[egi_info], verbose=verbose) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file.""" egi_info = self._raw_extras[fi] dtype = egi_info['dtype'] n_chan_read = egi_info['n_channels'] + egi_info['n_events'] offset = 36 + egi_info['n_events'] * 4 trigger_ch = egi_info['new_trigger'] _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype=dtype, n_channels=n_chan_read, offset=offset, trigger_ch=trigger_ch)
rkmaddox/mne-python
mne/io/ctf/tests/test_ctf.py
mne/io/egi/egi.py
"""Mayavi/traits GUI for averaging two sets of KIT marker points.""" # Authors: Christian Brodbeck <christianbrodbeck@nyu.edu> # # License: BSD (3-clause) import os import sys import numpy as np from mayavi.tools.mlab_scene_model import MlabSceneModel from pyface.api import confirm, error, FileDialog, OK, YES from traits.api import (HasTraits, HasPrivateTraits, on_trait_change, cached_property, Instance, Property, Array, Bool, Button, Enum, File, Float, List, Str, ArrayOrNone) from traitsui.api import View, Item, HGroup, VGroup, CheckListEditor from traitsui.menu import Action, CancelButton from ..transforms import apply_trans, rotation, translation from ..coreg import fit_matched_points from ..io.kit import read_mrk from ..io._digitization import _write_dig_points from ._viewer import PointObject from ._backend import _get_pyface_backend if _get_pyface_backend() == 'wx': mrk_wildcard = [ 'Supported Files (*.sqd, *.mrk, *.txt, *.pickled)|*.sqd;*.mrk;*.txt;*.pickled', # noqa:E501 'Sqd marker file (*.sqd;*.mrk)|*.sqd;*.mrk', 'Text marker file (*.txt)|*.txt', 'Pickled markers (*.pickled)|*.pickled'] mrk_out_wildcard = ["Tab separated values file (*.txt)|*.txt"] else: if sys.platform in ('win32', 'linux2'): # on Windows and Ubuntu, multiple wildcards does not seem to work mrk_wildcard = ["*.sqd", "*.mrk", "*.txt", "*.pickled"] else: mrk_wildcard = ["*.sqd;*.mrk;*.txt;*.pickled"] mrk_out_wildcard = "*.txt" out_ext = '.txt' use_editor_v = CheckListEditor(cols=1, values=[(i, str(i)) for i in range(5)]) use_editor_h = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)]) mrk_view_editable = View( VGroup('file', Item('name', show_label=False, style='readonly'), HGroup( Item('use', editor=use_editor_v, enabled_when="enabled", style='custom'), 'points', ), HGroup(Item('clear', enabled_when="can_save", show_label=False), Item('save_as', enabled_when="can_save", show_label=False)), )) mrk_view_basic = View( VGroup('file', Item('name', show_label=False, style='readonly'), Item('use', editor=use_editor_h, enabled_when="enabled", style='custom'), HGroup(Item('clear', enabled_when="can_save", show_label=False), Item('edit', show_label=False), Item('switch_left_right', label="Switch Left/Right", show_label=False), Item('reorder', show_label=False), Item('save_as', enabled_when="can_save", show_label=False)), )) mrk_view_edit = View(VGroup('points')) class ReorderDialog(HasPrivateTraits): """Dialog for reordering marker points.""" order = Str("0 1 2 3 4") index = Property(List, depends_on='order') is_ok = Property(Bool, depends_on='order') view = View( Item('order', label='New order (five space delimited numbers)'), buttons=[CancelButton, Action(name='OK', enabled_when='is_ok')]) def _get_index(self): try: return [int(i) for i in self.order.split()] except ValueError: return [] def _get_is_ok(self): return sorted(self.index) == [0, 1, 2, 3, 4] class MarkerPoints(HasPrivateTraits): """Represent 5 marker points.""" points = Array(float, (5, 3)) can_save = Property(depends_on='points') save_as = Button() view = View(VGroup('points', Item('save_as', enabled_when='can_save'))) @cached_property def _get_can_save(self): return np.any(self.points) def _save_as_fired(self): dlg = FileDialog(action="save as", wildcard=mrk_out_wildcard, default_filename=self.name, default_directory=self.dir) dlg.open() if dlg.return_code != OK: return path, ext = os.path.splitext(dlg.path) if not path.endswith(out_ext) and len(ext) != 0: ValueError("The extension '%s' is not supported." 
% ext) path = path + out_ext if os.path.exists(path): answer = confirm(None, "The file %r already exists. Should it " "be replaced?", "Overwrite File?") if answer != YES: return self.save(path) def save(self, path): """Save the marker points. Parameters ---------- path : str Path to the file to write. The kind of file to write is determined based on the extension: '.txt' for tab separated text file, '.pickled' for pickled file. """ _write_dig_points(path, self.points) class MarkerPointSource(MarkerPoints): # noqa: D401 """MarkerPoints subclass for source files.""" file = File(filter=mrk_wildcard, exists=True) name = Property(Str, depends_on='file') dir = Property(Str, depends_on='file') use = List(list(range(5)), desc="Which points to use for the interpolated " "marker.") enabled = Property(Bool, depends_on=['points', 'use']) clear = Button(desc="Clear the current marker data") edit = Button(desc="Edit the marker coordinates manually") switch_left_right = Button( desc="Switch left and right marker points; this is intended to " "correct for markers that were attached in the wrong order") reorder = Button(desc="Change the order of the marker points") view = mrk_view_basic @cached_property def _get_enabled(self): return np.any(self.points) @cached_property def _get_dir(self): if self.file: return os.path.dirname(self.file) @cached_property def _get_name(self): if self.file: return os.path.basename(self.file) @on_trait_change('file') def load(self, fname): if not fname: self.reset_traits(['points']) return try: pts = read_mrk(fname) except Exception as err: error(None, str(err), "Error Reading mrk") self.reset_traits(['points']) else: self.points = pts def _clear_fired(self): self.reset_traits(['file', 'points', 'use']) def _edit_fired(self): self.edit_traits(view=mrk_view_edit) def _reorder_fired(self): dlg = ReorderDialog() ui = dlg.edit_traits(kind='modal') if not ui.result: # user pressed cancel return self.points = self.points[dlg.index] def _switch_left_right_fired(self): self.points = self.points[[1, 0, 2, 4, 3]] class MarkerPointDest(MarkerPoints): # noqa: D401 """MarkerPoints subclass that serves for derived points.""" src1 = Instance(MarkerPointSource) src2 = Instance(MarkerPointSource) name = Property(Str, depends_on='src1.name,src2.name') dir = Property(Str, depends_on='src1.dir,src2.dir') points = Property(ArrayOrNone(float, (5, 3)), depends_on=['method', 'src1.points', 'src1.use', 'src2.points', 'src2.use']) enabled = Property(Bool, depends_on=['points']) method = Enum('Transform', 'Average', desc="Transform: estimate a rotation" "/translation from mrk1 to mrk2; Average: use the average " "of the mrk1 and mrk2 coordinates for each point.") view = View(VGroup(Item('method', style='custom'), Item('save_as', enabled_when='can_save', show_label=False))) @cached_property def _get_dir(self): return self.src1.dir @cached_property def _get_name(self): n1 = self.src1.name n2 = self.src2.name if not n1: if n2: return n2 else: return '' elif not n2: return n1 if n1 == n2: return n1 i = 0 l1 = len(n1) - 1 l2 = len(n1) - 2 while n1[i] == n2[i]: if i == l1: return n1 elif i == l2: return n2 i += 1 return n1[:i] @cached_property def _get_enabled(self): return np.any(self.points) @cached_property def _get_points(self): # in case only one or no source is enabled if not (self.src1 and self.src1.enabled): if (self.src2 and self.src2.enabled): return self.src2.points else: return np.zeros((5, 3)) elif not (self.src2 and self.src2.enabled): return self.src1.points # Average method if self.method == 
'Average': if len(np.union1d(self.src1.use, self.src2.use)) < 5: error(None, "Need at least one source for each point.", "Marker Average Error") return np.zeros((5, 3)) pts = (self.src1.points + self.src2.points) / 2. for i in np.setdiff1d(self.src1.use, self.src2.use): pts[i] = self.src1.points[i] for i in np.setdiff1d(self.src2.use, self.src1.use): pts[i] = self.src2.points[i] return pts # Transform method idx = np.intersect1d(np.array(self.src1.use), np.array(self.src2.use), assume_unique=True) if len(idx) < 3: error(None, "Need at least three shared points for trans" "formation.", "Marker Interpolation Error") return np.zeros((5, 3)) src_pts = self.src1.points[idx] tgt_pts = self.src2.points[idx] est = fit_matched_points(src_pts, tgt_pts, out='params') rot = np.array(est[:3]) / 2. tra = np.array(est[3:]) / 2. if len(self.src1.use) == 5: trans = np.dot(translation(*tra), rotation(*rot)) pts = apply_trans(trans, self.src1.points) elif len(self.src2.use) == 5: trans = np.dot(translation(* -tra), rotation(* -rot)) pts = apply_trans(trans, self.src2.points) else: trans1 = np.dot(translation(*tra), rotation(*rot)) pts = apply_trans(trans1, self.src1.points) trans2 = np.dot(translation(* -tra), rotation(* -rot)) for i in np.setdiff1d(self.src2.use, self.src1.use): pts[i] = apply_trans(trans2, self.src2.points[i]) return pts class CombineMarkersModel(HasPrivateTraits): """Combine markers model.""" mrk1_file = Instance(File) mrk2_file = Instance(File) mrk1 = Instance(MarkerPointSource) mrk2 = Instance(MarkerPointSource) mrk3 = Instance(MarkerPointDest) clear = Button(desc="Clear the current marker data") # stats distance = Property(Str, depends_on=['mrk1.points', 'mrk2.points']) def _clear_fired(self): self.mrk1.clear = True self.mrk2.clear = True self.mrk3.reset_traits(['method']) def _mrk1_default(self): return MarkerPointSource() def _mrk1_file_default(self): return self.mrk1.trait('file') def _mrk2_default(self): return MarkerPointSource() def _mrk2_file_default(self): return self.mrk2.trait('file') def _mrk3_default(self): return MarkerPointDest(src1=self.mrk1, src2=self.mrk2) @cached_property def _get_distance(self): if (self.mrk1 is None or self.mrk2 is None or (not np.any(self.mrk1.points)) or (not np.any(self.mrk2.points))): return "" ds = np.sqrt(np.sum((self.mrk1.points - self.mrk2.points) ** 2, 1)) desc = '\t'.join('%.1f mm' % (d * 1000) for d in ds) return desc class CombineMarkersPanel(HasTraits): # noqa: D401 """Has two marker points sources and interpolates to a third one.""" model = Instance(CombineMarkersModel, ()) # model references for UI mrk1 = Instance(MarkerPointSource) mrk2 = Instance(MarkerPointSource) mrk3 = Instance(MarkerPointDest) distance = Str # Visualization scene = Instance(MlabSceneModel) scale = Float(5e-3) mrk1_obj = Instance(PointObject) mrk2_obj = Instance(PointObject) mrk3_obj = Instance(PointObject) trans = Array() view = View(VGroup(VGroup(Item('mrk1', style='custom'), Item('mrk1_obj', style='custom'), show_labels=False, label="Source Marker 1", show_border=True), VGroup(Item('mrk2', style='custom'), Item('mrk2_obj', style='custom'), show_labels=False, label="Source Marker 2", show_border=True), VGroup(Item('distance', style='readonly'), label='Stats', show_border=True), VGroup(Item('mrk3', style='custom'), Item('mrk3_obj', style='custom'), show_labels=False, label="New Marker", show_border=True), )) def _mrk1_default(self): return self.model.mrk1 def _mrk2_default(self): return self.model.mrk2 def _mrk3_default(self): return self.model.mrk3 def 
__init__(self, *args, **kwargs): # noqa: D102 super(CombineMarkersPanel, self).__init__(*args, **kwargs) self.model.sync_trait('distance', self, 'distance', mutual=False) self.mrk1_obj = PointObject(scene=self.scene, color=(0.608, 0.216, 0.216), point_scale=self.scale) self.model.mrk1.sync_trait( 'enabled', self.mrk1_obj, 'visible', mutual=False) self.mrk2_obj = PointObject(scene=self.scene, color=(0.216, 0.608, 0.216), point_scale=self.scale) self.model.mrk2.sync_trait( 'enabled', self.mrk2_obj, 'visible', mutual=False) self.mrk3_obj = PointObject(scene=self.scene, color=(0.588, 0.784, 1.), point_scale=self.scale) self.model.mrk3.sync_trait( 'enabled', self.mrk3_obj, 'visible', mutual=False) @on_trait_change('model:mrk1:points,trans') def _update_mrk1(self): if self.mrk1_obj is not None: self.mrk1_obj.points = apply_trans(self.trans, self.model.mrk1.points) @on_trait_change('model:mrk2:points,trans') def _update_mrk2(self): if self.mrk2_obj is not None: self.mrk2_obj.points = apply_trans(self.trans, self.model.mrk2.points) @on_trait_change('model:mrk3:points,trans') def _update_mrk3(self): if self.mrk3_obj is not None: self.mrk3_obj.points = apply_trans(self.trans, self.model.mrk3.points)
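MarkerPointDest._get_points above combines two marker sets either by averaging or by a fitted rotation/translation. The standalone NumPy sketch below reproduces just the 'Average' branch with made-up coordinates, no GUI or traits involved.

# Sketch of the "Average" combination: average shared points, fall back to the
# single trusted measurement where only one set uses a point.
import numpy as np

rng = np.random.RandomState(0)
mrk1 = rng.randn(5, 3)                       # made-up marker coordinates
mrk2 = mrk1 + 1e-3 * rng.randn(5, 3)
use1, use2 = [0, 1, 2, 3, 4], [0, 1, 2, 4]   # indices trusted in each set

pts = (mrk1 + mrk2) / 2.
for i in np.setdiff1d(use1, use2):           # only in mrk1 -> keep mrk1 value
    pts[i] = mrk1[i]
for i in np.setdiff1d(use2, use1):           # only in mrk2 -> keep mrk2 value
    pts[i] = mrk2[i]
print(pts)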
!= test_channel_names for test_comp, comp in zip(test_info_comps, raw.info['comps']): for key in ('row_names', 'col_names'): assert not array_equal(_clean_names(test_comp['data'][key]), comp['data'][key]) # channel names should be cleaned if clean_names=True assert raw_cleaned.ch_names == test_channel_names for ch, test_ch_name in zip(raw_cleaned.info['chs'], test_channel_names): assert ch['ch_name'] == test_ch_name for test_comp, comp in zip(test_info_comps, raw_cleaned.info['comps']): for key in ('row_names', 'col_names'): assert _clean_names(test_comp['data'][key]) == comp['data'][key] @spm_face.requires_spm_data def test_read_spm_ctf(): """Test CTF reader with omitted samples.""" data_path = spm_face.data_path() raw_fname = op.join(data_path, 'MEG', 'spm', 'SPM_CTF_MEG_example_faces1_3D.ds') raw = read_raw_ctf(raw_fname) extras = raw._raw_extras[0] assert extras['n_samp'] == raw.n_times assert extras['n_samp'] != extras['n_samp_tot'] # Test that LPA, nasion and RPA are correct. coord_frames = np.array([d['coord_frame'] for d in raw.info['dig']]) assert np.all(coord_frames == FIFF.FIFFV_COORD_HEAD) cardinals = {d['ident']: d['r'] for d in raw.info['dig']} assert cardinals[1][0] < cardinals[2][0] < cardinals[3][0] # x coord assert cardinals[1][1] < cardinals[2][1] # y coord assert cardinals[3][1] < cardinals[2][1] # y coord for key in cardinals.keys(): assert_allclose(cardinals[key][2], 0, atol=1e-6) # z coord @testing.requires_testing_data @pytest.mark.parametrize('comp_grade', [0, 1]) def test_saving_picked(tmpdir, comp_grade): """Test saving picked CTF instances.""" temp_dir = str(tmpdir) out_fname = op.join(temp_dir, 'test_py_raw.fif') raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial)) assert raw.info['meas_date'] == _stamp_to_dt((1367228160, 0)) raw.crop(0, 1).load_data() assert raw.compensation_grade == get_current_comp(raw.info) == 0 assert len(raw.info['comps']) == 5 pick_kwargs = dict(meg=True, ref_meg=False, verbose=True) raw.apply_gradient_compensation(comp_grade) with catch_logging() as log: raw_pick = raw.copy().pick_types(**pick_kwargs) assert len(raw.info['comps']) == 5 assert len(raw_pick.info['comps']) == 0 log = log.getvalue() assert 'Removing 5 compensators' in log raw_pick.save(out_fname, overwrite=True) # should work raw2 = read_raw_fif(out_fname) assert (raw_pick.ch_names == raw2.ch_names) assert_array_equal(raw_pick.times, raw2.times) assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, atol=1e-20) # atol is very small but > 0 raw2 = read_raw_fif(out_fname, preload=True) assert (raw_pick.ch_names == raw2.ch_names) assert_array_equal(raw_pick.times, raw2.times) assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, atol=1e-20) # atol is very small but > 0 @brainstorm.bst_raw.requires_bstraw_data def test_read_ctf_annotations(): """Test reading CTF marker file.""" EXPECTED_LATENCIES = np.array([ 5640, 7950, 9990, 12253, 14171, 16557, 18896, 20846, # noqa 22702, 24990, 26830, 28974, 30906, 33077, 34985, 36907, # noqa 38922, 40760, 42881, 45222, 47457, 49618, 51802, 54227, # noqa 56171, 58274, 60394, 62375, 64444, 66767, 68827, 71109, # noqa 73499, 75807, 78146, 80415, 82554, 84508, 86403, 88426, # noqa 90746, 92893, 94779, 96822, 98996, 99001, 100949, 103325, # noqa 105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663, # noqa 121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210, # noqa 139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646, # noqa 156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519, # noqa 
174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330, # noqa 192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409, # noqa 209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305, # noqa 226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762, # noqa 243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354, # noqa 260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197, # noqa 278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183, # noqa 295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124, # noqa 312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959, # noqa 329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034, # noqa 344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603, # noqa 361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204, # noqa 378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785, # noqa 396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686, # noqa 413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215, # noqa 429278, 431668 # noqa ]) - 1 # Fieldtrip has 1 sample difference with MNE raw = RawArray( data=np.empty((1, 432000), dtype=np.float64), info=create_info(ch_names=1, sfreq=1200.0)) raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date']) raw.set_annotations(read_annotations(somato_fname)) events, _ = events_from_annotations(raw) latencies = np.sort(events[:, 0]) assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6) @testing.requires_testing_data def test_read_ctf_annotations_smoke_test(): """Test reading CTF marker file. `testdata_ctf_mc.ds` has no trials or offsets therefore its a plain reading of whatever is in the MarkerFile.mrk. """ EXPECTED_ONSET = [ 0., 0.1425, 0.285, 0.42833333, 0.57083333, 0.71416667, 0.85666667, 0.99916667, 1.1425, 1.285, 1.4275, 1.57083333, 1.71333333, 1.85666667, 1.99916667, 2.14166667, 2.285, 2.4275, 2.57083333, 2.71333333, 2.85583333, 2.99916667, 3.14166667, 3.28416667, 3.4275, 3.57, 3.71333333, 3.85583333, 3.99833333, 4.14166667, 4.28416667, 4.42666667, 4.57, 4.7125, 4.85583333, 4.99833333 ] fname = op.join(ctf_dir, 'testdata_ctf_mc.ds') annot = read_annotations(fname) assert_allclose(annot.onset, EXPECTED_ONSET) raw = read_raw_ctf(fname) _assert_annotations_equal(raw.annotations, annot, 1e-6) def _read_res4_mag_comp(dsdir): res = mne.io.ctf.res4._read_res4(dsdir) for ch in res['chs']: if ch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH: ch['grad_order_no'] = 1 return res def _bad_res4_grad_comp(dsdir): res = mne.io.ctf.res4._read_res4(dsdir) for ch in res['chs']: if ch['sensor_type_index'] == CTF.CTFV_MEG_CH: ch['grad_order_no'] = 1 break return res @testing.requires_testing_data def test_read_ctf_mag_bad_comp(tmpdir, monkeypatch): """Test CTF reader with mag comps and bad comps.""" path = op.join(ctf_dir, ctf_fname_continuous) raw_orig = read_raw_ctf(path) assert raw_orig.compensation_grade == 0 monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _read_res4_mag_comp) raw_mag_comp = read_raw_ctf(path) assert raw_mag_comp.compensation_grade == 0 sphere = mne.make_sphere_model() src = mne.setup_volume_source_space(pos=50., exclude=5., bem=sphere) assert src[0]['nuse'] == 26 for grade in (0, 1): raw_orig.apply_gradient_compensation(grade) raw_mag_comp.apply_gradient_compensation(grade) args = (None, src, sphere, True, False) fwd_orig = make_forward_solution(raw_orig.info, *args) fwd_mag_comp = make_forward_solution(raw_mag_comp.info, *args) assert_allclose(fwd_orig['sol']['data'], fwd_mag_comp['sol']['data']) 
monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _bad_res4_grad_comp) with pytest.raises(RuntimeError, match='inconsistent compensation grade'): read_raw_ctf(path)
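# The tests above exercise mne.io.read_raw_ctf end to end. A minimal usage
# sketch under assumed paths (the .ds directory name is hypothetical); grade 1
# is used because the tests above only switch between grades 0 and 1.
import mne

raw = mne.io.read_raw_ctf('subject_01.ds', preload=True)
print(raw.compensation_grade)                   # compensation currently applied
raw.apply_gradient_compensation(1)              # change gradient compensation
meg_only = raw.copy().pick_types(meg=True, ref_meg=False)
annot = mne.read_annotations('subject_01.ds')   # markers from MarkerFile.mrk
raw.set_annotations(annot)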
repo_name: rkmaddox/mne-python
test_path: mne/io/ctf/tests/test_ctf.py
code_path: mne/gui/_marker_gui.py
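# The file reproduced below (mne/label.py) implements the Label and
# BiHemiLabel classes. A small sketch of the I/O and label arithmetic that
# its docstrings describe; the label file names and subject are hypothetical
# stand-ins.
import mne

lh = mne.read_label('lh.BA1.label', subject='sample')
rh = mne.read_label('rh.BA1.label', subject='sample')
print(lh)                      # e.g. <Label | sample, 'BA1-lh' : N vertices>
both = lh + rh                 # combining hemispheres yields a BiHemiLabel
mne.write_label('lh.BA1_copy.label', lh)
# Splitting along the principal axis needs the FreeSurfer sphere surface, so
# SUBJECTS_DIR (or subjects_dir=...) must point at the subject's data:
# parts = lh.split(2, subjects_dir='/path/to/subjects')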
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Martin Luessi <mluessi@nmr.mgh.harvard.edu> # Denis Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) from collections import defaultdict from colorsys import hsv_to_rgb, rgb_to_hsv import copy as cp import os import os.path as op import re import numpy as np from .morph_map import read_morph_map from .parallel import parallel_func, check_n_jobs from .source_estimate import (SourceEstimate, VolSourceEstimate, _center_of_mass, extract_label_time_course, spatial_src_adjacency) from .source_space import (add_source_space_distances, SourceSpaces, read_freesurfer_lut, _import_nibabel) from .stats.cluster_level import _find_clusters, _get_components from .surface import read_surface, fast_cross_3d, mesh_edges, mesh_dist from .transforms import apply_trans from .utils import (get_subjects_dir, _check_subject, logger, verbose, warn, check_random_state, _validate_type, fill_doc, _check_option, check_version) def _blend_colors(color_1, color_2): """Blend two colors in HSV space. Parameters ---------- color_1, color_2 : None | tuple RGBA tuples with values between 0 and 1. None if no color is available. If both colors are None, the output is None. If only one is None, the output is the other color. Returns ------- color : None | tuple RGBA tuple of the combined color. Saturation, value and alpha are averaged, whereas the new hue is determined as angle half way between the two input colors' hues. """ if color_1 is None and color_2 is None: return None elif color_1 is None: return color_2 elif color_2 is None: return color_1 r_1, g_1, b_1, a_1 = color_1 h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1) r_2, g_2, b_2, a_2 = color_2 h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2) hue_diff = abs(h_1 - h_2) if hue_diff < 0.5: h = min(h_1, h_2) + hue_diff / 2. else: h = max(h_1, h_2) + (1. - hue_diff) / 2. h %= 1. s = (s_1 + s_2) / 2. v = (v_1 + v_2) / 2. r, g, b = hsv_to_rgb(h, s, v) a = (a_1 + a_2) / 2. color = (r, g, b, a) return color def _split_colors(color, n): """Create n colors in HSV space that occupy a gradient in value. Parameters ---------- color : tuple RGBA tuple with values between 0 and 1. n : int >= 2 Number of colors on the gradient. Returns ------- colors : tuple of tuples, len = n N RGBA tuples that occupy a gradient in value (low to high) but share saturation and hue with the input color. """ r, g, b, a = color h, s, v = rgb_to_hsv(r, g, b) gradient_range = np.sqrt(n / 10.) if v > 0.5: v_max = min(0.95, v + gradient_range / 2) v_min = max(0.05, v_max - gradient_range) else: v_min = max(0.05, v - gradient_range / 2) v_max = min(0.95, v_min + gradient_range) hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n)) rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors) rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors) return tuple(rgba_colors) def _n_colors(n, bytes_=False, cmap='hsv'): """Produce a list of n unique RGBA color tuples based on a colormap. Parameters ---------- n : int Number of colors. bytes : bool Return colors as integers values between 0 and 255 (instead of floats between 0 and 1). cmap : str Which colormap to use. Returns ------- colors : array, shape (n, 4) RGBA color values. 
""" n_max = 2 ** 10 if n > n_max: raise NotImplementedError("Can't produce more than %i unique " "colors" % n_max) from matplotlib.cm import get_cmap cm = get_cmap(cmap, n_max) pos = np.linspace(0, 1, n, False) colors = cm(pos, bytes=bytes_) if bytes_: # make sure colors are unique for ii, c in enumerate(colors): if np.any(np.all(colors[:ii] == c, 1)): raise RuntimeError('Could not get %d unique colors from %s ' 'colormap. Try using a different colormap.' % (n, cmap)) return colors @fill_doc class Label(object): """A FreeSurfer/MNE label with vertices restricted to one hemisphere. Labels can be combined with the ``+`` operator: * Duplicate vertices are removed. * If duplicate vertices have conflicting position values, an error is raised. * Values of duplicate vertices are summed. Parameters ---------- vertices : array, shape (N,) Vertex indices (0 based). pos : array, shape (N, 3) | None Locations in meters. If None, then zeros are used. values : array, shape (N,) | None Values at the vertices. If None, then ones are used. hemi : 'lh' | 'rh' Hemisphere to which the label applies. comment : str Kept as information but not used by the object itself. name : str Kept as information but not used by the object itself. filename : str Kept as information but not used by the object itself. subject : str | None Name of the subject the label is from. color : None | matplotlib color Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red). %(verbose)s Attributes ---------- color : None | tuple Default label color, represented as RGBA tuple with values between 0 and 1. comment : str Comment from the first line of the label file. hemi : 'lh' | 'rh' Hemisphere. name : None | str A name for the label. It is OK to change that attribute manually. pos : array, shape (N, 3) Locations in meters. subject : str | None Subject name. It is best practice to set this to the proper value on initialization, but it can also be set manually. values : array, shape (N,) Values at the vertices. 
%(verbose)s vertices : array, shape (N,) Vertex indices (0 based) """ @verbose def __init__(self, vertices=(), pos=None, values=None, hemi=None, comment="", name=None, filename=None, subject=None, color=None, verbose=None): # noqa: D102 # check parameters if not isinstance(hemi, str): raise ValueError('hemi must be a string, not %s' % type(hemi)) vertices = np.asarray(vertices, int) if np.any(np.diff(vertices.astype(int)) <= 0): raise ValueError('Vertices must be ordered in increasing order.') if color is not None: from matplotlib.colors import colorConverter color = colorConverter.to_rgba(color) if values is None: values = np.ones(len(vertices)) else: values = np.asarray(values) if pos is None: pos = np.zeros((len(vertices), 3)) else: pos = np.asarray(pos) if not (len(vertices) == len(values) == len(pos)): raise ValueError("vertices, values and pos need to have same " "length (number of vertices)") # name if name is None and filename is not None: name = op.basename(filename[:-6]) self.vertices = vertices self.pos = pos self.values = values self.hemi = hemi self.comment = comment self.verbose = verbose self.subject = _check_subject(None, subject, raise_error=False) self.color = color self.name = name self.filename = filename def __setstate__(self, state): # noqa: D105 self.vertices = state['vertices'] self.pos = state['pos'] self.values = state['values'] self.hemi = state['hemi'] self.comment = state['comment'] self.verbose = state['verbose'] self.subject = state.get('subject', None) self.color = state.get('color', None) self.name = state['name'] self.filename = state['filename'] def __getstate__(self): # noqa: D105 out = dict(vertices=self.vertices, pos=self.pos, values=self.values, hemi=self.hemi, comment=self.comment, verbose=self.verbose, subject=self.subject, color=self.color, name=self.name, filename=self.filename) return out def __repr__(self): # noqa: D105 name = 'unknown, ' if self.subject is None else self.subject + ', ' name += repr(self.name) if self.name is not None else "unnamed" n_vert = len(self) return "<Label | %s, %s : %i vertices>" % (name, self.hemi, n_vert) def __len__(self): """Return the number of vertices. Returns ------- n_vertices : int The number of vertices. """ return len(self.vertices) def __add__(self, other): """Add Labels.""" _validate_type(other, (Label, BiHemiLabel), 'other') if isinstance(other, BiHemiLabel): return other + self else: # isinstance(other, Label) if self.subject != other.subject: raise ValueError('Label subject parameters must match, got ' '"%s" and "%s". Consider setting the ' 'subject parameter on initialization, or ' 'setting label.subject manually before ' 'combining labels.' 
% (self.subject, other.subject)) if self.hemi != other.hemi: name = '%s + %s' % (self.name, other.name) if self.hemi == 'lh': lh, rh = self.copy(), other.copy() else: lh, rh = other.copy(), self.copy() color = _blend_colors(self.color, other.color) return BiHemiLabel(lh, rh, name, color) # check for overlap duplicates = np.intersect1d(self.vertices, other.vertices) n_dup = len(duplicates) if n_dup: self_dup = [np.where(self.vertices == d)[0][0] for d in duplicates] other_dup = [np.where(other.vertices == d)[0][0] for d in duplicates] if not np.all(self.pos[self_dup] == other.pos[other_dup]): err = ("Labels %r and %r: vertices overlap but differ in " "position values" % (self.name, other.name)) raise ValueError(err) isnew = np.array([v not in duplicates for v in other.vertices]) vertices = np.hstack((self.vertices, other.vertices[isnew])) pos = np.vstack((self.pos, other.pos[isnew])) # find position of other's vertices in new array tgt_idx = [np.where(vertices == v)[0][0] for v in other.vertices] n_self = len(self.values) n_other = len(other.values) new_len = n_self + n_other - n_dup values = np.zeros(new_len, dtype=self.values.dtype) values[:n_self] += self.values values[tgt_idx] += other.values else: vertices = np.hstack((self.vertices, other.vertices)) pos = np.vstack((self.pos, other.pos)) values = np.hstack((self.values, other.values)) indcs = np.argsort(vertices) vertices, pos, values = vertices[indcs], pos[indcs, :], values[indcs] comment = "%s + %s" % (self.comment, other.comment) name0 = self.name if self.name else 'unnamed' name1 = other.name if other.name else 'unnamed' name = "%s + %s" % (name0, name1) color = _blend_colors(self.color, other.color) verbose = self.verbose or other.verbose label = Label(vertices, pos, values, self.hemi, comment, name, None, self.subject, color, verbose) return label def __sub__(self, other): """Subtract Labels.""" _validate_type(other, (Label, BiHemiLabel), 'other') if isinstance(other, BiHemiLabel): if self.hemi == 'lh': return self - other.lh else: return self - other.rh else: # isinstance(other, Label): if self.subject != other.subject: raise ValueError('Label subject parameters must match, got ' '"%s" and "%s". Consider setting the ' 'subject parameter on initialization, or ' 'setting label.subject manually before ' 'combining labels.' % (self.subject, other.subject)) if self.hemi == other.hemi: keep = np.in1d(self.vertices, other.vertices, True, invert=True) else: keep = np.arange(len(self.vertices)) name = "%s - %s" % (self.name or 'unnamed', other.name or 'unnamed') return Label(self.vertices[keep], self.pos[keep], self.values[keep], self.hemi, self.comment, name, None, self.subject, self.color, self.verbose) def save(self, filename): r"""Write to disk as FreeSurfer \*.label file. Parameters ---------- filename : str Path to label file to produce. Notes ----- Note that due to file specification limitations, the Label's subject and color attributes are not saved to disk. """ write_label(filename, self) def copy(self): """Copy the label instance. Returns ------- label : instance of Label The copied label. """ return cp.deepcopy(self) def fill(self, src, name=None): """Fill the surface between sources for a source space label. Parameters ---------- src : SourceSpaces Source space in which the label was defined. If a source space is provided, the label is expanded to fill in surface vertices that lie between the vertices included in the source space. 
For the added vertices, ``pos`` is filled in with positions from the source space, and ``values`` is filled in from the closest source space vertex. name : None | str Name for the new Label (default is self.name). Returns ------- label : Label The label covering the same vertices in source space but also including intermediate surface vertices. See Also -------- Label.restrict Label.smooth """ # find source space patch info if len(self.vertices) == 0: return self.copy() hemi_src = _get_label_src(self, src) if not np.all(np.in1d(self.vertices, hemi_src['vertno'])): msg = "Source space does not contain all of the label's vertices" raise ValueError(msg) if hemi_src['nearest'] is None: warn("Source space is being modified in place because patch " "information is needed. To avoid this in the future, run " "mne.add_source_space_distances() on the source space " "and save it to disk.") if check_version('scipy', '1.3'): dist_limit = 0 else: warn('SciPy < 1.3 detected, adding source space patch ' 'information will be slower. Consider upgrading SciPy.') dist_limit = np.inf add_source_space_distances(src, dist_limit=dist_limit) nearest = hemi_src['nearest'] # find new vertices include = np.in1d(nearest, self.vertices, False) vertices = np.nonzero(include)[0] # values nearest_in_label = np.digitize(nearest[vertices], self.vertices, True) values = self.values[nearest_in_label] # pos pos = hemi_src['rr'][vertices] name = self.name if name is None else name label = Label(vertices, pos, values, self.hemi, self.comment, name, None, self.subject, self.color) return label def restrict(self, src, name=None): """Restrict a label to a source space. Parameters ---------- src : instance of SourceSpaces The source spaces to use to restrict the label. name : None | str Name for the new Label (default is self.name). Returns ------- label : instance of Label The Label restricted to the set of source space vertices. See Also -------- Label.fill Notes ----- .. versionadded:: 0.20 """ if len(self.vertices) == 0: return self.copy() hemi_src = _get_label_src(self, src) mask = np.in1d(self.vertices, hemi_src['vertno']) name = self.name if name is None else name label = Label(self.vertices[mask], self.pos[mask], self.values[mask], self.hemi, self.comment, name, None, self.subject, self.color) return label @verbose def smooth(self, subject=None, smooth=2, grade=None, subjects_dir=None, n_jobs=1, verbose=None): """Smooth the label. Useful for filling in labels made in a decimated source space for display. Parameters ---------- subject : str | None The name of the subject used. If None, the value will be taken from self.subject. smooth : int Number of iterations for the smoothing of the surface data. Cannot be None here since not all vertices are used. For a grade of 5 (e.g., fsaverage), a smoothing of 2 will fill a label. grade : int, list of shape (2,), array, or None Resolution of the icosahedral mesh (typically 5). If None, all vertices will be used (potentially filling the surface). If a list, values will be morphed to the set of vertices specified in grade[0] and grade[1], assuming that these are vertices for the left and right hemispheres. Note that specifying the vertices (e.g., grade=[np.arange(10242), np.arange(10242)] for fsaverage on a standard grade 5 source space) can be substantially faster than computing vertex locations. If one array is used, it is assumed that all vertices belong to the hemisphere of the label. To create a label filling the surface, use None. 
%(subjects_dir)s %(n_jobs)s %(verbose_meth)s Returns ------- label : instance of Label The smoothed label. Notes ----- This function will set label.pos to be all zeros. If the positions on the new surface are required, consider using mne.read_surface with ``label.vertices``. """ subject = _check_subject(self.subject, subject) return self.morph(subject, subject, smooth, grade, subjects_dir, n_jobs, verbose) @verbose def morph(self, subject_from=None, subject_to=None, smooth=5, grade=None, subjects_dir=None, n_jobs=1, verbose=None): """Morph the label. Useful for transforming a label from one subject to another. Parameters ---------- subject_from : str | None The name of the subject of the current label. If None, the initial subject will be taken from self.subject. subject_to : str The name of the subject to morph the label to. This will be put in label.subject of the output label file. smooth : int Number of iterations for the smoothing of the surface data. Cannot be None here since not all vertices are used. grade : int, list of shape (2,), array, or None Resolution of the icosahedral mesh (typically 5). If None, all vertices will be used (potentially filling the surface). If a list, values will be morphed to the set of vertices specified in grade[0] and grade[1], assuming that these are vertices for the left and right hemispheres. Note that specifying the vertices (e.g., ``grade=[np.arange(10242), np.arange(10242)]`` for fsaverage on a standard grade 5 source space) can be substantially faster than computing vertex locations. If one array is used, it is assumed that all vertices belong to the hemisphere of the label. To create a label filling the surface, use None. %(subjects_dir)s %(n_jobs)s %(verbose_meth)s Returns ------- label : instance of Label The morphed label. See Also -------- mne.morph_labels : Morph a set of labels. Notes ----- This function will set label.pos to be all zeros. If the positions on the new surface are required, consider using `mne.read_surface` with ``label.vertices``. """ from .morph import compute_source_morph, grade_to_vertices subject_from = _check_subject(self.subject, subject_from) if not isinstance(subject_to, str): raise TypeError('"subject_to" must be entered as a string') if not isinstance(smooth, int): raise TypeError('smooth must be an integer') if np.all(self.values == 0): raise ValueError('Morphing label with all zero values will result ' 'in the label having no vertices. Consider using ' 'something like label.values.fill(1.0).') idx = 0 if self.hemi == 'lh' else 1 if isinstance(grade, np.ndarray): grade_ = [np.array([], int)] * 2 grade_[idx] = grade grade = grade_ del grade_ grade = grade_to_vertices(subject_to, grade, subjects_dir=subjects_dir) spacing = [np.array([], int)] * 2 spacing[idx] = grade[idx] vertices = [np.array([], int)] * 2 vertices[idx] = self.vertices data = self.values[:, np.newaxis] assert len(data) == sum(len(v) for v in vertices) stc = SourceEstimate(data, vertices, tmin=1, tstep=1, subject=subject_from) stc = compute_source_morph( stc, subject_from, subject_to, spacing=spacing, smooth=smooth, subjects_dir=subjects_dir, warn=False).apply(stc) inds = np.nonzero(stc.data)[0] self.values = stc.data[inds, :].ravel() self.pos = np.zeros((len(inds), 3)) self.vertices = stc.vertices[idx][inds] self.subject = subject_to return self @fill_doc def split(self, parts=2, subject=None, subjects_dir=None, freesurfer=False): """Split the Label into two or more parts. 
Parameters ---------- parts : int >= 2 | tuple of str | str Number of labels to create (default is 2), or tuple of strings specifying label names for new labels (from posterior to anterior), or 'contiguous' to split the label into connected components. If a number or 'contiguous' is specified, names of the new labels will be the input label's name with div1, div2 etc. appended. subject : None | str Subject which this label belongs to (needed to locate surface file; should only be specified if it is not specified in the label). %(subjects_dir)s freesurfer : bool By default (``False``) ``split_label`` uses an algorithm that is slightly optimized for performance and numerical precision. Set ``freesurfer`` to ``True`` in order to replicate label splits from FreeSurfer's ``mris_divide_parcellation``. Returns ------- labels : list of Label, shape (n_parts,) The labels, starting from the lowest to the highest end of the projection axis. Notes ----- If using 'contiguous' split, you must ensure that the label being split uses the same triangular resolution as the surface mesh files in ``subjects_dir`` Also, some small fringe labels may be returned that are close (but not connected) to the large components. The spatial split finds the label's principal eigen-axis on the spherical surface, projects all label vertex coordinates onto this axis, and divides them at regular spatial intervals. """ if isinstance(parts, str) and parts == 'contiguous': return _split_label_contig(self, subject, subjects_dir) elif isinstance(parts, (tuple, int)): return split_label(self, parts, subject, subjects_dir, freesurfer) else: raise ValueError("Need integer, tuple of strings, or string " "('contiguous'). Got %s)" % type(parts)) def get_vertices_used(self, vertices=None): """Get the source space's vertices inside the label. Parameters ---------- vertices : ndarray of int, shape (n_vertices,) | None The set of vertices to compare the label to. If None, equals to ``np.arange(10242)``. Defaults to None. Returns ------- label_verts : ndarray of in, shape (n_label_vertices,) The vertices of the label corresponding used by the data. """ if vertices is None: vertices = np.arange(10242) label_verts = vertices[np.in1d(vertices, self.vertices)] return label_verts def get_tris(self, tris, vertices=None): """Get the source space's triangles inside the label. Parameters ---------- tris : ndarray of int, shape (n_tris, 3) The set of triangles corresponding to the vertices in a source space. vertices : ndarray of int, shape (n_vertices,) | None The set of vertices to compare the label to. If None, equals to ``np.arange(10242)``. Defaults to None. Returns ------- label_tris : ndarray of int, shape (n_tris, 3) The subset of tris used by the label. """ vertices_ = self.get_vertices_used(vertices) selection = np.all(np.in1d(tris, vertices_).reshape(tris.shape), axis=1) label_tris = tris[selection] if len(np.unique(label_tris)) < len(vertices_): logger.info('Surprising label structure. 
Trying to repair ' 'triangles.') dropped_vertices = np.setdiff1d(vertices_, label_tris) n_dropped = len(dropped_vertices) assert n_dropped == (len(vertices_) - len(np.unique(label_tris))) # put missing vertices as extra zero-length triangles add_tris = (dropped_vertices + np.zeros((len(dropped_vertices), 3), dtype=int).T) label_tris = np.r_[label_tris, add_tris.T] assert len(np.unique(label_tris)) == len(vertices_) return label_tris @fill_doc def center_of_mass(self, subject=None, restrict_vertices=False, subjects_dir=None, surf='sphere'): """Compute the center of mass of the label. This function computes the spatial center of mass on the surface as in :footcite:`LarsonLee2013`. Parameters ---------- subject : str | None The subject the label is defined for. restrict_vertices : bool | array of int | instance of SourceSpaces If True, returned vertex will be one from the label. Otherwise, it could be any vertex from surf. If an array of int, the returned vertex will come from that array. If instance of SourceSpaces (as of 0.13), the returned vertex will be from the given source space. For most accuruate estimates, do not restrict vertices. %(subjects_dir)s surf : str The surface to use for Euclidean distance center of mass finding. The default here is "sphere", which finds the center of mass on the spherical surface to help avoid potential issues with cortical folding. Returns ------- vertex : int Vertex of the spatial center of mass for the inferred hemisphere, with each vertex weighted by its label value. See Also -------- SourceEstimate.center_of_mass vertex_to_mni Notes ----- .. versionadded:: 0.13 References ---------- .. footbibliography:: """ if not isinstance(surf, str): raise TypeError('surf must be a string, got %s' % (type(surf),)) subject = _check_subject(self.subject, subject) if np.any(self.values < 0): raise ValueError('Cannot compute COM with negative values') if np.all(self.values == 0): raise ValueError('Cannot compute COM with all values == 0. For ' 'structural labels, consider setting to ones via ' 'label.values[:] = 1.') vertex = _center_of_mass(self.vertices, self.values, self.hemi, surf, subject, subjects_dir, restrict_vertices) return vertex def _get_label_src(label, src): _validate_type(src, SourceSpaces, 'src') if src.kind != 'surface': raise RuntimeError('Cannot operate on SourceSpaces that are not ' 'surface type, got %s' % (src.kind,)) if label.hemi == 'lh': hemi_src = src[0] else: hemi_src = src[1] return hemi_src class BiHemiLabel(object): """A freesurfer/MNE label with vertices in both hemispheres. Parameters ---------- lh : Label Label for the left hemisphere. rh : Label Label for the right hemisphere. name : None | str Name for the label. color : None | color Label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red). Note that due to file specification limitations, the color isn't saved to or loaded from files written to disk. Attributes ---------- lh : Label Label for the left hemisphere. rh : Label Label for the right hemisphere. name : None | str A name for the label. It is OK to change that attribute manually. subject : str | None Subject the label is from. 
""" def __init__(self, lh, rh, name=None, color=None): # noqa: D102 if lh.subject != rh.subject: raise ValueError('lh.subject (%s) and rh.subject (%s) must ' 'agree' % (lh.subject, rh.subject)) self.lh = lh self.rh = rh self.name = name self.subject = lh.subject self.color = color self.hemi = 'both' def __repr__(self): # noqa: D105 temp = "<BiHemiLabel | %s, lh : %i vertices, rh : %i vertices>" name = 'unknown, ' if self.subject is None else self.subject + ', ' name += repr(self.name) if self.name is not None else "unnamed" return temp % (name, len(self.lh), len(self.rh)) def __len__(self): """Return the number of vertices. Returns ------- n_vertices : int The number of vertices. """ return len(self.lh) + len(self.rh) def __add__(self, other): """Add labels.""" if isinstance(other, Label): if other.hemi == 'lh': lh = self.lh + other rh = self.rh else: lh = self.lh rh = self.rh + other elif isinstance(other, BiHemiLabel): lh = self.lh + other.lh rh = self.rh + other.rh else: raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other) name = '%s + %s' % (self.name, other.name) color = _blend_colors(self.color, other.color) return BiHemiLabel(lh, rh, name, color) def __sub__(self, other): """Subtract labels.""" _validate_type(other, (Label, BiHemiLabel), 'other') if isinstance(other, Label): if other.hemi == 'lh': lh = self.lh - other rh = self.rh else: rh = self.rh - other lh = self.lh else: # isinstance(other, BiHemiLabel) lh = self.lh - other.lh rh = self.rh - other.rh if len(lh.vertices) == 0: return rh elif len(rh.vertices) == 0: return lh else: name = '%s - %s' % (self.name, other.name) return BiHemiLabel(lh, rh, name, self.color) def read_label(filename, subject=None, color=None): """Read FreeSurfer Label file. Parameters ---------- filename : str Path to label file. subject : str | None Name of the subject the data are defined for. It is good practice to set this attribute to avoid combining incompatible labels and SourceEstimates (e.g., ones from other subjects). Note that due to file specification limitations, the subject name isn't saved to or loaded from files written to disk. color : None | matplotlib color Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red). Note that due to file specification limitations, the color isn't saved to or loaded from files written to disk. Returns ------- label : Label Instance of Label object with attributes: - ``comment``: comment from the first line of the label file - ``vertices``: vertex indices (0 based, column 1) - ``pos``: locations in meters (columns 2 - 4 divided by 1000) - ``values``: values at the vertices (column 5) See Also -------- read_labels_from_annot write_labels_to_annot """ if subject is not None and not isinstance(subject, str): raise TypeError('subject must be a string') # find hemi basename = op.basename(filename) if basename.endswith('lh.label') or basename.startswith('lh.'): hemi = 'lh' elif basename.endswith('rh.label') or basename.startswith('rh.'): hemi = 'rh' else: raise ValueError('Cannot find which hemisphere it is. 
File should end' ' with lh.label or rh.label: %s' % (basename,)) # find name if basename.startswith(('lh.', 'rh.')): basename_ = basename[3:] if basename.endswith('.label'): basename_ = basename[:-6] else: basename_ = basename[:-9] name = "%s-%s" % (basename_, hemi) # read the file with open(filename, 'r') as fid: comment = fid.readline().replace('\n', '')[1:] nv = int(fid.readline()) data = np.empty((5, nv)) for i, line in enumerate(fid): data[:, i] = line.split() # let's make sure everything is ordered correctly vertices = np.array(data[0], dtype=np.int32) pos = 1e-3 * data[1:4].T values = data[4] order = np.argsort(vertices) vertices = vertices[order] pos = pos[order] values = values[order] label = Label(vertices, pos, values, hemi, comment, name, filename, subject, color) return label @verbose def write_label(filename, label, verbose=None): """Write a FreeSurfer label. Parameters ---------- filename : str Path to label file to produce. label : Label The label object to save. %(verbose)s See Also -------- write_labels_to_annot Notes ----- Note that due to file specification limitations, the Label's subject and color attributes are not saved to disk. """ hemi = label.hemi path_head, name = op.split(filename) if name.endswith('.label'): name = name[:-6] if not (name.startswith(hemi) or name.endswith(hemi)): name += '-' + hemi filename = op.join(path_head, name) + '.label' logger.info('Saving label to : %s' % filename) with open(filename, 'wb') as fid: n_vertices = len(label.vertices) data = np.zeros((n_vertices, 5), dtype=np.float64) data[:, 0] = label.vertices data[:, 1:4] = 1e3 * label.pos data[:, 4] = label.values fid.write(b'#%s\n' % label.comment.encode()) fid.write(b'%d\n' % n_vertices) for d in data: fid.write(b'%d %f %f %f %f\n' % tuple(d)) def _prep_label_split(label, subject=None, subjects_dir=None): """Get label and subject information prior to label splitting.""" # If necessary, find the label if isinstance(label, BiHemiLabel): raise TypeError("Can only split labels restricted to one hemisphere.") elif isinstance(label, str): label = read_label(label) # Find the subject subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) if label.subject is None and subject is None: raise ValueError("The subject needs to be specified.") elif subject is None: subject = label.subject elif label.subject is None: pass elif subject != label.subject: raise ValueError("The label specifies a different subject (%r) from " "the subject parameter (%r)." % label.subject, subject) return label, subject, subjects_dir def _split_label_contig(label_to_split, subject=None, subjects_dir=None): """Split label into contiguous regions (i.e., connected components). Parameters ---------- label_to_split : Label | str Label which is to be split (Label object or path to a label file). subject : None | str Subject which this label belongs to (needed to locate surface file; should only be specified if it is not specified in the label). %(subjects_dir)s Returns ------- labels : list of Label The contiguous labels, in order of descending size. 
""" # Convert to correct input if necessary label_to_split, subject, subjects_dir = _prep_label_split(label_to_split, subject, subjects_dir) # Find the spherical surface to get vertices and tris surf_fname = '.'.join((label_to_split.hemi, 'sphere')) surf_path = op.join(subjects_dir, subject, 'surf', surf_fname) surface_points, surface_tris = read_surface(surf_path) # Get vertices we want to keep and compute mesh edges verts_arr = label_to_split.vertices edges_all = mesh_edges(surface_tris) # Subselect rows and cols of vertices that belong to the label select_edges = edges_all[verts_arr][:, verts_arr].tocoo() # Compute connected components and store as lists of vertex numbers comp_labels = _get_components(verts_arr, select_edges) # Convert to indices in the original surface space label_divs = [] for comp in comp_labels: label_divs.append(verts_arr[comp]) # Construct label division names n_parts = len(label_divs) if label_to_split.name.endswith(('lh', 'rh')): basename = label_to_split.name[:-3] name_ext = label_to_split.name[-3:] else: basename = label_to_split.name name_ext = '' name_pattern = "%s_div%%i%s" % (basename, name_ext) names = tuple(name_pattern % i for i in range(1, n_parts + 1)) # Colors if label_to_split.color is None: colors = (None,) * n_parts else: colors = _split_colors(label_to_split.color, n_parts) # Sort label divisions by their size (in vertices) label_divs.sort(key=lambda x: len(x), reverse=True) labels = [] for div, name, color in zip(label_divs, names, colors): # Get indices of dipoles within this division of the label verts = np.array(sorted(list(div)), int) vert_indices = np.in1d(verts_arr, verts, assume_unique=True) # Set label attributes pos = label_to_split.pos[vert_indices] values = label_to_split.values[vert_indices] hemi = label_to_split.hemi comment = label_to_split.comment lbl = Label(verts, pos, values, hemi, comment, name, None, subject, color) labels.append(lbl) return labels @fill_doc def split_label(label, parts=2, subject=None, subjects_dir=None, freesurfer=False): """Split a Label into two or more parts. Parameters ---------- label : Label | str Label which is to be split (Label object or path to a label file). parts : int >= 2 | tuple of str A sequence of strings specifying label names for the new labels (from posterior to anterior), or the number of new labels to create (default is 2). If a number is specified, names of the new labels will be the input label's name with div1, div2 etc. appended. subject : None | str Subject which this label belongs to (needed to locate surface file; should only be specified if it is not specified in the label). %(subjects_dir)s freesurfer : bool By default (``False``) ``split_label`` uses an algorithm that is slightly optimized for performance and numerical precision. Set ``freesurfer`` to ``True`` in order to replicate label splits from FreeSurfer's ``mris_divide_parcellation``. Returns ------- labels : list of Label, shape (n_parts,) The labels, starting from the lowest to the highest end of the projection axis. Notes ----- Works by finding the label's principal eigen-axis on the spherical surface, projecting all label vertex coordinates onto this axis and dividing them at regular spatial intervals. 
""" from scipy import linalg label, subject, subjects_dir = _prep_label_split(label, subject, subjects_dir) # find the parts if np.isscalar(parts): n_parts = int(parts) if label.name.endswith(('lh', 'rh')): basename = label.name[:-3] name_ext = label.name[-3:] else: basename = label.name name_ext = '' name_pattern = "%s_div%%i%s" % (basename, name_ext) names = tuple(name_pattern % i for i in range(1, n_parts + 1)) else: names = parts n_parts = len(names) if n_parts < 2: raise ValueError("Can't split label into %i parts" % n_parts) # find the spherical surface surf_fname = '.'.join((label.hemi, 'sphere')) surf_path = op.join(subjects_dir, subject, "surf", surf_fname) surface_points, surface_tris = read_surface(surf_path) # find the label coordinates on the surface points = surface_points[label.vertices] center = np.mean(points, axis=0) centered_points = points - center # find the label's normal if freesurfer: # find the Freesurfer vertex closest to the center distance = np.sqrt(np.sum(centered_points ** 2, axis=1)) i_closest = np.argmin(distance) closest_vertex = label.vertices[i_closest] # find the normal according to freesurfer convention idx = np.any(surface_tris == closest_vertex, axis=1) tris_for_normal = surface_tris[idx] r1 = surface_points[tris_for_normal[:, 0], :] r2 = surface_points[tris_for_normal[:, 1], :] r3 = surface_points[tris_for_normal[:, 2], :] tri_normals = fast_cross_3d((r2 - r1), (r3 - r1)) normal = np.mean(tri_normals, axis=0) normal /= linalg.norm(normal) else: # Normal of the center normal = center / linalg.norm(center) # project all vertex coordinates on the tangential plane for this point q, _ = linalg.qr(normal[:, np.newaxis]) tangent_u = q[:, 1:] m_obs = np.dot(centered_points, tangent_u) # find principal eigendirection m_cov = np.dot(m_obs.T, m_obs) w, vr = linalg.eig(m_cov) i = np.argmax(w) eigendir = vr[:, i] # project back into 3d space axis = np.dot(tangent_u, eigendir) # orient them from posterior to anterior if axis[1] < 0: axis *= -1 # project the label on the axis proj = np.dot(points, axis) # assign mark (new label index) proj -= proj.min() proj /= (proj.max() / n_parts) mark = proj // 1 mark[mark == n_parts] = n_parts - 1 # colors if label.color is None: colors = (None,) * n_parts else: colors = _split_colors(label.color, n_parts) # construct new labels labels = [] for i, name, color in zip(range(n_parts), names, colors): idx = (mark == i) vert = label.vertices[idx] pos = label.pos[idx] values = label.values[idx] hemi = label.hemi comment = label.comment lbl = Label(vert, pos, values, hemi, comment, name, None, subject, color) labels.append(lbl) return labels def label_sign_flip(label, src): """Compute sign for label averaging. Parameters ---------- label : Label | BiHemiLabel A label. src : SourceSpaces The source space over which the label is defined. Returns ------- flip : array Sign flip vector (contains 1 or -1). 
""" from scipy import linalg if len(src) != 2: raise ValueError('Only source spaces with 2 hemisphers are accepted') lh_vertno = src[0]['vertno'] rh_vertno = src[1]['vertno'] # get source orientations ori = list() if label.hemi in ('lh', 'both'): vertices = label.vertices if label.hemi == 'lh' else label.lh.vertices vertno_sel = np.intersect1d(lh_vertno, vertices) ori.append(src[0]['nn'][vertno_sel]) if label.hemi in ('rh', 'both'): vertices = label.vertices if label.hemi == 'rh' else label.rh.vertices vertno_sel = np.intersect1d(rh_vertno, vertices) ori.append(src[1]['nn'][vertno_sel]) if len(ori) == 0: raise Exception('Unknown hemisphere type "%s"' % (label.hemi,)) ori = np.concatenate(ori, axis=0) if len(ori) == 0: return np.array([], int) _, _, Vh = linalg.svd(ori, full_matrices=False) # The sign of Vh is ambiguous, so we should align to the max-positive # (outward) direction dots = np.dot(ori, Vh[0]) if np.mean(dots) < 0: dots *= -1 # Comparing to the direction of the first right singular vector flip = np.sign(dots) return flip @verbose def stc_to_label(stc, src=None, smooth=True, connected=False, subjects_dir=None, verbose=None): """Compute a label from the non-zero sources in an stc object. Parameters ---------- stc : SourceEstimate The source estimates. src : SourceSpaces | str | None The source space over which the source estimates are defined. If it's a string it should the subject name (e.g. fsaverage). Can be None if stc.subject is not None. smooth : bool Fill in vertices on the cortical surface that are not in the source space based on the closest source space vertex (requires src to be a SourceSpace). connected : bool If True a list of connected labels will be returned in each hemisphere. The labels are ordered in decreasing order depending of the maximum value in the stc. %(subjects_dir)s %(verbose)s Returns ------- labels : list of Label | list of list of Label The generated labels. If connected is False, it returns a list of Labels (one per hemisphere). If no Label is available in a hemisphere, None is returned. If connected is True, it returns for each hemisphere a list of connected labels ordered in decreasing order depending of the maximum value in the stc. If no Label is available in an hemisphere, an empty list is returned. """ if not isinstance(smooth, bool): raise ValueError('smooth should be True or False. Got %s.' 
% smooth) src = stc.subject if src is None else src if src is None: raise ValueError('src cannot be None if stc.subject is None') if isinstance(src, str): subject = src else: subject = stc.subject if not isinstance(stc, SourceEstimate): raise ValueError('SourceEstimate should be surface source estimates') if isinstance(src, str): if connected: raise ValueError('The option to return only connected labels is ' 'only available if source spaces are provided.') if smooth: msg = ("stc_to_label with smooth=True requires src to be an " "instance of SourceSpace") raise ValueError(msg) subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) surf_path_from = op.join(subjects_dir, src, 'surf') rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white')) rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white')) rr = [rr_lh, rr_rh] tris = [tris_lh, tris_rh] else: if not isinstance(src, SourceSpaces): raise TypeError('src must be a string or a set of source spaces') if len(src) != 2: raise ValueError('source space should contain the 2 hemispheres') rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']] tris = [src[0]['tris'], src[1]['tris']] src_conn = spatial_src_adjacency(src).tocsr() labels = [] cnt = 0 cnt_full = 0 for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate( zip(['lh', 'rh'], stc.vertices, tris, rr)): this_data = stc.data[cnt:cnt + len(this_vertno)] if connected: # we know src *must* be a SourceSpaces now vertno = np.where(src[hemi_idx]['inuse'])[0] if not len(np.setdiff1d(this_vertno, vertno)) == 0: raise RuntimeError('stc contains vertices not present ' 'in source space, did you morph?') tmp = np.zeros((len(vertno), this_data.shape[1])) this_vertno_idx = np.searchsorted(vertno, this_vertno) tmp[this_vertno_idx] = this_data this_data = tmp offset = cnt_full + len(this_data) this_src_adj = src_conn[cnt_full:offset, cnt_full:offset].tocoo() this_data_abs_max = np.abs(this_data).max(axis=1) clusters, _ = _find_clusters(this_data_abs_max, 0., adjacency=this_src_adj) cnt_full += len(this_data) # Then order clusters in descending order based on maximum value clusters_max = np.argsort([np.max(this_data_abs_max[c]) for c in clusters])[::-1] clusters = [clusters[k] for k in clusters_max] clusters = [vertno[c] for c in clusters] else: clusters = [this_vertno[np.any(this_data, axis=1)]] cnt += len(this_vertno) clusters = [c for c in clusters if len(c) > 0] if len(clusters) == 0: if not connected: this_labels = None else: this_labels = [] else: this_labels = [] colors = _n_colors(len(clusters)) for c, color in zip(clusters, colors): idx_use = c label = Label(idx_use, this_rr[idx_use], None, hemi, 'Label from stc', subject=subject, color=color) if smooth: label = label.fill(src) this_labels.append(label) if not connected: this_labels = this_labels[0] labels.append(this_labels) return labels def _verts_within_dist(graph, sources, max_dist): """Find all vertices wihin a maximum geodesic distance from source. Parameters ---------- graph : scipy.sparse.csr_matrix Sparse matrix with distances between adjacent vertices. sources : list of int Source vertices. max_dist : float Maximum geodesic distance. Returns ------- verts : array Vertices within max_dist. dist : array Distances from source vertex. 
""" dist_map = {} verts_added_last = [] for source in sources: dist_map[source] = 0 verts_added_last.append(source) # add neighbors until no more neighbors within max_dist can be found while len(verts_added_last) > 0: verts_added = [] for i in verts_added_last: v_dist = dist_map[i] row = graph[i, :] neighbor_vert = row.indices neighbor_dist = row.data for j, d in zip(neighbor_vert, neighbor_dist): n_dist = v_dist + d if j in dist_map: if n_dist < dist_map[j]: dist_map[j] = n_dist else: if n_dist <= max_dist: dist_map[j] = n_dist # we found a new vertex within max_dist verts_added.append(j) verts_added_last = verts_added verts = np.sort(np.array(list(dist_map.keys()), int)) dist = np.array([dist_map[v] for v in verts], int) return verts, dist def _grow_labels(seeds, extents, hemis, names, dist, vert, subject): """Parallelize grow_labels.""" labels = [] for seed, extent, hemi, name in zip(seeds, extents, hemis, names): label_verts, label_dist = _verts_within_dist(dist[hemi], seed, extent) # create a label if len(seed) == 1: seed_repr = str(seed) else: seed_repr = ','.join(map(str, seed)) comment = 'Circular label: seed=%s, extent=%0.1fmm' % (seed_repr, extent) label = Label(vertices=label_verts, pos=vert[hemi][label_verts], values=label_dist, hemi=hemi, comment=comment, name=str(name), subject=subject) labels.append(label) return labels @fill_doc def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1, overlap=True, names=None, surface='white', colors=None): """Generate circular labels in source space with region growing. This function generates a number of labels in source space by growing regions starting from the vertices defined in "seeds". For each seed, a label is generated containing all vertices within a maximum geodesic distance on the white matter surface from the seed. Parameters ---------- subject : str Name of the subject as in SUBJECTS_DIR. seeds : int | list Seed, or list of seeds. Each seed can be either a vertex number or a list of vertex numbers. extents : array | float Extents (radius in mm) of the labels. hemis : array | int Hemispheres to use for the labels (0: left, 1: right). %(subjects_dir)s %(n_jobs)s Likely only useful if tens or hundreds of labels are being expanded simultaneously. Does not apply with ``overlap=False``. overlap : bool Produce overlapping labels. If True (default), the resulting labels can be overlapping. If False, each label will be grown one step at a time, and occupied territory will not be invaded. names : None | list of str Assign names to the new labels (list needs to have the same length as seeds). surface : str The surface used to grow the labels, defaults to the white surface. colors : array, shape (n, 4) or (, 4) | None How to assign colors to each label. If None then unique colors will be chosen automatically (default), otherwise colors will be broadcast from the array. The first three values will be interpreted as RGB colors and the fourth column as the alpha value (commonly 1). Returns ------- labels : list of Label The labels' ``comment`` attribute contains information on the seed vertex and extent; the ``values`` attribute contains distance from the seed in millimeters. Notes ----- "extents" and "hemis" can either be arrays with the same length as seeds, which allows using a different extent and hemisphere for label, or integers, in which case the same extent and hemisphere is used for each label. 
""" subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) n_jobs = check_n_jobs(n_jobs) # make sure the inputs are arrays if np.isscalar(seeds): seeds = [seeds] seeds = [np.atleast_1d(seed) for seed in seeds] extents = np.atleast_1d(extents) hemis = np.atleast_1d(hemis) n_seeds = len(seeds) if len(extents) != 1 and len(extents) != n_seeds: raise ValueError('The extents parameter has to be of length 1 or ' 'len(seeds)') if len(hemis) != 1 and len(hemis) != n_seeds: raise ValueError('The hemis parameter has to be of length 1 or ' 'len(seeds)') if colors is not None: if len(colors.shape) == 1: # if one color for all seeds n_colors = 1 n = colors.shape[0] else: n_colors, n = colors.shape if n_colors != n_seeds and n_colors != 1: msg = ('Number of colors (%d) and seeds (%d) are not compatible.' % (n_colors, n_seeds)) raise ValueError(msg) if n != 4: msg = 'Colors must have 4 values (RGB and alpha), not %d.' % n raise ValueError(msg) # make the arrays the same length as seeds if len(extents) == 1: extents = np.tile(extents, n_seeds) if len(hemis) == 1: hemis = np.tile(hemis, n_seeds) hemis = np.array(['lh' if h == 0 else 'rh' for h in hemis]) # names if names is None: names = ["Label_%i-%s" % items for items in enumerate(hemis)] else: if np.isscalar(names): names = [names] if len(names) != n_seeds: raise ValueError('The names parameter has to be None or have ' 'length len(seeds)') for i, hemi in enumerate(hemis): if not names[i].endswith(hemi): names[i] = '-'.join((names[i], hemi)) names = np.array(names) # load the surfaces and create the distance graphs tris, vert, dist = {}, {}, {} for hemi in set(hemis): surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' + surface) vert[hemi], tris[hemi] = read_surface(surf_fname) dist[hemi] = mesh_dist(tris[hemi], vert[hemi]) if overlap: # create the patches parallel, my_grow_labels, _ = parallel_func(_grow_labels, n_jobs) seeds = np.array_split(np.array(seeds, dtype='O'), n_jobs) extents = np.array_split(extents, n_jobs) hemis = np.array_split(hemis, n_jobs) names = np.array_split(names, n_jobs) labels = sum(parallel(my_grow_labels(s, e, h, n, dist, vert, subject) for s, e, h, n in zip(seeds, extents, hemis, names)), []) else: # special procedure for non-overlapping labels labels = _grow_nonoverlapping_labels(subject, seeds, extents, hemis, vert, dist, names) if colors is None: # add a unique color to each label label_colors = _n_colors(len(labels)) else: # use specified colors label_colors = np.empty((len(labels), 4)) label_colors[:] = colors for label, color in zip(labels, label_colors): label.color = color return labels def _grow_nonoverlapping_labels(subject, seeds_, extents_, hemis, vertices_, graphs, names_): """Grow labels while ensuring that they don't overlap.""" labels = [] for hemi in set(hemis): hemi_index = (hemis == hemi) seeds = [seed for seed, h in zip(seeds_, hemis) if h == hemi] extents = extents_[hemi_index] names = names_[hemi_index] graph = graphs[hemi] # distance graph n_vertices = len(vertices_[hemi]) n_labels = len(seeds) # prepare parcellation parc = np.empty(n_vertices, dtype='int32') parc[:] = -1 # initialize active sources sources = {} # vert -> (label, dist_from_seed) edge = [] # queue of vertices to process for label, seed in enumerate(seeds): if np.any(parc[seed] >= 0): raise ValueError("Overlapping seeds") parc[seed] = label for s in np.atleast_1d(seed): sources[s] = (label, 0.) 
edge.append(s) # grow from sources while edge: vert_from = edge.pop(0) label, old_dist = sources[vert_from] # add neighbors within allowable distance row = graph[vert_from, :] for vert_to, dist in zip(row.indices, row.data): # Prevent adding a point that has already been used # (prevents infinite loop) if (vert_to == seeds[label]).any(): continue new_dist = old_dist + dist # abort if outside of extent if new_dist > extents[label]: continue vert_to_label = parc[vert_to] if vert_to_label >= 0: _, vert_to_dist = sources[vert_to] # abort if the vertex is occupied by a closer seed if new_dist > vert_to_dist: continue elif vert_to in edge: edge.remove(vert_to) # assign label value parc[vert_to] = label sources[vert_to] = (label, new_dist) edge.append(vert_to) # convert parc to labels for i in range(n_labels): vertices = np.nonzero(parc == i)[0] name = str(names[i]) label_ = Label(vertices, hemi=hemi, name=name, subject=subject) labels.append(label_) return labels @fill_doc def random_parcellation(subject, n_parcel, hemi, subjects_dir=None, surface='white', random_state=None): """Generate random cortex parcellation by growing labels. This function generates a number of labels which don't intersect and cover the whole surface. Regions are growing around randomly chosen seeds. Parameters ---------- subject : str Name of the subject as in SUBJECTS_DIR. n_parcel : int Total number of cortical parcels. hemi : str Hemisphere id (ie 'lh', 'rh', 'both'). In the case of 'both', both hemispheres are processed with (n_parcel // 2) parcels per hemisphere. %(subjects_dir)s surface : str The surface used to grow the labels, defaults to the white surface. %(random_state)s Returns ------- labels : list of Label Random cortex parcellation. """ subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) if hemi == 'both': hemi = ['lh', 'rh'] hemis = np.atleast_1d(hemi) # load the surfaces and create the distance graphs tris, vert, dist = {}, {}, {} for hemi in set(hemis): surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' 
+ surface) vert[hemi], tris[hemi] = read_surface(surf_fname) dist[hemi] = mesh_dist(tris[hemi], vert[hemi]) # create the patches labels = _cortex_parcellation(subject, n_parcel, hemis, vert, dist, random_state) # add a unique color to each label colors = _n_colors(len(labels)) for label, color in zip(labels, colors): label.color = color return labels def _cortex_parcellation(subject, n_parcel, hemis, vertices_, graphs, random_state=None): """Random cortex parcellation.""" labels = [] rng = check_random_state(random_state) for hemi in set(hemis): parcel_size = len(hemis) * len(vertices_[hemi]) // n_parcel graph = graphs[hemi] # distance graph n_vertices = len(vertices_[hemi]) # prepare parcellation parc = np.full(n_vertices, -1, dtype='int32') # initialize active sources s = rng.choice(range(n_vertices)) label_idx = 0 edge = [s] # queue of vertices to process parc[s] = label_idx label_size = 1 rest = len(parc) - 1 # grow from sources while rest: # if there are not free neighbors, start new parcel if not edge: rest_idx = np.where(parc < 0)[0] s = rng.choice(rest_idx) edge = [s] label_idx += 1 label_size = 1 parc[s] = label_idx rest -= 1 vert_from = edge.pop(0) # add neighbors within allowable distance # row = graph[vert_from, :] # row_indices, row_data = row.indices, row.data sl = slice(graph.indptr[vert_from], graph.indptr[vert_from + 1]) row_indices, row_data = graph.indices[sl], graph.data[sl] for vert_to, dist in zip(row_indices, row_data): vert_to_label = parc[vert_to] # abort if the vertex is already occupied if vert_to_label >= 0: continue # abort if outside of extent if label_size > parcel_size: label_idx += 1 label_size = 1 edge = [vert_to] parc[vert_to] = label_idx rest -= 1 break # assign label value parc[vert_to] = label_idx label_size += 1 edge.append(vert_to) rest -= 1 # merging small labels # label adjacency matrix n_labels = label_idx + 1 label_sizes = np.empty(n_labels, dtype=int) label_conn = np.zeros([n_labels, n_labels], dtype='bool') for i in range(n_labels): vertices = np.nonzero(parc == i)[0] label_sizes[i] = len(vertices) neighbor_vertices = graph[vertices, :].indices neighbor_labels = np.unique(np.array(parc[neighbor_vertices])) label_conn[i, neighbor_labels] = 1 np.fill_diagonal(label_conn, 0) # merging label_id = range(n_labels) while n_labels > n_parcel // len(hemis): # smallest label and its smallest neighbor i = np.argmin(label_sizes) neighbors = np.nonzero(label_conn[i, :])[0] j = neighbors[np.argmin(label_sizes[neighbors])] # merging two labels label_conn[j, :] += label_conn[i, :] label_conn[:, j] += label_conn[:, i] label_conn = np.delete(label_conn, i, 0) label_conn = np.delete(label_conn, i, 1) label_conn[j, j] = 0 label_sizes[j] += label_sizes[i] label_sizes = np.delete(label_sizes, i, 0) n_labels -= 1 vertices = np.nonzero(parc == label_id[i])[0] parc[vertices] = label_id[j] label_id = np.delete(label_id, i, 0) # convert parc to labels for i in range(n_labels): vertices = np.nonzero(parc == label_id[i])[0] name = 'label_' + str(i) label_ = Label(vertices, hemi=hemi, name=name, subject=subject) labels.append(label_) return labels def _read_annot_cands(dir_name, raise_error=True): """List the candidate parcellations.""" if not op.isdir(dir_name): if not raise_error: return list() raise IOError('Directory for annotation does not exist: %s', dir_name) cands = os.listdir(dir_name) cands = sorted(set(c.replace('lh.', '').replace('rh.', '').replace( '.annot', '') for c in cands if '.annot' in c), key=lambda x: x.lower()) # exclude .ctab files cands = [c for c in 
cands if '.ctab' not in c] return cands def _read_annot(fname): """Read a Freesurfer annotation from a .annot file. Note : Copied from PySurfer Parameters ---------- fname : str Path to annotation file Returns ------- annot : numpy array, shape=(n_verts) Annotation id at each vertex ctab : numpy array, shape=(n_entries, 5) RGBA + label id colortable array names : list of str List of region names as stored in the annot file """ if not op.isfile(fname): dir_name = op.split(fname)[0] cands = _read_annot_cands(dir_name) if len(cands) == 0: raise IOError('No such file %s, no candidate parcellations ' 'found in directory' % fname) else: raise IOError('No such file %s, candidate parcellations in ' 'that directory:\n%s' % (fname, '\n'.join(cands))) with open(fname, "rb") as fid: n_verts = np.fromfile(fid, '>i4', 1)[0] data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2) annot = data[data[:, 0], 1] ctab_exists = np.fromfile(fid, '>i4', 1)[0] if not ctab_exists: raise Exception('Color table not found in annotation file') n_entries = np.fromfile(fid, '>i4', 1)[0] if n_entries > 0: length = np.fromfile(fid, '>i4', 1)[0] np.fromfile(fid, '>c', length) # discard orig_tab names = list() ctab = np.zeros((n_entries, 5), np.int64) for i in range(n_entries): name_length = np.fromfile(fid, '>i4', 1)[0] name = np.fromfile(fid, "|S%d" % name_length, 1)[0] names.append(name) ctab[i, :4] = np.fromfile(fid, '>i4', 4) ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) + ctab[i, 2] * (2 ** 16) + ctab[i, 3] * (2 ** 24)) else: ctab_version = -n_entries if ctab_version != 2: raise Exception('Color table version not supported') n_entries = np.fromfile(fid, '>i4', 1)[0] ctab = np.zeros((n_entries, 5), np.int64) length = np.fromfile(fid, '>i4', 1)[0] np.fromfile(fid, "|S%d" % length, 1) # Orig table path entries_to_read = np.fromfile(fid, '>i4', 1)[0] names = list() for i in range(entries_to_read): np.fromfile(fid, '>i4', 1) # Structure name_length = np.fromfile(fid, '>i4', 1)[0] name = np.fromfile(fid, "|S%d" % name_length, 1)[0] names.append(name) ctab[i, :4] = np.fromfile(fid, '>i4', 4) ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) + ctab[i, 2] * (2 ** 16)) # convert to more common alpha value ctab[:, 3] = 255 - ctab[:, 3] return annot, ctab, names def _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir): """Get the .annot filenames and hemispheres.""" if annot_fname is not None: # we use use the .annot file specified by the user hemis = [op.basename(annot_fname)[:2]] if hemis[0] not in ['lh', 'rh']: raise ValueError('Could not determine hemisphere from filename, ' 'filename has to start with "lh" or "rh".') annot_fname = [annot_fname] else: # construct .annot file names for requested subject, parc, hemi _check_option('hemi', hemi, ['lh', 'rh', 'both']) if hemi == 'both': hemis = ['lh', 'rh'] else: hemis = [hemi] subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) dst = op.join(subjects_dir, subject, 'label', '%%s.%s.annot' % parc) annot_fname = [dst % hemi_ for hemi_ in hemis] return annot_fname, hemis def _load_vert_pos(subject, subjects_dir, surf_name, hemi, n_expected, extra=''): fname_surf = op.join(subjects_dir, subject, 'surf', '%s.%s' % (hemi, surf_name)) vert_pos, _ = read_surface(fname_surf) vert_pos /= 1e3 # the positions in labels are in meters if len(vert_pos) != n_expected: raise RuntimeError('Number of surface vertices (%s) for subject %s' ' does not match the expected number of vertices' '(%s)%s' % (len(vert_pos), subject, n_expected, extra)) return vert_pos @verbose 
def read_labels_from_annot(subject, parc='aparc', hemi='both', surf_name='white', annot_fname=None, regexp=None, subjects_dir=None, sort=True, verbose=None): """Read labels from a FreeSurfer annotation file. Note: Only cortical labels will be returned. Parameters ---------- subject : str The subject for which to read the parcellation. parc : str The parcellation to use, e.g., 'aparc' or 'aparc.a2009s'. hemi : str The hemisphere from which to read the parcellation, can be 'lh', 'rh', or 'both'. surf_name : str Surface used to obtain vertex locations, e.g., 'white', 'pial'. annot_fname : str or None Filename of the .annot file. If not None, only this file is read and 'parc' and 'hemi' are ignored. regexp : str Regular expression or substring to select particular labels from the parcellation. E.g. 'superior' will return all labels in which this substring is contained. %(subjects_dir)s sort : bool If true, labels will be sorted by name before being returned. .. versionadded:: 0.21.0 %(verbose)s Returns ------- labels : list of Label The labels, sorted by label name (ascending). See Also -------- write_labels_to_annot morph_labels """ logger.info('Reading labels from parcellation...') subjects_dir = get_subjects_dir(subjects_dir) # get the .annot filenames and hemispheres annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir) if regexp is not None: # allow for convenient substring match r_ = (re.compile('.*%s.*' % regexp if regexp.replace('_', '').isalnum() else regexp)) # now we are ready to create the labels n_read = 0 labels = list() orig_names = set() for fname, hemi in zip(annot_fname, hemis): # read annotation annot, ctab, label_names = _read_annot(fname) label_rgbas = ctab[:, :4] / 255. label_ids = ctab[:, -1] # load the vertex positions from surface vert_pos = _load_vert_pos( subject, subjects_dir, surf_name, hemi, len(annot), extra='for annotation file %s' % fname) for label_id, label_name, label_rgba in\ zip(label_ids, label_names, label_rgbas): vertices = np.where(annot == label_id)[0] if len(vertices) == 0: # label is not part of cortical surface continue label_name = label_name.decode() orig_names.add(label_name) name = f'{label_name}-{hemi}' if (regexp is not None) and not r_.match(name): continue pos = vert_pos[vertices, :] label = Label(vertices, pos, hemi=hemi, name=name, subject=subject, color=tuple(label_rgba)) labels.append(label) n_read = len(labels) - n_read logger.info(' read %d labels from %s' % (n_read, fname)) # sort the labels by label name if sort: labels = sorted(labels, key=lambda l: l.name) if len(labels) == 0: msg = 'No labels found.' if regexp is not None: orig_names = '\n'.join(sorted(orig_names)) msg += (f' Maybe the regular expression {repr(regexp)} did not ' f'match any of:\n{orig_names}') raise RuntimeError(msg) return labels def _check_labels_subject(labels, subject, name): _validate_type(labels, (list, tuple), 'labels') for label in labels: _validate_type(label, Label, 'each entry in labels') if subject is None: subject = label.subject if subject is not None: # label.subject can be None, depending on init if subject != label.subject: raise ValueError('Got multiple values of %s: %s and %s' % (name, subject, label.subject)) if subject is None: raise ValueError('if label.subject is None for all labels, ' '%s must be provided' % name) return subject @verbose def morph_labels(labels, subject_to, subject_from=None, subjects_dir=None, surf_name='white', verbose=None): """Morph a set of labels. 
This is useful when morphing a set of non-overlapping labels (such as those obtained with :func:`read_labels_from_annot`) from one subject to another. Parameters ---------- labels : list The labels to morph. subject_to : str The subject to morph labels to. subject_from : str | None The subject to morph labels from. Can be None if the labels have the ``.subject`` property defined. %(subjects_dir)s surf_name : str Surface used to obtain vertex locations, e.g., 'white', 'pial'. %(verbose)s Returns ------- labels : list The morphed labels. See Also -------- read_labels_from_annot mne.Label.morph Notes ----- This does not use the same algorithm as Freesurfer, so the results morphing (e.g., from ``'fsaverage'`` to your subject) might not match what Freesurfer produces during ``recon-all``. .. versionadded:: 0.18 """ subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) subject_from = _check_labels_subject(labels, subject_from, 'subject_from') mmaps = read_morph_map(subject_from, subject_to, subjects_dir) vert_poss = [_load_vert_pos(subject_to, subjects_dir, surf_name, hemi, mmap.shape[0]) for hemi, mmap in zip(('lh', 'rh'), mmaps)] idxs = [mmap.argmax(axis=1) for mmap in mmaps] out_labels = list() values = filename = None for label in labels: li = dict(lh=0, rh=1)[label.hemi] vertices = np.where(np.in1d(idxs[li], label.vertices))[0] pos = vert_poss[li][vertices] out_labels.append( Label(vertices, pos, values, label.hemi, label.comment, label.name, filename, subject_to, label.color, label.verbose)) return out_labels @verbose def labels_to_stc(labels, values, tmin=0, tstep=1, subject=None, src=None, verbose=None): """Convert a set of labels and values to a STC. This function is meant to work like the opposite of `extract_label_time_course`. Parameters ---------- %(eltc_labels)s values : ndarray, shape (n_labels, ...) The values in each label. Can be 1D or 2D. tmin : float The tmin to use for the STC. tstep : float The tstep to use for the STC. subject : str | None The subject for which to create the STC. %(eltc_src)s Can be omitted if using a surface source space, in which case the label vertices will determine the output STC vertices. Required if using a volumetric source space. .. versionadded:: 0.22 %(verbose)s Returns ------- stc : instance of SourceEstimate | instance of VolSourceEstimate The values-in-labels converted to a STC. See Also -------- extract_label_time_course Notes ----- Vertices that appear in more than one label will be averaged. .. 
versionadded:: 0.18 """ values = np.array(values, float) if values.ndim == 1: values = values[:, np.newaxis] if values.ndim != 2: raise ValueError('values must have 1 or 2 dimensions, got %s' % (values.ndim,)) _validate_type(src, (SourceSpaces, None)) if src is None: data, vertices, subject = _labels_to_stc_surf( labels, values, tmin, tstep, subject) klass = SourceEstimate else: kind = src.kind subject = _check_subject( src._subject, subject, first_kind='source space subject', raise_error=False) _check_option('source space kind', kind, ('surface', 'volume')) if kind == 'volume': klass = VolSourceEstimate else: klass = SourceEstimate # Easiest way is to get a dot-able operator and use it vertices = [s['vertno'].copy() for s in src] stc = klass( np.eye(sum(len(v) for v in vertices)), vertices, 0, 1, subject) label_op = extract_label_time_course( stc, labels, src=src, mode='mean', allow_empty=True) _check_values_labels(values, label_op.shape[0]) rev_op = np.zeros(label_op.shape[::-1]) rev_op[np.arange(label_op.shape[1]), np.argmax(label_op, axis=0)] = 1. data = rev_op @ values return klass(data, vertices, tmin, tstep, subject, verbose) def _check_values_labels(values, n_labels): if n_labels != len(values): raise ValueError( f'values.shape[0] ({values.shape[0]}) must match the number of ' f'labels ({n_labels})') def _labels_to_stc_surf(labels, values, tmin, tstep, subject): from scipy import sparse subject = _check_labels_subject(labels, subject, 'subject') _check_values_labels(values, len(labels)) vertices = dict(lh=[], rh=[]) data = dict(lh=[], rh=[]) for li, label in enumerate(labels): data[label.hemi].append( np.repeat(values[li][np.newaxis], len(label.vertices), axis=0)) vertices[label.hemi].append(label.vertices) hemis = ('lh', 'rh') for hemi in hemis: vertices[hemi] = np.concatenate(vertices[hemi], axis=0) data[hemi] = np.concatenate(data[hemi], axis=0).astype(float) cols = np.arange(len(vertices[hemi])) vertices[hemi], rows = np.unique(vertices[hemi], return_inverse=True) mat = sparse.coo_matrix((np.ones(len(rows)), (rows, cols))).tocsr() mat = mat * sparse.diags(1. 
/ np.asarray(mat.sum(axis=-1))[:, 0]) data[hemi] = mat.dot(data[hemi]) vertices = [vertices[hemi] for hemi in hemis] data = np.concatenate([data[hemi] for hemi in hemis], axis=0) return data, vertices, subject _DEFAULT_TABLE_NAME = 'MNE-Python Colortable' def _write_annot(fname, annot, ctab, names, table_name=_DEFAULT_TABLE_NAME): """Write a Freesurfer annotation to a .annot file.""" assert len(names) == len(ctab) with open(fname, 'wb') as fid: n_verts = len(annot) np.array(n_verts, dtype='>i4').tofile(fid) data = np.zeros((n_verts, 2), dtype='>i4') data[:, 0] = np.arange(n_verts) data[:, 1] = annot data.ravel().tofile(fid) # indicate that color table exists np.array(1, dtype='>i4').tofile(fid) # color table version 2 np.array(-2, dtype='>i4').tofile(fid) # write color table n_entries = len(ctab) np.array(n_entries, dtype='>i4').tofile(fid) # write our color table name _write_annot_str(fid, table_name) # number of entries to write np.array(n_entries, dtype='>i4').tofile(fid) # write entries for ii, (name, color) in enumerate(zip(names, ctab)): np.array(ii, dtype='>i4').tofile(fid) _write_annot_str(fid, name) np.array(color[:4], dtype='>i4').tofile(fid) def _write_annot_str(fid, s): s = s.encode('ascii') + b'\x00' np.array(len(s), '>i4').tofile(fid) fid.write(s) @verbose def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False, subjects_dir=None, annot_fname=None, colormap='hsv', hemi='both', sort=True, table_name=_DEFAULT_TABLE_NAME, verbose=None): r"""Create a FreeSurfer annotation from a list of labels. Parameters ---------- labels : list with instances of mne.Label The labels to create a parcellation from. subject : str | None The subject for which to write the parcellation. parc : str | None The parcellation name to use. overwrite : bool Overwrite files if they already exist. %(subjects_dir)s annot_fname : str | None Filename of the .annot file. If not None, only this file is written and 'parc' and 'subject' are ignored. colormap : str Colormap to use to generate label colors for labels that do not have a color specified. hemi : 'both' | 'lh' | 'rh' The hemisphere(s) for which to write \*.annot files (only applies if annot_fname is not specified; default is 'both'). sort : bool If True (default), labels will be sorted by name before writing. .. versionadded:: 0.21.0 table_name : str The table name to use for the colortable. .. versionadded:: 0.21.0 %(verbose)s See Also -------- read_labels_from_annot Notes ----- Vertices that are not covered by any of the labels are assigned to a label named "unknown". """ logger.info('Writing labels to parcellation...') subjects_dir = get_subjects_dir(subjects_dir) # get the .annot filenames and hemispheres annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir) if not overwrite: for fname in annot_fname: if op.exists(fname): raise ValueError('File %s exists. 
Use "overwrite=True" to ' 'overwrite it' % fname) # prepare container for data to save: to_save = [] # keep track of issues found in the labels duplicate_colors = [] invalid_colors = [] overlap = [] no_color = (-1, -1, -1, -1) no_color_rgb = (-1, -1, -1) for hemi, fname in zip(hemis, annot_fname): hemi_labels = [label for label in labels if label.hemi == hemi] n_hemi_labels = len(hemi_labels) if n_hemi_labels == 0: ctab = np.empty((0, 4), dtype=np.int32) ctab_rgb = ctab[:, :3] else: if sort: hemi_labels.sort(key=lambda label: label.name) # convert colors to 0-255 RGBA tuples hemi_colors = [no_color if label.color is None else tuple(int(round(255 * i)) for i in label.color) for label in hemi_labels] ctab = np.array(hemi_colors, dtype=np.int32) ctab_rgb = ctab[:, :3] # make color dict (for annot ID, only R, G and B count) labels_by_color = defaultdict(list) for label, color in zip(hemi_labels, ctab_rgb): labels_by_color[tuple(color)].append(label.name) # check label colors for color, names in labels_by_color.items(): if color == no_color_rgb: continue if color == (0, 0, 0): # we cannot have an all-zero color, otherw. e.g. tksurfer # refuses to read the parcellation warn('At least one label contains a color with, "r=0, ' 'g=0, b=0" value. Some FreeSurfer tools may fail ' 'to read the parcellation') if any(i > 255 for i in color): msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi)) invalid_colors.append(msg) if len(names) > 1: msg = "%s: %s (%s)" % (color, ', '.join(names), hemi) duplicate_colors.append(msg) # replace None values (labels with unspecified color) if labels_by_color[no_color_rgb]: default_colors = _n_colors(n_hemi_labels, bytes_=True, cmap=colormap) # keep track of colors known to be in hemi_colors : safe_color_i = 0 for i in range(n_hemi_labels): if ctab[i, 0] == -1: color = default_colors[i] # make sure to add no duplicate color while np.any(np.all(color[:3] == ctab_rgb, 1)): color = default_colors[safe_color_i] safe_color_i += 1 # assign the color ctab[i] = color # find number of vertices in surface if subject is not None and subjects_dir is not None: fpath = op.join(subjects_dir, subject, 'surf', '%s.white' % hemi) points, _ = read_surface(fpath) n_vertices = len(points) else: if len(hemi_labels) > 0: max_vert = max(np.max(label.vertices) for label in hemi_labels) n_vertices = max_vert + 1 else: n_vertices = 1 warn('Number of vertices in the surface could not be ' 'verified because the surface file could not be found; ' 'specify subject and subjects_dir parameters.') # Create annot and color table array to write annot = np.empty(n_vertices, dtype=np.int64) annot[:] = -1 # create the annotation ids from the colors annot_id_coding = np.array((1, 2 ** 8, 2 ** 16)) annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1)) for label, annot_id in zip(hemi_labels, annot_ids): # make sure the label is not overwriting another label if np.any(annot[label.vertices] != -1): other_ids = set(annot[label.vertices]) other_ids.discard(-1) other_indices = (annot_ids.index(i) for i in other_ids) other_names = (hemi_labels[i].name for i in other_indices) other_repr = ', '.join(other_names) msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr) overlap.append(msg) annot[label.vertices] = annot_id hemi_names = [label.name for label in hemi_labels] if None in hemi_names: msg = ("Found %i labels with no name. Writing annotation file" "requires all labels named" % (hemi_names.count(None))) # raise the error immediately rather than crash with an # uninformative error later (e.g. 
cannot join NoneType) raise ValueError(msg) # Assign unlabeled vertices to an "unknown" label unlabeled = (annot == -1) if np.any(unlabeled): msg = ("Assigning %i unlabeled vertices to " "'unknown-%s'" % (unlabeled.sum(), hemi)) logger.info(msg) # find an unused color (try shades of gray first) for i in range(1, 257): if not np.any(np.all((i, i, i) == ctab_rgb, 1)): break if i < 256: color = (i, i, i, 0) else: err = ("Need one free shade of gray for 'unknown' label. " "Please modify your label colors, or assign the " "unlabeled vertices to another label.") raise ValueError(err) # find the id annot_id = np.sum(annot_id_coding * color[:3]) # update data to write annot[unlabeled] = annot_id ctab = np.vstack((ctab, color)) hemi_names.append("unknown") # convert to FreeSurfer alpha values ctab[:, 3] = 255 - ctab[:, 3] # remove hemi ending in names hemi_names = [name[:-3] if name.endswith(hemi) else name for name in hemi_names] to_save.append((fname, annot, ctab, hemi_names)) issues = [] if duplicate_colors: msg = ("Some labels have the same color values (all labels in one " "hemisphere must have a unique color):") duplicate_colors.insert(0, msg) issues.append('\n'.join(duplicate_colors)) if invalid_colors: msg = ("Some labels have invalid color values (all colors should be " "RGBA tuples with values between 0 and 1)") invalid_colors.insert(0, msg) issues.append('\n'.join(invalid_colors)) if overlap: msg = ("Some labels occupy vertices that are also occupied by one or " "more other labels. Each vertex can only be occupied by a " "single label in *.annot files.") overlap.insert(0, msg) issues.append('\n'.join(overlap)) if issues: raise ValueError('\n\n'.join(issues)) # write it for fname, annot, ctab, hemi_names in to_save: logger.info(' writing %d labels to %s' % (len(hemi_names), fname)) _write_annot(fname, annot, ctab, hemi_names, table_name) @fill_doc def select_sources(subject, label, location='center', extent=0., grow_outside=True, subjects_dir=None, name=None, random_state=None, surf='white'): """Select sources from a label. Parameters ---------- %(subject)s label : instance of Label | str Define where the seed will be chosen. If str, can be 'lh' or 'rh', which correspond to left or right hemisphere, respectively. location : 'random' | 'center' | int Location to grow label from. If the location is an int, it represents the vertex number in the corresponding label. If it is a str, it can be either 'random' or 'center'. extent : float Extents (radius in mm) of the labels, i.e. maximum geodesic distance on the white matter surface from the seed. If 0, the resulting label will contain only one vertex. grow_outside : bool Let the region grow outside the original label where location was defined. %(subjects_dir)s name : None | str Assign name to the new label. %(random_state)s surf : str The surface used to simulated the label, defaults to the white surface. Returns ------- label : instance of Label The label that contains the selected sources. Notes ----- This function selects a region of interest on the cortical surface based on a label (or a hemisphere). The sources are selected by growing a region around a seed which is selected randomly, is the center of the label, or is a specific vertex. The selected vertices can extend beyond the initial provided label. This can be prevented by setting grow_outside to False. The selected sources are returned in the form of a new Label object. The values of the label contain the distance from the seed in millimeters. .. 
versionadded:: 0.18 """ # If label is a string, convert it to a label that contains the whole # hemisphere. if isinstance(label, str): _check_option('label', label, ['lh', 'rh']) surf_filename = op.join(subjects_dir, subject, 'surf', label + '.white') vertices, _ = read_surface(surf_filename) indices = np.arange(len(vertices), dtype=int) label = Label(indices, vertices, hemi=label) # Choose the seed according to the selected strategy. if isinstance(location, str): _check_option('location', location, ['center', 'random']) if location == 'center': seed = label.center_of_mass( subject, restrict_vertices=True, subjects_dir=subjects_dir, surf=surf) else: rng = check_random_state(random_state) seed = rng.choice(label.vertices) else: seed = label.vertices[location] hemi = 0 if label.hemi == 'lh' else 1 new_label = grow_labels(subject, seed, extent, hemi, subjects_dir)[0] # We override the name because grow_label automatically adds a -rh or -lh # to the given parameter. new_label.name = name # Restrict the new label to the vertices of the input label if needed. if not grow_outside: to_keep = np.array([v in label.vertices for v in new_label.vertices]) new_label = Label(new_label.vertices[to_keep], new_label.pos[to_keep], hemi=new_label.hemi, name=name, subject=subject) return new_label def find_pos_in_annot(pos, subject='fsaverage', annot='aparc+aseg', subjects_dir=None): """ Find name in atlas for given MRI coordinates. Parameters ---------- pos : ndarray, shape (3,) Vector of x,y,z coordinates in MRI space. subject : str MRI subject name. annot : str MRI volumetric atlas file name. Do not include the ``.mgz`` suffix. subjects_dir : path-like Path to MRI subjects directory. Returns ------- label : str Anatomical region name from atlas. Notes ----- .. versionadded:: 0.24 """ pos = np.asarray(pos, float) if pos.shape != (3,): raise ValueError( 'pos must be an array of shape (3,), ' f'got {pos.shape}') nibabel = _import_nibabel('read MRI parcellations') if subjects_dir is None: subjects_dir = get_subjects_dir(None) atlas_fname = os.path.join(subjects_dir, subject, 'mri', annot + '.mgz') parcellation_img = nibabel.load(atlas_fname) # Load freesurface atlas LUT lut_inv_dict = read_freesurfer_lut()[0] label_lut = {v: k for k, v in lut_inv_dict.items()} # Find voxel for dipole position mri_vox_t = np.linalg.inv(parcellation_img.header.get_vox2ras_tkr()) vox_dip_pos_f = apply_trans(mri_vox_t, pos) vox_dip_pos = np.rint(vox_dip_pos_f).astype(int) # Get voxel value and label from LUT vol_values = parcellation_img.get_fdata()[tuple(vox_dip_pos.T)] label = label_lut.get(vol_values, 'Unknown') return label
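# ---------------------------------------------------------------------------
# A minimal usage sketch for the label utilities defined above. It assumes a
# FreeSurfer subject named 'sample' is available under ``subjects_dir`` and
# that recon-all produced the 'aparc' parcellation; the subject name and the
# path below are illustrative only, not part of this module.
import mne
from mne.label import select_sources

subjects_dir = '/path/to/subjects_dir'  # hypothetical location

# Read an existing cortical parcellation for the left hemisphere.
labels = mne.read_labels_from_annot('sample', parc='aparc', hemi='lh',
                                    subjects_dir=subjects_dir)

# Grow a 10 mm label around a seed vertex on the left hemisphere (hemis=0).
seed_label = mne.grow_labels('sample', seeds=1234, extents=10., hemis=0,
                             subjects_dir=subjects_dir)[0]

# Select sources within 5 mm of the center of the first parcellation label.
roi = select_sources('sample', labels[0], location='center', extent=5.,
                     subjects_dir=subjects_dir)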
# Authors: Eric Larson <larson.eric.d@gmail.com> # # License: BSD (3-clause) import copy import os from os import path as op import shutil import numpy as np from numpy import array_equal from numpy.testing import assert_allclose, assert_array_equal import pytest import mne from mne import (pick_types, read_annotations, create_info, events_from_annotations, make_forward_solution) from mne.transforms import apply_trans from mne.io import read_raw_fif, read_raw_ctf, RawArray from mne.io.compensator import get_current_comp from mne.io.ctf.constants import CTF from mne.io.tests.test_raw import _test_raw_reader from mne.tests.test_annotations import _assert_annotations_equal from mne.utils import _clean_names, catch_logging, _stamp_to_dt from mne.datasets import testing, spm_face, brainstorm from mne.io.constants import FIFF ctf_dir = op.join(testing.data_path(download=False), 'CTF') ctf_fname_continuous = 'testdata_ctf.ds' ctf_fname_1_trial = 'testdata_ctf_short.ds' ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds' ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds' ctf_fname_somato = 'somMDYO-18av.ds' ctf_fname_catch = 'catch-alp-good-f.ds' somato_fname = op.join( brainstorm.bst_raw.data_path(download=False), 'MEG', 'bst_raw', 'subj001_somatosensory_20111109_01_AUX-f.ds' ) block_sizes = { ctf_fname_continuous: 12000, ctf_fname_1_trial: 4801, ctf_fname_2_trials: 12000, ctf_fname_discont: 1201, ctf_fname_somato: 313, ctf_fname_catch: 2500, } single_trials = ( ctf_fname_continuous, ctf_fname_1_trial, ) ctf_fnames = tuple(sorted(block_sizes.keys())) @pytest.mark.slowtest @testing.requires_testing_data def test_read_ctf(tmpdir): """Test CTF reader.""" temp_dir = str(tmpdir) out_fname = op.join(temp_dir, 'test_py_raw.fif') # Create a dummy .eeg file so we can test our reading/application of it os.mkdir(op.join(temp_dir, 'randpos')) ctf_eeg_fname = op.join(temp_dir, 'randpos', ctf_fname_catch) shutil.copytree(op.join(ctf_dir, ctf_fname_catch), ctf_eeg_fname) with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'): raw = _test_raw_reader(read_raw_ctf, directory=ctf_eeg_fname) picks = pick_types(raw.info, meg=False, eeg=True) pos = np.random.RandomState(42).randn(len(picks), 3) fake_eeg_fname = op.join(ctf_eeg_fname, 'catch-alp-good-f.eeg') # Create a bad file with open(fake_eeg_fname, 'wb') as fid: fid.write('foo\n'.encode('ascii')) pytest.raises(RuntimeError, read_raw_ctf, ctf_eeg_fname) # Create a good file with open(fake_eeg_fname, 'wb') as fid: for ii, ch_num in enumerate(picks): args = (str(ch_num + 1), raw.ch_names[ch_num],) + tuple( '%0.5f' % x for x in 100 * pos[ii]) # convert to cm fid.write(('\t'.join(args) + '\n').encode('ascii')) pos_read_old = np.array([raw.info['chs'][p]['loc'][:3] for p in picks]) with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'): raw = read_raw_ctf(ctf_eeg_fname) # read modified data pos_read = np.array([raw.info['chs'][p]['loc'][:3] for p in picks]) assert_allclose(apply_trans(raw.info['ctf_head_t'], pos), pos_read, rtol=1e-5, atol=1e-5) assert (pos_read == pos_read_old).mean() < 0.1 shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_randpos_raw.fif'), op.join(temp_dir, 'randpos', 'catch-alp-good-f.ds_raw.fif')) # Create a version with no hc, starting out *with* EEG pos (error) os.mkdir(op.join(temp_dir, 'nohc')) ctf_no_hc_fname = op.join(temp_dir, 'no_hc', ctf_fname_catch) shutil.copytree(ctf_eeg_fname, ctf_no_hc_fname) remove_base = op.join(ctf_no_hc_fname, op.basename(ctf_fname_catch[:-3])) os.remove(remove_base + 
'.hc') with pytest.warns(RuntimeWarning, match='MISC channel'): pytest.raises(RuntimeError, read_raw_ctf, ctf_no_hc_fname) os.remove(remove_base + '.eeg') shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_nohc_raw.fif'), op.join(temp_dir, 'no_hc', 'catch-alp-good-f.ds_raw.fif')) # All our files use_fnames = [op.join(ctf_dir, c) for c in ctf_fnames] for fname in use_fnames: raw_c = read_raw_fif(fname + '_raw.fif', preload=True) with pytest.warns(None): # sometimes matches "MISC channel" raw = read_raw_ctf(fname) # check info match assert_array_equal(raw.ch_names, raw_c.ch_names) assert_allclose(raw.times, raw_c.times) assert_allclose(raw._cals, raw_c._cals) assert (raw.info['meas_id']['version'] == raw_c.info['meas_id']['version'] + 1) for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'): assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'], rtol=1e-4, atol=1e-7) # XXX 2019/11/29 : MNC-C FIF conversion files don't have meas_date set. # Consider adding meas_date to below checks once this is addressed in # MNE-C for key in ('acq_pars', 'acq_stim', 'bads', 'ch_names', 'custom_ref_applied', 'description', 'events', 'experimenter', 'highpass', 'line_freq', 'lowpass', 'nchan', 'proj_id', 'proj_name', 'projs', 'sfreq', 'subject_info'): assert raw.info[key] == raw_c.info[key], key if op.basename(fname) not in single_trials: # We don't force buffer size to be smaller like MNE-C assert raw.buffer_size_sec == raw_c.buffer_size_sec assert len(raw.info['comps']) == len(raw_c.info['comps']) for c1, c2 in zip(raw.info['comps'], raw_c.info['comps']): for key in ('colcals', 'rowcals'): assert_allclose(c1[key], c2[key]) assert c1['save_calibrated'] == c2['save_calibrated'] for key in ('row_names', 'col_names', 'nrow', 'ncol'): assert_array_equal(c1['data'][key], c2['data'][key]) assert_allclose(c1['data']['data'], c2['data']['data'], atol=1e-7, rtol=1e-5) assert_allclose(raw.info['hpi_results'][0]['coord_trans']['trans'], raw_c.info['hpi_results'][0]['coord_trans']['trans'], rtol=1e-5, atol=1e-7) assert len(raw.info['chs']) == len(raw_c.info['chs']) for ii, (c1, c2) in enumerate(zip(raw.info['chs'], raw_c.info['chs'])): for key in ('kind', 'scanno', 'unit', 'ch_name', 'unit_mul', 'range', 'coord_frame', 'coil_type', 'logno'): if c1['ch_name'] == 'RMSP' and \ 'catch-alp-good-f' in fname and \ key in ('kind', 'unit', 'coord_frame', 'coil_type', 'logno'): continue # XXX see below... 
if key == 'coil_type' and c1[key] == FIFF.FIFFV_COIL_EEG: # XXX MNE-C bug that this is not set assert c2[key] == FIFF.FIFFV_COIL_NONE continue assert c1[key] == c2[key], key for key in ('cal',): assert_allclose(c1[key], c2[key], atol=1e-6, rtol=1e-4, err_msg='raw.info["chs"][%d][%s]' % (ii, key)) # XXX 2016/02/24: fixed bug with normal computation that used # to exist, once mne-C tools are updated we should update our FIF # conversion files, then the slices can go away (and the check # can be combined with that for "cal") for key in ('loc',): if c1['ch_name'] == 'RMSP' and 'catch-alp-good-f' in fname: continue if (c2[key][:3] == 0.).all(): check = [np.nan] * 3 else: check = c2[key][:3] assert_allclose(c1[key][:3], check, atol=1e-6, rtol=1e-4, err_msg='raw.info["chs"][%d][%s]' % (ii, key)) if (c2[key][3:] == 0.).all(): check = [np.nan] * 3 else: check = c2[key][9:12] assert_allclose(c1[key][9:12], check, atol=1e-6, rtol=1e-4, err_msg='raw.info["chs"][%d][%s]' % (ii, key)) # Make sure all digitization points are in the MNE head coord frame for p in raw.info['dig']: assert p['coord_frame'] == FIFF.FIFFV_COORD_HEAD, \ 'dig points must be in FIFF.FIFFV_COORD_HEAD' if fname.endswith('catch-alp-good-f.ds'): # omit points from .pos file raw.info['dig'] = raw.info['dig'][:-10] # XXX: Next test would fail because c-tools assign the fiducials from # CTF data as HPI. Should eventually clarify/unify with Matti. # assert_dig_allclose(raw.info, raw_c.info) # check data match raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.) raw_read = read_raw_fif(out_fname) # so let's check tricky cases based on sample boundaries rng = np.random.RandomState(0) pick_ch = rng.permutation(np.arange(len(raw.ch_names)))[:10] bnd = int(round(raw.info['sfreq'] * raw.buffer_size_sec)) assert bnd == raw._raw_extras[0]['block_size'] assert bnd == block_sizes[op.basename(fname)] slices = (slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd), slice(3, 300), slice(None)) if len(raw.times) >= 2 * bnd: # at least two complete blocks slices = slices + (slice(bnd, 2 * bnd), slice(bnd, bnd + 1), slice(0, bnd + 100)) for sl_time in slices: assert_allclose(raw[pick_ch, sl_time][0], raw_c[pick_ch, sl_time][0]) assert_allclose(raw_read[pick_ch, sl_time][0], raw_c[pick_ch, sl_time][0]) # all data / preload raw.load_data() assert_allclose(raw[:][0], raw_c[:][0], atol=1e-15) # test bad segment annotations if 'testdata_ctf_short.ds' in fname: assert 'bad' in raw.annotations.description[0] assert_allclose(raw.annotations.onset, [2.15]) assert_allclose(raw.annotations.duration, [0.0225]) with pytest.raises(TypeError, match='path-like'): read_raw_ctf(1) with pytest.raises(FileNotFoundError, match='does not exist'): read_raw_ctf(ctf_fname_continuous + 'foo.ds') # test ignoring of system clock read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'ignore') with pytest.raises(ValueError, match='system_clock'): read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'foo') @testing.requires_testing_data def test_rawctf_clean_names(): """Test RawCTF _clean_names method.""" # read test data with pytest.warns(RuntimeWarning, match='ref channel RMSP did not'): raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch)) raw_cleaned = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch), clean_names=True) test_channel_names = _clean_names(raw.ch_names) test_info_comps = copy.deepcopy(raw.info['comps']) # channel names should not be cleaned by default assert raw.ch_names != test_channel_names chs_ch_names = [ch['ch_name'] for ch in raw.info['chs']] assert chs_ch_names 
!= test_channel_names for test_comp, comp in zip(test_info_comps, raw.info['comps']): for key in ('row_names', 'col_names'): assert not array_equal(_clean_names(test_comp['data'][key]), comp['data'][key]) # channel names should be cleaned if clean_names=True assert raw_cleaned.ch_names == test_channel_names for ch, test_ch_name in zip(raw_cleaned.info['chs'], test_channel_names): assert ch['ch_name'] == test_ch_name for test_comp, comp in zip(test_info_comps, raw_cleaned.info['comps']): for key in ('row_names', 'col_names'): assert _clean_names(test_comp['data'][key]) == comp['data'][key] @spm_face.requires_spm_data def test_read_spm_ctf(): """Test CTF reader with omitted samples.""" data_path = spm_face.data_path() raw_fname = op.join(data_path, 'MEG', 'spm', 'SPM_CTF_MEG_example_faces1_3D.ds') raw = read_raw_ctf(raw_fname) extras = raw._raw_extras[0] assert extras['n_samp'] == raw.n_times assert extras['n_samp'] != extras['n_samp_tot'] # Test that LPA, nasion and RPA are correct. coord_frames = np.array([d['coord_frame'] for d in raw.info['dig']]) assert np.all(coord_frames == FIFF.FIFFV_COORD_HEAD) cardinals = {d['ident']: d['r'] for d in raw.info['dig']} assert cardinals[1][0] < cardinals[2][0] < cardinals[3][0] # x coord assert cardinals[1][1] < cardinals[2][1] # y coord assert cardinals[3][1] < cardinals[2][1] # y coord for key in cardinals.keys(): assert_allclose(cardinals[key][2], 0, atol=1e-6) # z coord @testing.requires_testing_data @pytest.mark.parametrize('comp_grade', [0, 1]) def test_saving_picked(tmpdir, comp_grade): """Test saving picked CTF instances.""" temp_dir = str(tmpdir) out_fname = op.join(temp_dir, 'test_py_raw.fif') raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial)) assert raw.info['meas_date'] == _stamp_to_dt((1367228160, 0)) raw.crop(0, 1).load_data() assert raw.compensation_grade == get_current_comp(raw.info) == 0 assert len(raw.info['comps']) == 5 pick_kwargs = dict(meg=True, ref_meg=False, verbose=True) raw.apply_gradient_compensation(comp_grade) with catch_logging() as log: raw_pick = raw.copy().pick_types(**pick_kwargs) assert len(raw.info['comps']) == 5 assert len(raw_pick.info['comps']) == 0 log = log.getvalue() assert 'Removing 5 compensators' in log raw_pick.save(out_fname, overwrite=True) # should work raw2 = read_raw_fif(out_fname) assert (raw_pick.ch_names == raw2.ch_names) assert_array_equal(raw_pick.times, raw2.times) assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, atol=1e-20) # atol is very small but > 0 raw2 = read_raw_fif(out_fname, preload=True) assert (raw_pick.ch_names == raw2.ch_names) assert_array_equal(raw_pick.times, raw2.times) assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, atol=1e-20) # atol is very small but > 0 @brainstorm.bst_raw.requires_bstraw_data def test_read_ctf_annotations(): """Test reading CTF marker file.""" EXPECTED_LATENCIES = np.array([ 5640, 7950, 9990, 12253, 14171, 16557, 18896, 20846, # noqa 22702, 24990, 26830, 28974, 30906, 33077, 34985, 36907, # noqa 38922, 40760, 42881, 45222, 47457, 49618, 51802, 54227, # noqa 56171, 58274, 60394, 62375, 64444, 66767, 68827, 71109, # noqa 73499, 75807, 78146, 80415, 82554, 84508, 86403, 88426, # noqa 90746, 92893, 94779, 96822, 98996, 99001, 100949, 103325, # noqa 105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663, # noqa 121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210, # noqa 139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646, # noqa 156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519, # noqa 
174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330, # noqa 192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409, # noqa 209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305, # noqa 226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762, # noqa 243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354, # noqa 260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197, # noqa 278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183, # noqa 295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124, # noqa 312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959, # noqa 329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034, # noqa 344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603, # noqa 361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204, # noqa 378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785, # noqa 396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686, # noqa 413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215, # noqa 429278, 431668 # noqa ]) - 1 # Fieldtrip has 1 sample difference with MNE raw = RawArray( data=np.empty((1, 432000), dtype=np.float64), info=create_info(ch_names=1, sfreq=1200.0)) raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date']) raw.set_annotations(read_annotations(somato_fname)) events, _ = events_from_annotations(raw) latencies = np.sort(events[:, 0]) assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6) @testing.requires_testing_data def test_read_ctf_annotations_smoke_test(): """Test reading CTF marker file. `testdata_ctf_mc.ds` has no trials or offsets therefore its a plain reading of whatever is in the MarkerFile.mrk. """ EXPECTED_ONSET = [ 0., 0.1425, 0.285, 0.42833333, 0.57083333, 0.71416667, 0.85666667, 0.99916667, 1.1425, 1.285, 1.4275, 1.57083333, 1.71333333, 1.85666667, 1.99916667, 2.14166667, 2.285, 2.4275, 2.57083333, 2.71333333, 2.85583333, 2.99916667, 3.14166667, 3.28416667, 3.4275, 3.57, 3.71333333, 3.85583333, 3.99833333, 4.14166667, 4.28416667, 4.42666667, 4.57, 4.7125, 4.85583333, 4.99833333 ] fname = op.join(ctf_dir, 'testdata_ctf_mc.ds') annot = read_annotations(fname) assert_allclose(annot.onset, EXPECTED_ONSET) raw = read_raw_ctf(fname) _assert_annotations_equal(raw.annotations, annot, 1e-6) def _read_res4_mag_comp(dsdir): res = mne.io.ctf.res4._read_res4(dsdir) for ch in res['chs']: if ch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH: ch['grad_order_no'] = 1 return res def _bad_res4_grad_comp(dsdir): res = mne.io.ctf.res4._read_res4(dsdir) for ch in res['chs']: if ch['sensor_type_index'] == CTF.CTFV_MEG_CH: ch['grad_order_no'] = 1 break return res @testing.requires_testing_data def test_read_ctf_mag_bad_comp(tmpdir, monkeypatch): """Test CTF reader with mag comps and bad comps.""" path = op.join(ctf_dir, ctf_fname_continuous) raw_orig = read_raw_ctf(path) assert raw_orig.compensation_grade == 0 monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _read_res4_mag_comp) raw_mag_comp = read_raw_ctf(path) assert raw_mag_comp.compensation_grade == 0 sphere = mne.make_sphere_model() src = mne.setup_volume_source_space(pos=50., exclude=5., bem=sphere) assert src[0]['nuse'] == 26 for grade in (0, 1): raw_orig.apply_gradient_compensation(grade) raw_mag_comp.apply_gradient_compensation(grade) args = (None, src, sphere, True, False) fwd_orig = make_forward_solution(raw_orig.info, *args) fwd_mag_comp = make_forward_solution(raw_mag_comp.info, *args) assert_allclose(fwd_orig['sol']['data'], fwd_mag_comp['sol']['data']) 
monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _bad_res4_grad_comp) with pytest.raises(RuntimeError, match='inconsistent compensation grade'): read_raw_ctf(path)
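# A minimal sketch of the CTF reading behaviour exercised by the tests above,
# assuming a CTF dataset directory is available locally; the *.ds path below
# is hypothetical.
import mne

raw = mne.io.read_raw_ctf('/path/to/recording.ds', preload=True)
print(raw.compensation_grade)       # compensation grade stored in the file
raw.apply_gradient_compensation(3)  # assumes 3rd-order coefficients are
                                    # present, as they usually are for CTF
meg_only = raw.copy().pick_types(meg=True, ref_meg=False)  # drops compensators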
rkmaddox/mne-python
mne/io/ctf/tests/test_ctf.py
mne/label.py
"""Helpers for components that manage entities.""" import asyncio from datetime import timedelta from itertools import chain import logging from homeassistant import config as conf_util from homeassistant.setup import async_prepare_setup_platform from homeassistant.const import ( ATTR_ENTITY_ID, CONF_SCAN_INTERVAL, CONF_ENTITY_NAMESPACE, ENTITY_MATCH_ALL, ) from homeassistant.core import callback from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import config_per_platform, discovery from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA from homeassistant.helpers.service import async_extract_entity_ids from homeassistant.loader import bind_hass, async_get_integration from homeassistant.util import slugify from .entity_platform import EntityPlatform # mypy: allow-untyped-defs, no-check-untyped-defs DEFAULT_SCAN_INTERVAL = timedelta(seconds=15) DATA_INSTANCES = "entity_components" @bind_hass async def async_update_entity(hass, entity_id): """Trigger an update for an entity.""" domain = entity_id.split(".", 1)[0] entity_comp = hass.data.get(DATA_INSTANCES, {}).get(domain) if entity_comp is None: logging.getLogger(__name__).warning( "Forced update failed. Component for %s not loaded.", entity_id ) return entity = entity_comp.get_entity(entity_id) if entity is None: logging.getLogger(__name__).warning( "Forced update failed. Entity %s not found.", entity_id ) return await entity.async_update_ha_state(True) class EntityComponent: """The EntityComponent manages platforms that manages entities. This class has the following responsibilities: - Process the configuration and set up a platform based component. - Manage the platforms and their entities. - Help extract the entities from a service call. - Maintain a group that tracks all platform entities. - Listen for discovery events for platforms related to the domain. """ def __init__( self, logger, domain, hass, scan_interval=DEFAULT_SCAN_INTERVAL, group_name=None ): """Initialize an entity component.""" self.logger = logger self.hass = hass self.domain = domain self.scan_interval = scan_interval self.group_name = group_name self.config = None self._platforms = {domain: self._async_init_entity_platform(domain, None)} self.async_add_entities = self._platforms[domain].async_add_entities self.add_entities = self._platforms[domain].add_entities hass.data.setdefault(DATA_INSTANCES, {})[domain] = self @property def entities(self): """Return an iterable that returns all entities.""" return chain.from_iterable( platform.entities.values() for platform in self._platforms.values() ) def get_entity(self, entity_id): """Get an entity.""" for platform in self._platforms.values(): entity = platform.entities.get(entity_id) if entity is not None: return entity return None def setup(self, config): """Set up a full entity component. This doesn't block the executor to protect from deadlocks. """ self.hass.add_job(self.async_setup(config)) async def async_setup(self, config): """Set up a full entity component. Loads the platforms from the config and will listen for supported discovered platforms. This method must be run in the event loop. 
""" self.config = config # Look in config for Domain, Domain 2, Domain 3 etc and load them tasks = [] for p_type, p_config in config_per_platform(config, self.domain): tasks.append(self.async_setup_platform(p_type, p_config)) if tasks: await asyncio.wait(tasks) # Generic discovery listener for loading platform dynamically # Refer to: homeassistant.components.discovery.load_platform() async def component_platform_discovered(platform, info): """Handle the loading of a platform.""" await self.async_setup_platform(platform, {}, info) discovery.async_listen_platform( self.hass, self.domain, component_platform_discovered ) async def async_setup_entry(self, config_entry): """Set up a config entry.""" platform_type = config_entry.domain platform = await async_prepare_setup_platform( self.hass, # In future PR we should make hass_config part of the constructor # params. self.config or {}, self.domain, platform_type, ) if platform is None: return False key = config_entry.entry_id if key in self._platforms: raise ValueError("Config entry has already been setup!") self._platforms[key] = self._async_init_entity_platform( platform_type, platform, scan_interval=getattr(platform, "SCAN_INTERVAL", None), ) return await self._platforms[key].async_setup_entry(config_entry) async def async_unload_entry(self, config_entry): """Unload a config entry.""" key = config_entry.entry_id platform = self._platforms.pop(key, None) if platform is None: raise ValueError("Config entry was never loaded!") await platform.async_reset() return True async def async_extract_from_service(self, service, expand_group=True): """Extract all known and available entities from a service call. Will return all entities if no entities specified in call. Will return an empty list if entities specified but unknown. This method must be run in the event loop. """ data_ent_id = service.data.get(ATTR_ENTITY_ID) if data_ent_id in (None, ENTITY_MATCH_ALL): if data_ent_id is None: self.logger.warning( "Not passing an entity ID to a service to target all " "entities is deprecated. 
Update your call to %s.%s to be " "instead: entity_id: %s", service.domain, service.service, ENTITY_MATCH_ALL, ) return [entity for entity in self.entities if entity.available] entity_ids = await async_extract_entity_ids(self.hass, service, expand_group) return [ entity for entity in self.entities if entity.available and entity.entity_id in entity_ids ] @callback def async_register_entity_service(self, name, schema, func, required_features=None): """Register an entity service.""" if isinstance(schema, dict): schema = ENTITY_SERVICE_SCHEMA.extend(schema) async def handle_service(call): """Handle the service.""" service_name = f"{self.domain}.{name}" await self.hass.helpers.service.entity_service_call( self._platforms.values(), func, call, service_name, required_features ) self.hass.services.async_register(self.domain, name, handle_service, schema) async def async_setup_platform( self, platform_type, platform_config, discovery_info=None ): """Set up a platform for this component.""" if self.config is None: raise RuntimeError("async_setup needs to be called first") platform = await async_prepare_setup_platform( self.hass, self.config, self.domain, platform_type ) if platform is None: return # Use config scan interval, fallback to platform if none set scan_interval = platform_config.get( CONF_SCAN_INTERVAL, getattr(platform, "SCAN_INTERVAL", None) ) entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE) key = (platform_type, scan_interval, entity_namespace) if key not in self._platforms: self._platforms[key] = self._async_init_entity_platform( platform_type, platform, scan_interval, entity_namespace ) await self._platforms[key].async_setup(platform_config, discovery_info) @callback def _async_update_group(self): """Set up and/or update component group. This method must be run in the event loop. """ if self.group_name is None: return ids = [ entity.entity_id for entity in sorted( self.entities, key=lambda entity: entity.name or entity.entity_id ) ] self.hass.async_create_task( self.hass.services.async_call( "group", "set", dict( object_id=slugify(self.group_name), name=self.group_name, visible=False, entities=ids, ), ) ) async def _async_reset(self): """Remove entities and reset the entity component to initial values. This method must be run in the event loop. """ tasks = [platform.async_reset() for platform in self._platforms.values()] if tasks: await asyncio.wait(tasks) self._platforms = {self.domain: self._platforms[self.domain]} self.config = None if self.group_name is not None: await self.hass.services.async_call( "group", "remove", dict(object_id=slugify(self.group_name)) ) async def async_remove_entity(self, entity_id): """Remove an entity managed by one of the platforms.""" for platform in self._platforms.values(): if entity_id in platform.entities: await platform.async_remove_entity(entity_id) async def async_prepare_reload(self): """Prepare reloading this entity component. This method must be run in the event loop. 
""" try: conf = await conf_util.async_hass_config_yaml(self.hass) except HomeAssistantError as err: self.logger.error(err) return None integration = await async_get_integration(self.hass, self.domain) conf = await conf_util.async_process_component_config( self.hass, conf, integration ) if conf is None: return None await self._async_reset() return conf def _async_init_entity_platform( self, platform_type, platform, scan_interval=None, entity_namespace=None ): """Initialize an entity platform.""" if scan_interval is None: scan_interval = self.scan_interval return EntityPlatform( hass=self.hass, logger=self.logger, domain=self.domain, platform_name=platform_type, platform=platform, scan_interval=scan_interval, entity_namespace=entity_namespace, async_entities_added_callback=self._async_update_group, )
"""Tests for the storage helper.""" import asyncio from datetime import timedelta import json from unittest.mock import patch, Mock import pytest from homeassistant.const import EVENT_HOMEASSISTANT_STOP from homeassistant.helpers import storage from homeassistant.util import dt from tests.common import async_fire_time_changed, mock_coro MOCK_VERSION = 1 MOCK_KEY = "storage-test" MOCK_DATA = {"hello": "world"} MOCK_DATA2 = {"goodbye": "cruel world"} @pytest.fixture def store(hass): """Fixture of a store that prevents writing on HASS stop.""" yield storage.Store(hass, MOCK_VERSION, MOCK_KEY) async def test_loading(hass, store): """Test we can save and load data.""" await store.async_save(MOCK_DATA) data = await store.async_load() assert data == MOCK_DATA async def test_custom_encoder(hass): """Test we can save and load data.""" class JSONEncoder(json.JSONEncoder): """Mock JSON encoder.""" def default(self, o): """Mock JSON encode method.""" return "9" store = storage.Store(hass, MOCK_VERSION, MOCK_KEY, encoder=JSONEncoder) await store.async_save(Mock()) data = await store.async_load() assert data == "9" async def test_loading_non_existing(hass, store): """Test we can save and load data.""" with patch("homeassistant.util.json.open", side_effect=FileNotFoundError): data = await store.async_load() assert data is None async def test_loading_parallel(hass, store, hass_storage, caplog): """Test we can save and load data.""" hass_storage[store.key] = {"version": MOCK_VERSION, "data": MOCK_DATA} results = await asyncio.gather(store.async_load(), store.async_load()) assert results[0] is MOCK_DATA assert results[1] is MOCK_DATA assert caplog.text.count("Loading data for {}".format(store.key)) async def test_saving_with_delay(hass, store, hass_storage): """Test saving data after a delay.""" store.async_delay_save(lambda: MOCK_DATA, 1) assert store.key not in hass_storage async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1)) await hass.async_block_till_done() assert hass_storage[store.key] == { "version": MOCK_VERSION, "key": MOCK_KEY, "data": MOCK_DATA, } async def test_saving_on_stop(hass, hass_storage): """Test delayed saves trigger when we quit Home Assistant.""" store = storage.Store(hass, MOCK_VERSION, MOCK_KEY) store.async_delay_save(lambda: MOCK_DATA, 1) assert store.key not in hass_storage hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP) await hass.async_block_till_done() assert hass_storage[store.key] == { "version": MOCK_VERSION, "key": MOCK_KEY, "data": MOCK_DATA, } async def test_loading_while_delay(hass, store, hass_storage): """Test we load new data even if not written yet.""" await store.async_save({"delay": "no"}) assert hass_storage[store.key] == { "version": MOCK_VERSION, "key": MOCK_KEY, "data": {"delay": "no"}, } store.async_delay_save(lambda: {"delay": "yes"}, 1) assert hass_storage[store.key] == { "version": MOCK_VERSION, "key": MOCK_KEY, "data": {"delay": "no"}, } data = await store.async_load() assert data == {"delay": "yes"} async def test_writing_while_writing_delay(hass, store, hass_storage): """Test a write while a write with delay is active.""" store.async_delay_save(lambda: {"delay": "yes"}, 1) assert store.key not in hass_storage await store.async_save({"delay": "no"}) assert hass_storage[store.key] == { "version": MOCK_VERSION, "key": MOCK_KEY, "data": {"delay": "no"}, } async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1)) await hass.async_block_till_done() assert hass_storage[store.key] == { "version": MOCK_VERSION, "key": MOCK_KEY, "data": 
{"delay": "no"}, } data = await store.async_load() assert data == {"delay": "no"} async def test_migrator_no_existing_config(hass, store, hass_storage): """Test migrator with no existing config.""" with patch("os.path.isfile", return_value=False), patch.object( store, "async_load", return_value=mock_coro({"cur": "config"}) ): data = await storage.async_migrator(hass, "old-path", store) assert data == {"cur": "config"} assert store.key not in hass_storage async def test_migrator_existing_config(hass, store, hass_storage): """Test migrating existing config.""" with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove: data = await storage.async_migrator( hass, "old-path", store, old_conf_load_func=lambda _: {"old": "config"} ) assert len(mock_remove.mock_calls) == 1 assert data == {"old": "config"} assert hass_storage[store.key] == { "key": MOCK_KEY, "version": MOCK_VERSION, "data": data, } async def test_migrator_transforming_config(hass, store, hass_storage): """Test migrating config to new format.""" async def old_conf_migrate_func(old_config): """Migrate old config to new format.""" return {"new": old_config["old"]} with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove: data = await storage.async_migrator( hass, "old-path", store, old_conf_migrate_func=old_conf_migrate_func, old_conf_load_func=lambda _: {"old": "config"}, ) assert len(mock_remove.mock_calls) == 1 assert data == {"new": "config"} assert hass_storage[store.key] == { "key": MOCK_KEY, "version": MOCK_VERSION, "data": data, }
joopert/home-assistant
tests/helpers/test_storage.py
homeassistant/helpers/entity_component.py
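For context on the async_register_entity_service helper in the entity_component module above, a minimal usage sketch follows. The domain, service name, schema key, and entity method name are invented for illustration; passing a plain dict relies on the ENTITY_SERVICE_SCHEMA.extend branch shown above, and passing the method name as a string assumes entity_service_call resolves string func arguments.

import logging

import voluptuous as vol

import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent

# Assumes a running `hass` instance; "my_domain", "set_volume" and
# "async_set_volume" are invented names for illustration only.
component = EntityComponent(logging.getLogger(__name__), "my_domain", hass)

component.async_register_entity_service(
    "set_volume",                                # exposed as my_domain.set_volume
    {vol.Required("volume"): cv.positive_int},   # dict is extended onto ENTITY_SERVICE_SCHEMA
    "async_set_volume",                          # entity method resolved by entity_service_call
)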
"""Global fixtures for depot tests""" import fauxfactory import pytest from wrapanapi import VmState from cfme.utils.conf import cfme_data from cfme.utils.log import logger from cfme.utils.net import find_pingable from cfme.utils.net import find_pingable_ipv6 from cfme.utils.net import pick_responding_ip from cfme.utils.virtual_machines import deploy_template from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for FTP_PORT = 21 @pytest.fixture(scope="module") def depot_machine_ip(request, appliance): """ Deploy vm for depot test This fixture uses for deploy vm on provider from yaml and then receive it's ip After test run vm deletes from provider """ try: # use long-test name so it has a longer life before automatic cleanup data = cfme_data.log_db_operations vm = deploy_template( data.log_db_depot_template.provider, fauxfactory.gen_alphanumeric(26, start="long-test-depot-"), template_name=data.log_db_depot_template.template_name ) vm.ensure_state(VmState.RUNNING) except AttributeError: msg = 'Missing some yaml information necessary to deploy depot VM' logger.exception(msg) pytest.skip(msg) try: # TODO It would be better to use retry_connect here, but this requires changes to other # fixtures. found_ip = pick_responding_ip(lambda: vm.all_ips, FTP_PORT, 300, 5, 10) except TimedOutError: msg = 'Timed out waiting for reachable depot VM IP' logger.exception(msg) pytest.skip(msg) yield found_ip vm.cleanup() @pytest.fixture(scope="module") def depot_machine_ipv4_and_ipv6(request, appliance): """ Deploy vm for depot test This fixture is used for deploying a vm on a provider from the yamls and getting its ip (both ipv4 and ipv6) After test run vm deletes from provider """ try: # use long-test name so it has a longer life before automatic cleanup data = cfme_data.log_db_operations vm = deploy_template( data.log_db_depot_template.provider, f"long-test-depot-{fauxfactory.gen_alphanumeric()}", template_name=data.log_db_depot_template.template_name ) vm.ensure_state(VmState.RUNNING) except AttributeError: msg = 'Missing some yaml information necessary to deploy depot VM' logger.exception(msg) pytest.skip(msg) try: ipv4, _ = wait_for( find_pingable, func_args=[vm, False], fail_condition=None, delay=5, num_sec=300 ) ipv6, _ = wait_for( find_pingable_ipv6, func_args=[vm], fail_condition=None, delay=5, num_sec=300 ) except TimedOutError: msg = 'Timed out waiting for reachable depot VM IP' logger.exception(msg) pytest.skip(msg) yield ipv4, ipv6 vm.cleanup()
import pytest from cfme import test_requirements from cfme.infrastructure.provider import InfraProvider from cfme.markers.env_markers.provider import ONE from cfme.utils.appliance.implementations.ui import navigate_to pytestmark = [test_requirements.report] @pytest.fixture(scope="module") def report(appliance): saved_report = appliance.collections.reports.instantiate( type="Configuration Management", subtype="Virtual Machines", menu_name="Hardware Information for VMs", ).queue(wait_for_finish=True) yield saved_report saved_report.delete(cancel=False) @pytest.mark.parametrize("filetype", ["txt", "csv", "pdf"]) @pytest.mark.provider([InfraProvider], selector=ONE, scope="module") def test_download_report(setup_provider_modscope, report, filetype): """Download the report as a file. Polarion: assignee: pvala casecomponent: Reporting caseimportance: high initialEstimate: 1/20h """ if filetype == "pdf": view = navigate_to(report, "Details") # since multiple window handling is not possible, we just assert that the option is enabled. assert view.download.item_enabled("Print or export as PDF") else: report.download(filetype)
nachandr/cfme_tests
cfme/tests/intelligence/test_download_report.py
cfme/fixtures/depot.py
from widgetastic.widget import View from widgetastic_patternfly import AboutModal from cfme.exceptions import ItemNotFound from cfme.utils.appliance.implementations.ui import navigate_to # MIQ/CFME about field names VERSION = 'Version' SERVER = 'Server Name' USER = 'User Name' ROLE = 'User Role' BROWSER = 'Browser' BROWSER_VERSION = 'Browser Version' BROWSER_OS = 'Browser OS' ZONE = "Zone" REGION = "Region" class MIQAboutModal(AboutModal): """Override some locators that MIQ mangles""" CLOSE_LOC = './/div[@class="modal-header"]/button[@class="close"]' class AboutView(View): """ The view for the about modal """ @property def is_displayed(self): return self.modal.is_open modal = MIQAboutModal() # 5.10 has id, 5.11 does not, wt.pf doesn't need it. def get_detail(field, server): """ Open the about modal and fetch the value for one of the fields 'title' and 'trademark' fields are allowed and get the header/footer values Raises ItemNotFound if the field isn't in the about modal :param field: string label for the detail field :return: string value from the requested field """ view = navigate_to(server, 'About') try: if field.lower() in ['title', 'trademark']: return getattr(view.modal, field.lower()) else: # this is AboutModal.items function, TODO rename return view.modal.items()[field] except (KeyError, AttributeError): raise ItemNotFound(f'No field named {field} found in "About" modal.') finally: # close since its a blocking modal and will break further navigation view.modal.close()
import pytest from cfme import test_requirements from cfme.infrastructure.provider import InfraProvider from cfme.markers.env_markers.provider import ONE from cfme.utils.appliance.implementations.ui import navigate_to pytestmark = [test_requirements.report] @pytest.fixture(scope="module") def report(appliance): saved_report = appliance.collections.reports.instantiate( type="Configuration Management", subtype="Virtual Machines", menu_name="Hardware Information for VMs", ).queue(wait_for_finish=True) yield saved_report saved_report.delete(cancel=False) @pytest.mark.parametrize("filetype", ["txt", "csv", "pdf"]) @pytest.mark.provider([InfraProvider], selector=ONE, scope="module") def test_download_report(setup_provider_modscope, report, filetype): """Download the report as a file. Polarion: assignee: pvala casecomponent: Reporting caseimportance: high initialEstimate: 1/20h """ if filetype == "pdf": view = navigate_to(report, "Details") # since multiple window handling is not possible, we just assert that the option is enabled. assert view.download.item_enabled("Print or export as PDF") else: report.download(filetype)
nachandr/cfme_tests
cfme/tests/intelligence/test_download_report.py
cfme/configure/about.py
import attr from widgetastic.exceptions import NoSuchElementException from wrapanapi.systems import VMWareSystem from cfme.common.candu_views import VMUtilizationView from cfme.common.provider import DefaultEndpoint from cfme.common.provider import DefaultEndpointForm from cfme.common.provider import VMRCEndpoint from cfme.exceptions import ItemNotFound from cfme.infrastructure.provider import InfraProvider from cfme.services.catalogs.catalog_items import VMwareCatalogItem from widgetastic_manageiq import LineChart class VirtualCenterEndpoint(DefaultEndpoint): pass class VirtualCenterEndpointForm(DefaultEndpointForm): pass class VirtualCenterVMUtilizationView(VMUtilizationView): """A VM Utilization view for virtual center providers""" vm_cpu = LineChart(id='miq_chart_parent_candu_0') vm_cpu_state = LineChart(id='miq_chart_parent_candu_1') vm_memory = LineChart(id='miq_chart_parent_candu_2') vm_disk = LineChart(id='miq_chart_parent_candu_3') vm_network = LineChart(id='miq_chart_parent_candu_4') @attr.s(eq=False) class VMwareProvider(InfraProvider): catalog_item_type = VMwareCatalogItem vm_utilization_view = VirtualCenterVMUtilizationView type_name = "virtualcenter" mgmt_class = VMWareSystem db_types = ["Vmware::InfraManager"] endpoints_form = VirtualCenterEndpointForm ems_pretty_name = 'VMware vCenter' discover_dict = {"vmware": True} settings_key = 'ems_vmware' # xpath locators for elements, to be used by selenium _console_connection_status_element = '//*[@id="connection-status"]|//*[@id="noVNC_status"]' _canvas_element = ('(//*[@id="remote-console" or @id="wmksContainer"]/canvas|' '//*[@id="noVNC_canvas"])') _ctrl_alt_del_xpath = '(//*[@id="ctrlaltdel"]|//*[@id="sendCtrlAltDelButton"])' _fullscreen_xpath = '//*[@id="fullscreen"]' bad_credentials_error_msg = 'Cannot complete login due to an incorrect user name or password.' log_name = 'vim' _console_type = '//*[@id="console-type"]' ems_events = [ ('vm_create', {'event_type': 'VmDeployedEvent', 'dest_vm_or_template_id': None}), ('vm_stop', {'event_type': 'VmPoweredOffEvent', 'vm_or_template_id': None}), ('vm_start', {'event_type': 'VmPoweredOnEvent', 'vm_or_template_id': None}), ('vm_delete', {'event_type': 'VmRemovedEvent', 'vm_or_template_id': None}) ] def deployment_helper(self, deploy_args): """ Used in utils.virtual_machines """ # Called within a dictionary update. 
Since we want to remove key/value pairs, return the # entire dictionary deploy_args.pop('username', None) deploy_args.pop('password', None) if "allowed_datastores" not in deploy_args and "allowed_datastores" in self.data: deploy_args['allowed_datastores'] = self.data['allowed_datastores'] return deploy_args @classmethod def from_config(cls, prov_config, prov_key, appliance=None): appliance = appliance or cls.appliance endpoints = { VirtualCenterEndpoint.name: VirtualCenterEndpoint(**prov_config['endpoints']['default']) } vmrc_endpoint_config = prov_config["endpoints"].get(VMRCEndpoint.name, {}) if vmrc_endpoint_config: endpoints[VMRCEndpoint.name] = VMRCEndpoint(**vmrc_endpoint_config) if prov_config.get('discovery_range'): start_ip = prov_config['discovery_range']['start'] end_ip = prov_config['discovery_range']['end'] else: start_ip = end_ip = prov_config.get('ipaddress') return appliance.collections.infra_providers.instantiate( prov_class=cls, name=prov_config['name'], endpoints=endpoints, zone=prov_config['server_zone'], key=prov_key, start_ip=start_ip, end_ip=end_ip) @property def view_value_mapping(self): return dict(name=self.name, prov_type='VMware vCenter') # Following methods will only work if the remote console window is open # and if selenium focused on it. These will not work if the selenium is # focused on Appliance window. def _try_element_lookup(self, xpath): try: return self.appliance.browser.widgetastic.selenium.find_element_by_xpath(xpath) except NoSuchElementException: raise ItemNotFound("Element not found on screen, is current focus on console window?") def get_console_connection_status(self): return self._try_element_lookup(self._console_connection_status_element).text def get_remote_console_canvas(self): return self._try_element_lookup(self._canvas_element) def get_console_ctrl_alt_del_btn(self): return self._try_element_lookup(self._ctrl_alt_del_xpath) def get_console_fullscreen_btn(self): return self._try_element_lookup(self._fullscreen_xpath) def get_console_type_name(self): return self._try_element_lookup(self._console_type).text
import pytest from cfme import test_requirements from cfme.infrastructure.provider import InfraProvider from cfme.markers.env_markers.provider import ONE from cfme.utils.appliance.implementations.ui import navigate_to pytestmark = [test_requirements.report] @pytest.fixture(scope="module") def report(appliance): saved_report = appliance.collections.reports.instantiate( type="Configuration Management", subtype="Virtual Machines", menu_name="Hardware Information for VMs", ).queue(wait_for_finish=True) yield saved_report saved_report.delete(cancel=False) @pytest.mark.parametrize("filetype", ["txt", "csv", "pdf"]) @pytest.mark.provider([InfraProvider], selector=ONE, scope="module") def test_download_report(setup_provider_modscope, report, filetype): """Download the report as a file. Polarion: assignee: pvala casecomponent: Reporting caseimportance: high initialEstimate: 1/20h """ if filetype == "pdf": view = navigate_to(report, "Details") # since multiple window handling is not possible, we just assert that the option is enabled. assert view.download.item_enabled("Print or export as PDF") else: report.download(filetype)
nachandr/cfme_tests
cfme/tests/intelligence/test_download_report.py
cfme/infrastructure/provider/virtualcenter.py
import attr from cfme.utils.appliance.plugin import AppliancePlugin from cfme.utils.appliance.plugin import AppliancePluginException from cfme.utils.log import logger_wrap from cfme.utils.quote import quote from cfme.utils.wait import wait_for class SystemdException(AppliancePluginException): pass @attr.s class SystemdService(AppliancePlugin): unit_name = attr.ib(type=str) @logger_wrap('SystemdService command runner: {}') def _run_service_command( self, command, expected_exit_code=None, unit_name=None, log_callback=None ): """Wrapper around running the command and raising exception on unexpected code Args: command: string command for systemd (stop, start, restart, etc) expected_exit_code: the exit code to expect, otherwise raise unit_name: optional unit name, defaults to self.unit_name attribute log_callback: logger to log against Raises: SystemdException: When expected_exit_code is not matched """ unit = self.unit_name if unit_name is None else unit_name with self.appliance.ssh_client as ssh: cmd = 'systemctl {} {}'.format(quote(command), quote(unit)) log_callback(f'Running {cmd}') result = ssh.run_command(cmd, container=self.appliance.ansible_pod_name) if expected_exit_code is not None and result.rc != expected_exit_code: # TODO: Bring back address msg = 'Failed to {} {}\nError: {}'.format( command, self.unit_name, result.output) if log_callback: log_callback(msg) else: self.logger.error(msg) raise SystemdException(msg) return result def stop(self, log_callback=None): return self._run_service_command( 'stop', expected_exit_code=0, log_callback=log_callback ) def start(self, log_callback=None): return self._run_service_command( 'start', expected_exit_code=0, log_callback=log_callback ) def restart(self, log_callback=None): return self._run_service_command( 'restart', expected_exit_code=0, log_callback=log_callback ) def reload(self, log_callback=None): return self._run_service_command( 'reload', expected_exit_code=0, log_callback=log_callback ) def enable(self, log_callback=None): return self._run_service_command( 'enable', expected_exit_code=0, log_callback=log_callback ) @property def enabled(self): return self._run_service_command('is-enabled').rc == 0 @property def is_active(self): return self._run_service_command('is-active').rc == 0 @property def running(self): return self._run_service_command("status").rc == 0 def wait_for_running(self, timeout=600): result, wait = wait_for( lambda: self.running, num_sec=timeout, fail_condition=False, delay=5, ) return result def daemon_reload(self, log_callback=None): """Call daemon-reload, no unit name for this""" return self._run_service_command( command='daemon-reload', expected_exit_code=0, unit_name='', log_callback=log_callback )
import pytest from cfme import test_requirements from cfme.infrastructure.provider import InfraProvider from cfme.markers.env_markers.provider import ONE from cfme.utils.appliance.implementations.ui import navigate_to pytestmark = [test_requirements.report] @pytest.fixture(scope="module") def report(appliance): saved_report = appliance.collections.reports.instantiate( type="Configuration Management", subtype="Virtual Machines", menu_name="Hardware Information for VMs", ).queue(wait_for_finish=True) yield saved_report saved_report.delete(cancel=False) @pytest.mark.parametrize("filetype", ["txt", "csv", "pdf"]) @pytest.mark.provider([InfraProvider], selector=ONE, scope="module") def test_download_report(setup_provider_modscope, report, filetype): """Download the report as a file. Polarion: assignee: pvala casecomponent: Reporting caseimportance: high initialEstimate: 1/20h """ if filetype == "pdf": view = navigate_to(report, "Details") # since multiple window handling is not possible, we just assert that the option is enabled. assert view.download.item_enabled("Print or export as PDF") else: report.download(filetype)
nachandr/cfme_tests
cfme/tests/intelligence/test_download_report.py
cfme/utils/appliance/services.py
"""Support for Traccar.""" from aiohttp import web import voluptuous as vol from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER from homeassistant.const import CONF_WEBHOOK_ID, HTTP_OK, HTTP_UNPROCESSABLE_ENTITY from homeassistant.helpers import config_entry_flow import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from .const import ( ATTR_ACCURACY, ATTR_ALTITUDE, ATTR_BATTERY, ATTR_BEARING, ATTR_ID, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_SPEED, ATTR_TIMESTAMP, DOMAIN, ) TRACKER_UPDATE = f"{DOMAIN}_tracker_update" DEFAULT_ACCURACY = HTTP_OK DEFAULT_BATTERY = -1 def _id(value: str) -> str: """Coerce id by removing '-'.""" return value.replace("-", "") WEBHOOK_SCHEMA = vol.Schema( { vol.Required(ATTR_ID): vol.All(cv.string, _id), vol.Required(ATTR_LATITUDE): cv.latitude, vol.Required(ATTR_LONGITUDE): cv.longitude, vol.Optional(ATTR_ACCURACY, default=DEFAULT_ACCURACY): vol.Coerce(float), vol.Optional(ATTR_ALTITUDE): vol.Coerce(float), vol.Optional(ATTR_BATTERY, default=DEFAULT_BATTERY): vol.Coerce(float), vol.Optional(ATTR_BEARING): vol.Coerce(float), vol.Optional(ATTR_SPEED): vol.Coerce(float), vol.Optional(ATTR_TIMESTAMP): vol.Coerce(int), } ) async def async_setup(hass, hass_config): """Set up the Traccar component.""" hass.data[DOMAIN] = {"devices": set(), "unsub_device_tracker": {}} return True async def handle_webhook(hass, webhook_id, request): """Handle incoming webhook with Traccar request.""" try: data = WEBHOOK_SCHEMA(dict(request.query)) except vol.MultipleInvalid as error: return web.Response(text=error.error_message, status=HTTP_UNPROCESSABLE_ENTITY) attrs = { ATTR_ALTITUDE: data.get(ATTR_ALTITUDE), ATTR_BEARING: data.get(ATTR_BEARING), ATTR_SPEED: data.get(ATTR_SPEED), } device = data[ATTR_ID] async_dispatcher_send( hass, TRACKER_UPDATE, device, data[ATTR_LATITUDE], data[ATTR_LONGITUDE], data[ATTR_BATTERY], data[ATTR_ACCURACY], attrs, ) return web.Response(text=f"Setting location for {device}", status=HTTP_OK) async def async_setup_entry(hass, entry): """Configure based on config entry.""" hass.components.webhook.async_register( DOMAIN, "Traccar", entry.data[CONF_WEBHOOK_ID], handle_webhook ) hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, DEVICE_TRACKER) ) return True async def async_unload_entry(hass, entry): """Unload a config entry.""" hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID]) hass.data[DOMAIN]["unsub_device_tracker"].pop(entry.entry_id)() await hass.config_entries.async_forward_entry_unload(entry, DEVICE_TRACKER) return True async_remove_entry = config_entry_flow.webhook_async_remove_entry
"""Define tests for the Ambient PWS config flow.""" import json from unittest.mock import patch import aioambient import pytest from homeassistant import data_entry_flow from homeassistant.components.ambient_station import CONF_APP_KEY, DOMAIN, config_flow from homeassistant.config_entries import SOURCE_USER from homeassistant.const import CONF_API_KEY from tests.common import MockConfigEntry, load_fixture, mock_coro @pytest.fixture def get_devices_response(): """Define a fixture for a successful /devices response.""" return mock_coro() @pytest.fixture def mock_aioambient(get_devices_response): """Mock the aioambient library.""" with patch("homeassistant.components.ambient_station.config_flow.Client") as Client: Client().api.get_devices.return_value = get_devices_response yield Client async def test_duplicate_error(hass): """Test that errors are shown when duplicates are added.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} MockConfigEntry( domain=DOMAIN, unique_id="67890fghij67890fghij", data=conf ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" @pytest.mark.parametrize( "get_devices_response", [mock_coro(exception=aioambient.errors.AmbientError)] ) async def test_invalid_api_key(hass, mock_aioambient): """Test that an invalid API/App Key throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "invalid_key"} @pytest.mark.parametrize("get_devices_response", [mock_coro(return_value=[])]) async def test_no_devices(hass, mock_aioambient): """Test that an account with no associated devices throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "no_devices"} async def test_show_form(hass): """Test that the form is served with no input.""" flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=None) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_import(hass, mock_aioambient): """Test that the import step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_import(import_config=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", } @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_user(hass, mock_aioambient): """Test that the user step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", 
CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", }
tboyce021/home-assistant
tests/components/ambient_station/test_config_flow.py
homeassistant/components/traccar/__init__.py
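As a quick illustration of the WEBHOOK_SCHEMA defined in the Traccar module above, the sketch below validates a minimal query dict. It uses the module's ATTR_* and DEFAULT_* constants (their literal key names live in .const and are not shown here) and assumes an importable Home Assistant development environment; the point is that _id() strips dashes and the accuracy/battery defaults are applied when omitted.

from homeassistant.components.traccar import (
    ATTR_ACCURACY,
    ATTR_BATTERY,
    ATTR_ID,
    ATTR_LATITUDE,
    ATTR_LONGITUDE,
    DEFAULT_ACCURACY,
    DEFAULT_BATTERY,
    WEBHOOK_SCHEMA,
)

# Minimal query as the webhook handler would see it; only id/lat/lon are required.
query = {ATTR_ID: "my-phone-1", ATTR_LATITUDE: "52.31", ATTR_LONGITUDE: "4.76"}
data = WEBHOOK_SCHEMA(query)

assert data[ATTR_ID] == "myphone1"              # _id() removes the dashes
assert data[ATTR_ACCURACY] == DEFAULT_ACCURACY  # default applied when omitted
assert data[ATTR_BATTERY] == DEFAULT_BATTERY    # default applied when omitted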
"""Support for Tibber sensors.""" import asyncio from datetime import timedelta import logging from random import randrange import aiohttp from homeassistant.components.sensor import DEVICE_CLASS_POWER from homeassistant.const import POWER_WATT from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle, dt as dt_util from .const import DOMAIN as TIBBER_DOMAIN, MANUFACTURER _LOGGER = logging.getLogger(__name__) ICON = "mdi:currency-usd" SCAN_INTERVAL = timedelta(minutes=1) MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5) PARALLEL_UPDATES = 0 async def async_setup_entry(hass, entry, async_add_entities): """Set up the Tibber sensor.""" tibber_connection = hass.data.get(TIBBER_DOMAIN) dev = [] for home in tibber_connection.get_homes(only_active=False): try: await home.update_info() except asyncio.TimeoutError as err: _LOGGER.error("Timeout connecting to Tibber home: %s ", err) raise PlatformNotReady() from err except aiohttp.ClientError as err: _LOGGER.error("Error connecting to Tibber home: %s ", err) raise PlatformNotReady() from err if home.has_active_subscription: dev.append(TibberSensorElPrice(home)) if home.has_real_time_consumption: dev.append(TibberSensorRT(home)) async_add_entities(dev, True) class TibberSensor(Entity): """Representation of a generic Tibber sensor.""" def __init__(self, tibber_home): """Initialize the sensor.""" self._tibber_home = tibber_home self._last_updated = None self._state = None self._is_available = False self._device_state_attributes = {} self._name = tibber_home.info["viewer"]["home"]["appNickname"] if self._name is None: self._name = tibber_home.info["viewer"]["home"]["address"].get( "address1", "" ) self._spread_load_constant = randrange(3600) @property def device_state_attributes(self): """Return the state attributes.""" return self._device_state_attributes @property def model(self): """Return the model of the sensor.""" return None @property def state(self): """Return the state of the device.""" return self._state @property def device_id(self): """Return the ID of the physical device this sensor is part of.""" home = self._tibber_home.info["viewer"]["home"] return home["meteringPointData"]["consumptionEan"] @property def device_info(self): """Return the device_info of the device.""" device_info = { "identifiers": {(TIBBER_DOMAIN, self.device_id)}, "name": self.name, "manufacturer": MANUFACTURER, } if self.model is not None: device_info["model"] = self.model return device_info class TibberSensorElPrice(TibberSensor): """Representation of a Tibber sensor for el price.""" async def async_update(self): """Get the latest data and updates the states.""" now = dt_util.now() if ( self._tibber_home.current_price_total and self._last_updated and self._last_updated.hour == now.hour and self._tibber_home.last_data_timestamp ): return if ( not self._tibber_home.last_data_timestamp or (self._tibber_home.last_data_timestamp - now).total_seconds() < 5 * 3600 + self._spread_load_constant or not self._is_available ): _LOGGER.debug("Asking for new data") await self._fetch_data() res = self._tibber_home.current_price_data() self._state, price_level, self._last_updated = res self._device_state_attributes["price_level"] = price_level attrs = self._tibber_home.current_attributes() self._device_state_attributes.update(attrs) self._is_available = self._state is not None @property def available(self): """Return True if entity is available.""" return self._is_available @property def name(self): 
"""Return the name of the sensor.""" return f"Electricity price {self._name}" @property def model(self): """Return the model of the sensor.""" return "Price Sensor" @property def icon(self): """Return the icon to use in the frontend.""" return ICON @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return self._tibber_home.price_unit @property def unique_id(self): """Return a unique ID.""" return self.device_id @Throttle(MIN_TIME_BETWEEN_UPDATES) async def _fetch_data(self): _LOGGER.debug("Fetching data") try: await self._tibber_home.update_info_and_price_info() except (asyncio.TimeoutError, aiohttp.ClientError): return data = self._tibber_home.info["viewer"]["home"] self._device_state_attributes["app_nickname"] = data["appNickname"] self._device_state_attributes["grid_company"] = data["meteringPointData"][ "gridCompany" ] self._device_state_attributes["estimated_annual_consumption"] = data[ "meteringPointData" ]["estimatedAnnualConsumption"] class TibberSensorRT(TibberSensor): """Representation of a Tibber sensor for real time consumption.""" async def async_added_to_hass(self): """Start listen for real time data.""" await self._tibber_home.rt_subscribe(self.hass.loop, self._async_callback) async def _async_callback(self, payload): """Handle received data.""" errors = payload.get("errors") if errors: _LOGGER.error(errors[0]) return data = payload.get("data") if data is None: return live_measurement = data.get("liveMeasurement") if live_measurement is None: return self._state = live_measurement.pop("power", None) for key, value in live_measurement.items(): if value is None: continue self._device_state_attributes[key] = value self.async_write_ha_state() @property def available(self): """Return True if entity is available.""" return self._tibber_home.rt_subscription_running @property def model(self): """Return the model of the sensor.""" return "Tibber Pulse" @property def name(self): """Return the name of the sensor.""" return f"Real time consumption {self._name}" @property def should_poll(self): """Return the polling state.""" return False @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return POWER_WATT @property def unique_id(self): """Return a unique ID.""" return f"{self.device_id}_rt_consumption" @property def device_class(self): """Return the device class of the sensor.""" return DEVICE_CLASS_POWER
"""Define tests for the Ambient PWS config flow.""" import json from unittest.mock import patch import aioambient import pytest from homeassistant import data_entry_flow from homeassistant.components.ambient_station import CONF_APP_KEY, DOMAIN, config_flow from homeassistant.config_entries import SOURCE_USER from homeassistant.const import CONF_API_KEY from tests.common import MockConfigEntry, load_fixture, mock_coro @pytest.fixture def get_devices_response(): """Define a fixture for a successful /devices response.""" return mock_coro() @pytest.fixture def mock_aioambient(get_devices_response): """Mock the aioambient library.""" with patch("homeassistant.components.ambient_station.config_flow.Client") as Client: Client().api.get_devices.return_value = get_devices_response yield Client async def test_duplicate_error(hass): """Test that errors are shown when duplicates are added.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} MockConfigEntry( domain=DOMAIN, unique_id="67890fghij67890fghij", data=conf ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" @pytest.mark.parametrize( "get_devices_response", [mock_coro(exception=aioambient.errors.AmbientError)] ) async def test_invalid_api_key(hass, mock_aioambient): """Test that an invalid API/App Key throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "invalid_key"} @pytest.mark.parametrize("get_devices_response", [mock_coro(return_value=[])]) async def test_no_devices(hass, mock_aioambient): """Test that an account with no associated devices throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "no_devices"} async def test_show_form(hass): """Test that the form is served with no input.""" flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=None) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_import(hass, mock_aioambient): """Test that the import step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_import(import_config=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", } @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_user(hass, mock_aioambient): """Test that the user step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", 
CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", }
tboyce021/home-assistant
tests/components/ambient_station/test_config_flow.py
homeassistant/components/tibber/sensor.py
"""Config flow for IFTTT.""" from homeassistant.helpers import config_entry_flow from .const import DOMAIN config_entry_flow.register_webhook_flow( DOMAIN, "IFTTT Webhook", { "applet_url": "https://ifttt.com/maker_webhooks", "docs_url": "https://www.home-assistant.io/integrations/ifttt/", }, )
"""Define tests for the Ambient PWS config flow.""" import json from unittest.mock import patch import aioambient import pytest from homeassistant import data_entry_flow from homeassistant.components.ambient_station import CONF_APP_KEY, DOMAIN, config_flow from homeassistant.config_entries import SOURCE_USER from homeassistant.const import CONF_API_KEY from tests.common import MockConfigEntry, load_fixture, mock_coro @pytest.fixture def get_devices_response(): """Define a fixture for a successful /devices response.""" return mock_coro() @pytest.fixture def mock_aioambient(get_devices_response): """Mock the aioambient library.""" with patch("homeassistant.components.ambient_station.config_flow.Client") as Client: Client().api.get_devices.return_value = get_devices_response yield Client async def test_duplicate_error(hass): """Test that errors are shown when duplicates are added.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} MockConfigEntry( domain=DOMAIN, unique_id="67890fghij67890fghij", data=conf ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" @pytest.mark.parametrize( "get_devices_response", [mock_coro(exception=aioambient.errors.AmbientError)] ) async def test_invalid_api_key(hass, mock_aioambient): """Test that an invalid API/App Key throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "invalid_key"} @pytest.mark.parametrize("get_devices_response", [mock_coro(return_value=[])]) async def test_no_devices(hass, mock_aioambient): """Test that an account with no associated devices throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "no_devices"} async def test_show_form(hass): """Test that the form is served with no input.""" flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=None) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_import(hass, mock_aioambient): """Test that the import step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_import(import_config=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", } @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_user(hass, mock_aioambient): """Test that the user step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", 
CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", }
tboyce021/home-assistant
tests/components/ambient_station/test_config_flow.py
homeassistant/components/ifttt/config_flow.py
"""Adds config flow for Dune HD integration.""" import ipaddress import logging import re from pdunehd import DuneHDPlayer import voluptuous as vol from homeassistant import config_entries, exceptions from homeassistant.const import CONF_HOST from .const import DOMAIN # pylint:disable=unused-import _LOGGER = logging.getLogger(__name__) def host_valid(host): """Return True if hostname or IP address is valid.""" try: if ipaddress.ip_address(host).version == (4 or 6): return True except ValueError: if len(host) > 253: return False allowed = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return all(allowed.match(x) for x in host.split(".")) class DuneHDConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Dune HD integration.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL def __init__(self): """Initialize.""" self.host = None async def init_device(self, host): """Initialize Dune HD player.""" player = DuneHDPlayer(host) state = await self.hass.async_add_executor_job(player.update_state) if not state: raise CannotConnect() async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input is not None: if host_valid(user_input[CONF_HOST]): self.host = user_input[CONF_HOST] try: if self.host_already_configured(self.host): raise AlreadyConfigured() await self.init_device(self.host) except CannotConnect: errors[CONF_HOST] = "cannot_connect" except AlreadyConfigured: errors[CONF_HOST] = "already_configured" else: return self.async_create_entry(title=self.host, data=user_input) else: errors[CONF_HOST] = "invalid_host" return self.async_show_form( step_id="user", data_schema=vol.Schema({vol.Required(CONF_HOST, default=""): str}), errors=errors, ) async def async_step_import(self, user_input=None): """Handle configuration by yaml file.""" self.host = user_input[CONF_HOST] if self.host_already_configured(self.host): return self.async_abort(reason="already_configured") try: await self.init_device(self.host) except CannotConnect: _LOGGER.error("Import aborted, cannot connect to %s", self.host) return self.async_abort(reason="cannot_connect") else: return self.async_create_entry(title=self.host, data=user_input) def host_already_configured(self, host): """See if we already have a dunehd entry matching user input configured.""" existing_hosts = { entry.data[CONF_HOST] for entry in self._async_current_entries() } return host in existing_hosts class CannotConnect(exceptions.HomeAssistantError): """Error to indicate we cannot connect.""" class AlreadyConfigured(exceptions.HomeAssistantError): """Error to indicate device is already configured."""
"""Define tests for the Ambient PWS config flow.""" import json from unittest.mock import patch import aioambient import pytest from homeassistant import data_entry_flow from homeassistant.components.ambient_station import CONF_APP_KEY, DOMAIN, config_flow from homeassistant.config_entries import SOURCE_USER from homeassistant.const import CONF_API_KEY from tests.common import MockConfigEntry, load_fixture, mock_coro @pytest.fixture def get_devices_response(): """Define a fixture for a successful /devices response.""" return mock_coro() @pytest.fixture def mock_aioambient(get_devices_response): """Mock the aioambient library.""" with patch("homeassistant.components.ambient_station.config_flow.Client") as Client: Client().api.get_devices.return_value = get_devices_response yield Client async def test_duplicate_error(hass): """Test that errors are shown when duplicates are added.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} MockConfigEntry( domain=DOMAIN, unique_id="67890fghij67890fghij", data=conf ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" @pytest.mark.parametrize( "get_devices_response", [mock_coro(exception=aioambient.errors.AmbientError)] ) async def test_invalid_api_key(hass, mock_aioambient): """Test that an invalid API/App Key throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "invalid_key"} @pytest.mark.parametrize("get_devices_response", [mock_coro(return_value=[])]) async def test_no_devices(hass, mock_aioambient): """Test that an account with no associated devices throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "no_devices"} async def test_show_form(hass): """Test that the form is served with no input.""" flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=None) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_import(hass, mock_aioambient): """Test that the import step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_import(import_config=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", } @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_user(hass, mock_aioambient): """Test that the user step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", 
CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", }
tboyce021/home-assistant
tests/components/ambient_station/test_config_flow.py
homeassistant/components/dunehd/config_flow.py
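One detail worth noting in the host_valid() helper of the Dune HD config flow above: "(4 or 6)" evaluates to 4 in Python, so the early return only fires for IPv4 literals; a valid IPv6 literal parses without raising, skips the except branch, and the function falls through to return None (falsy). A minimal sketch of the membership-test variant, otherwise unchanged:

import ipaddress
import re


def host_valid(host):
    """Return True if hostname or IP address is valid."""
    try:
        # Membership test instead of "== (4 or 6)", which only ever compares against 4.
        if ipaddress.ip_address(host).version in (4, 6):
            return True
    except ValueError:
        if len(host) > 253:
            return False
        allowed = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
        return all(allowed.match(x) for x in host.split("."))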
"""Support for the OpenWeatherMap (OWM) service.""" from .abstract_owm_sensor import AbstractOpenWeatherMapSensor from .const import ( ATTR_API_FORECAST, DOMAIN, ENTRY_NAME, ENTRY_WEATHER_COORDINATOR, FORECAST_MONITORED_CONDITIONS, FORECAST_SENSOR_TYPES, MONITORED_CONDITIONS, WEATHER_SENSOR_TYPES, ) from .weather_update_coordinator import WeatherUpdateCoordinator async def async_setup_entry(hass, config_entry, async_add_entities): """Set up OpenWeatherMap sensor entities based on a config entry.""" domain_data = hass.data[DOMAIN][config_entry.entry_id] name = domain_data[ENTRY_NAME] weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR] weather_sensor_types = WEATHER_SENSOR_TYPES forecast_sensor_types = FORECAST_SENSOR_TYPES entities = [] for sensor_type in MONITORED_CONDITIONS: unique_id = f"{config_entry.unique_id}-{sensor_type}" entities.append( OpenWeatherMapSensor( name, unique_id, sensor_type, weather_sensor_types[sensor_type], weather_coordinator, ) ) for sensor_type in FORECAST_MONITORED_CONDITIONS: unique_id = f"{config_entry.unique_id}-forecast-{sensor_type}" entities.append( OpenWeatherMapForecastSensor( f"{name} Forecast", unique_id, sensor_type, forecast_sensor_types[sensor_type], weather_coordinator, ) ) async_add_entities(entities) class OpenWeatherMapSensor(AbstractOpenWeatherMapSensor): """Implementation of an OpenWeatherMap sensor.""" def __init__( self, name, unique_id, sensor_type, sensor_configuration, weather_coordinator: WeatherUpdateCoordinator, ): """Initialize the sensor.""" super().__init__( name, unique_id, sensor_type, sensor_configuration, weather_coordinator ) self._weather_coordinator = weather_coordinator @property def state(self): """Return the state of the device.""" return self._weather_coordinator.data.get(self._sensor_type, None) class OpenWeatherMapForecastSensor(AbstractOpenWeatherMapSensor): """Implementation of an OpenWeatherMap this day forecast sensor.""" def __init__( self, name, unique_id, sensor_type, sensor_configuration, weather_coordinator: WeatherUpdateCoordinator, ): """Initialize the sensor.""" super().__init__( name, unique_id, sensor_type, sensor_configuration, weather_coordinator ) self._weather_coordinator = weather_coordinator @property def state(self): """Return the state of the device.""" forecasts = self._weather_coordinator.data.get(ATTR_API_FORECAST) if forecasts is not None and len(forecasts) > 0: return forecasts[0].get(self._sensor_type, None) return None
"""Define tests for the Ambient PWS config flow.""" import json from unittest.mock import patch import aioambient import pytest from homeassistant import data_entry_flow from homeassistant.components.ambient_station import CONF_APP_KEY, DOMAIN, config_flow from homeassistant.config_entries import SOURCE_USER from homeassistant.const import CONF_API_KEY from tests.common import MockConfigEntry, load_fixture, mock_coro @pytest.fixture def get_devices_response(): """Define a fixture for a successful /devices response.""" return mock_coro() @pytest.fixture def mock_aioambient(get_devices_response): """Mock the aioambient library.""" with patch("homeassistant.components.ambient_station.config_flow.Client") as Client: Client().api.get_devices.return_value = get_devices_response yield Client async def test_duplicate_error(hass): """Test that errors are shown when duplicates are added.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} MockConfigEntry( domain=DOMAIN, unique_id="67890fghij67890fghij", data=conf ).add_to_hass(hass) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data=conf ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" @pytest.mark.parametrize( "get_devices_response", [mock_coro(exception=aioambient.errors.AmbientError)] ) async def test_invalid_api_key(hass, mock_aioambient): """Test that an invalid API/App Key throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "invalid_key"} @pytest.mark.parametrize("get_devices_response", [mock_coro(return_value=[])]) async def test_no_devices(hass, mock_aioambient): """Test that an account with no associated devices throws an error.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["errors"] == {"base": "no_devices"} async def test_show_form(hass): """Test that the form is served with no input.""" flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=None) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_import(hass, mock_aioambient): """Test that the import step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_import(import_config=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", } @pytest.mark.parametrize( "get_devices_response", [mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))], ) async def test_step_user(hass, mock_aioambient): """Test that the user step works.""" conf = {CONF_API_KEY: "12345abcde12345abcde", 
CONF_APP_KEY: "67890fghij67890fghij"} flow = config_flow.AmbientStationFlowHandler() flow.hass = hass flow.context = {"source": SOURCE_USER} result = await flow.async_step_user(user_input=conf) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == "67890fghij67" assert result["data"] == { CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij", }
tboyce021/home-assistant
tests/components/ambient_station/test_config_flow.py
homeassistant/components/openweathermap/sensor.py